repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
gangadhar-kadam/verve_live_erp | erpnext/patches/v4_0/create_price_list_if_missing.py | 119 | 1087 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils.nestedset import get_root_of
def execute():
    """Patch entry point: create the default buying and selling Price
    Lists when a site has none (e.g. after a partial setup)."""
    # Setup not complete -- no Company record means the site was never
    # initialized, so there is nothing to patch.
    if not frappe.db.sql("""select name from tabCompany limit 1"""):
        return

    if "shopping_cart" in frappe.get_installed_apps():
        frappe.reload_doc("shopping_cart", "doctype", "shopping_cart_settings")

    # Guarantee at least one buying and one selling price list each.
    if not frappe.db.sql("select name from `tabPrice List` where buying=1"):
        create_price_list(_("Standard Buying"), buying=1)

    if not frappe.db.sql("select name from `tabPrice List` where selling=1"):
        create_price_list(_("Standard Selling"), selling=1)
def create_price_list(pl_name, buying=0, selling=0):
    """Insert a new enabled Price List named *pl_name*.

    :param pl_name: display name of the price list
    :param buying: 1 to mark the list as a buying price list
    :param selling: 1 to mark the list as a selling price list
    """
    doc_fields = {
        "doctype": "Price List",
        "price_list_name": pl_name,
        "enabled": 1,
        "buying": buying,
        "selling": selling,
        # Use the site-wide default currency.
        "currency": frappe.db.get_default("currency"),
        # Attach the list to the root of the Territory tree.
        "territories": [{"territory": get_root_of("Territory")}],
    }
    frappe.get_doc(doc_fields).insert()
| agpl-3.0 |
openstack/cloudbase-init | cloudbaseinit/plugins/common/trim.py | 4 | 1284 | # Copyright 2017 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as oslo_logging
from cloudbaseinit import conf as cloudbaseinit_conf
from cloudbaseinit.osutils import factory as osutils_factory
from cloudbaseinit.plugins.common import base as plugin_base
CONF = cloudbaseinit_conf.CONF
LOG = oslo_logging.getLogger(__name__)
class TrimConfigPlugin(plugin_base.BasePlugin):
    """Apply the configured TRIM setting to the guest operating system."""

    def execute(self, service, shared_data):
        """Enable or disable TRIM according to CONF.trim_enabled."""
        enable = CONF.trim_enabled
        os_utils = osutils_factory.get_os_utils()
        os_utils.enable_trim(enable)
        LOG.info("TRIM enabled status: %s", enable)
        # Run once; no reboot required.
        return plugin_base.PLUGIN_EXECUTION_DONE, False

    def get_os_requirements(self):
        """Windows only, NT 6.1 (Windows 7 / Server 2008 R2) or later."""
        return 'win32', (6, 1)
| apache-2.0 |
memtoko/django | django/contrib/gis/geos/prototypes/predicates.py | 103 | 1794 | """
This module houses the GEOS ctypes prototype functions for the
unary and binary predicate operations on geometries.
"""
from ctypes import c_char, c_char_p, c_double
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
# ## Binary & unary predicate functions ##
def binary_predicate(func, *args):
    "For GEOS binary predicate functions."
    # Every binary predicate takes two geometry pointers; some take
    # extra trailing arguments (e.g. a tolerance for GEOSEqualsExact
    # or a pattern string for GEOSRelatePattern).
    func.argtypes = [GEOM_PTR, GEOM_PTR] + list(args)
    # GEOS predicates return a char that errcheck turns into a bool.
    func.restype = c_char
    func.errcheck = check_predicate
    return func
def unary_predicate(func):
    "For GEOS unary predicate functions."
    # Unary predicates take a single geometry pointer and return a
    # char that check_predicate converts into a Python boolean.
    func.argtypes = [GEOM_PTR]
    func.restype = c_char
    func.errcheck = check_predicate
    return func
# ## Unary Predicates ##
# Each binding wraps the corresponding GEOS C API function with the
# argument/return types set up by unary_predicate() above.
geos_hasz = unary_predicate(GEOSFunc('GEOSHasZ'))
geos_isempty = unary_predicate(GEOSFunc('GEOSisEmpty'))
geos_isring = unary_predicate(GEOSFunc('GEOSisRing'))
geos_issimple = unary_predicate(GEOSFunc('GEOSisSimple'))
geos_isvalid = unary_predicate(GEOSFunc('GEOSisValid'))

# ## Binary Predicates ##
# GEOSEqualsExact additionally takes a double tolerance;
# GEOSRelatePattern additionally takes a DE-9IM pattern string.
geos_contains = binary_predicate(GEOSFunc('GEOSContains'))
geos_crosses = binary_predicate(GEOSFunc('GEOSCrosses'))
geos_disjoint = binary_predicate(GEOSFunc('GEOSDisjoint'))
geos_equals = binary_predicate(GEOSFunc('GEOSEquals'))
geos_equalsexact = binary_predicate(GEOSFunc('GEOSEqualsExact'), c_double)
geos_intersects = binary_predicate(GEOSFunc('GEOSIntersects'))
geos_overlaps = binary_predicate(GEOSFunc('GEOSOverlaps'))
geos_relatepattern = binary_predicate(GEOSFunc('GEOSRelatePattern'), c_char_p)
geos_touches = binary_predicate(GEOSFunc('GEOSTouches'))
geos_within = binary_predicate(GEOSFunc('GEOSWithin'))
| bsd-3-clause |
SymbiFlow/prjuray-tools | tools/bits.py | 1 | 2620 | #!/usr/bin/env python3
# coding: utf-8
#
# Copyright 2020 Project U-Ray Authors
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file.
#
# SPDX-License-Identifier: ISC
import re
import sys
import json
# Usage: frames.txt tiles.txt tilegrid.json
def main():
    """Print, for every configuration frame column seen, the number of
    minor frames, followed by the tiles mapped onto that frame.

    Usage: bits.py frames.txt tiles.txt tilegrid.json
    """
    frame_line_re = re.compile(r'0x([0-9A-Fa-f]+).*')

    # Map (row, col, frame base address) -> (highest minor number + 1).
    frame_rc_height = {}
    with open(sys.argv[1], 'r') as f:
        for line in f:
            m = frame_line_re.match(line)
            if not m:
                continue
            frame = int(m.group(1), 16)
            # Decode the frame address fields.
            bus = (frame >> 24) & 0x7
            half = (frame >> 23) & 0x1
            row = (frame >> 18) & 0x1F
            col = (frame >> 8) & 0x3FF
            minor = frame & 0xFF
            # Only bus 0 / half 0 frames are considered.
            if bus != 0 or half != 0:
                continue
            # Bug fix: the original tested `(row, col) not in frame_rc_height`
            # while storing 3-tuple keys, so the max() branch never ran (and
            # would have raised KeyError via frame_rc_height[(row, col)] if it
            # had), silently overwriting heights when minors arrived out of
            # order. Use one consistent key and take the max.
            key = (row, col, frame & ~0xFF)
            frame_rc_height[key] = max(frame_rc_height.get(key, 0), minor + 1)

    # Map tile name -> (x, y) grid coordinates from the CSV-ish tiles file.
    tiles_to_xy = {}
    with open(sys.argv[2], 'r') as tilef:
        for line in tilef:
            sl = line.strip().split(",")
            if len(sl) < 4:
                continue
            x = int(sl[0])
            y = int(sl[1])
            name = sl[2]
            tiles_to_xy[name] = (x, y)

    # Map frame base address -> sorted list of (start, y, x, tile name).
    with open(sys.argv[3]) as tb_f:
        tbj = json.load(tb_f)
    frames_to_tiles = {}
    for tilename, tiledata in tbj.items():
        for chunk in tiledata:
            frame, start, size = chunk
            if frame not in frames_to_tiles:
                frames_to_tiles[frame] = []
            name = tilename.split(":")[0]
            x, y = tiles_to_xy[name]
            frames_to_tiles[frame].append((start, y, x, name))
    for frame, tiles in frames_to_tiles.items():
        tiles.sort()

    # Emit one line per frame column, then an indented line per tile.
    for rc, height in sorted(frame_rc_height.items()):
        row, col, frame = rc
        line = "%08x %6d %6d %6d" % (frame, row, col, height)
        print(line)
        frame = (row << 18) | (col << 8)
        last_start = 0
        if frame in frames_to_tiles and len(frames_to_tiles[frame]) > 0:
            for tile in frames_to_tiles[frame]:
                start, ty, tx, tname = tile
                print(" %6d (%4d) %6d %6d %s" %
                      (start, start - last_start, tx, ty, tname))
                last_start = start
if __name__ == "__main__":
main()
| isc |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/docutils-0.10-py2.7.egg/docutils/readers/pep.py | 136 | 1555 | # $Id: pep.py 7320 2012-01-19 22:33:02Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Python Enhancement Proposal (PEP) Reader.
"""
__docformat__ = 'reStructuredText'
from docutils.readers import standalone
from docutils.transforms import peps, references, misc, frontmatter
from docutils.parsers import rst
class Reader(standalone.Reader):
    """Standalone-document reader specialized for PEP source files."""

    supported = ('pep',)
    """Contexts this reader supports."""

    settings_spec = (
        'PEP Reader Option Defaults',
        'The --pep-references and --rfc-references options (for the '
        'reStructuredText parser) are on by default.',
        ())

    config_section = 'pep reader'
    config_section_dependencies = ('readers', 'standalone reader')

    def get_transforms(self):
        """Return the standalone transforms with the generic frontmatter
        handling swapped out for the PEP-specific transforms."""
        transforms = standalone.Reader.get_transforms(self)
        # We have PEP-specific frontmatter handling.
        transforms.remove(frontmatter.DocTitle)
        transforms.remove(frontmatter.SectionSubTitle)
        transforms.remove(frontmatter.DocInfo)
        transforms.extend([peps.Headers, peps.Contents, peps.TargetNotes])
        return transforms

    # PEP and RFC references are recognized by default for PEP sources.
    settings_default_overrides = {'pep_references': 1, 'rfc_references': 1}

    inliner_class = rst.states.Inliner

    def __init__(self, parser=None, parser_name=None):
        """`parser` should be ``None``."""
        # Build an RFC-2822-header-aware rST parser when none is supplied.
        if parser is None:
            parser = rst.Parser(rfc2822=True, inliner=self.inliner_class())
        standalone.Reader.__init__(self, parser, '')
| gpl-2.0 |
sgallagher/anaconda | pyanaconda/ui/gui/spokes/lib/resize.py | 1 | 23693 | # Disk resizing dialog
#
# Copyright (C) 2012-2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from collections import namedtuple
from blivet.size import Size
from pyanaconda.anaconda_loggers import get_module_logger
from pyanaconda.core.i18n import _, C_, N_, P_
from pyanaconda.modules.common.constants.services import STORAGE
from pyanaconda.modules.common.structures.storage import OSData, DeviceData, DeviceFormatData
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.utils import blockedHandler, escape_markup, timed_action
import gi
gi.require_version("Gdk", "3.0")
gi.require_version("Gtk", "3.0")
from gi.repository import Gdk, Gtk
__all__ = ["ResizeDialog"]
# Column indices of the "diskStore" tree model (defined in
# spokes/lib/resize.glade); PartStoreRow below mirrors this layout.
DEVICE_NAME_COL = 0
DESCRIPTION_COL = 1
FILESYSTEM_COL = 2
RECLAIMABLE_COL = 3
ACTION_COL = 4
EDITABLE_COL = 5
TYPE_COL = 6
TOOLTIP_COL = 7
RESIZE_TARGET_COL = 8

# Row types stored in TYPE_COL.
TY_NORMAL = 0
TY_FREE_SPACE = 1
TY_PROTECTED = 2

# Convenience named view over one diskStore row.
PartStoreRow = namedtuple("PartStoreRow", ["name", "desc", "fs", "reclaimable",
                                           "action", "editable", "ty",
                                           "tooltip", "target"])

# Action labels shown in ACTION_COL; marked for translation with N_ and
# translated at display time with _().
PRESERVE = N_("Preserve")
SHRINK = N_("Shrink")
DELETE = N_("Delete")
NOTHING = ""

log = get_module_logger(__name__)
class ResizeDialog(GUIObject):
    """Dialog that lets the user reclaim existing disk space by
    preserving, shrinking, or deleting existing devices."""

    builderObjects = ["actionStore", "diskStore", "resizeDialog", "resizeAdjustment"]
    mainWidgetName = "resizeDialog"
    uiFile = "spokes/lib/resize.glade"

    def __init__(self, data, payload, partitioning, disks):
        """Initialize the dialog.

        :param data: kickstart data, passed through to GUIObject
        :param payload: installation payload, queried for required space
        :param partitioning: DBus partitioning object providing the device tree
        :param disks: names of the disks to present in the dialog
        """
        super().__init__(data)
        self._disks = disks

        # Get the device tree.
        self._device_tree = STORAGE.get_proxy(
            partitioning.GetDeviceTree()
        )

        # Get roots of existing systems.
        self._roots = OSData.from_structure_list(
            self._device_tree.GetExistingSystems()
        )

        # Get the required device size.
        required_space = payload.space_required.get_bytes()
        required_size = self._device_tree.GetRequiredDeviceSize(required_space)
        self._required_size = Size(required_size)

        # Running totals maintained as the user chooses actions.
        self._initial_free_space = Size(0)
        self._selected_reclaimable_space = Size(0)
        self._can_shrink_something = False

        # Widgets from the glade file.
        self._disk_store = self.builder.get_object("diskStore")
        self._selection = self.builder.get_object("diskView-selection")
        self._view = self.builder.get_object("diskView")
        # NOTE(review): duplicate lookup of "diskStore" (also assigned
        # above) -- harmless but redundant.
        self._disk_store = self.builder.get_object("diskStore")
        self._reclaimable_label = self.builder.get_object("reclaimableSpaceLabel")
        self._selected_label = self.builder.get_object("selectedSpaceLabel")
        self._required_label = self.builder.get_object("requiredSpaceLabel")
        self._required_label.set_markup(
            _("Installation requires a total of <b>%s</b> for system data.")
            % escape_markup(str(self._required_size))
        )
        self._reclaim_desc_label = self.builder.get_object("reclaimDescLabel")
        self._resize_button = self.builder.get_object("resizeButton")
        self._preserve_button = self.builder.get_object("preserveButton")
        self._shrink_button = self.builder.get_object("shrinkButton")
        self._delete_button = self.builder.get_object("deleteButton")
        self._resize_slider = self.builder.get_object("resizeSlider")
def _get_partition_description(self, device_data, format_data):
# First, try to find the partition in some known root.
# If we find it, return the mount point as the description.
for root in self._roots:
for mount_point, device_name in root.mount_points.items():
if device_name == device_data.name:
return "{mount_point} ({os_name})".format(
mount_point=mount_point, os_name=root.os_name
)
# Otherwise, fall back on increasingly vague information.
if device_data.children:
return device_data.children[0]
if "label" in format_data.attrs:
return format_data.attrs["label"]
return format_data.description
def _get_tooltip(self, device_data):
if device_data.protected:
return _("This device contains the installation source.")
else:
return None
    def populate(self, disks):
        """Fill the disk store with one row per disk (plus partition and
        free-space child rows) and reset the running totals."""
        self._initial_free_space = Size(0)
        self._selected_reclaimable_space = Size(0)
        self._can_shrink_something = False

        total_disks = 0
        total_reclaimable_space = Size(0)

        for disk_name in disks:
            disk_reclaimable_space = self._add_disk(disk_name)
            total_reclaimable_space += disk_reclaimable_space
            total_disks += 1

        self._update_labels(total_disks, total_reclaimable_space, 0)

        description = _(
            "You can remove existing file systems you no longer need to free up space for "
            "this installation. Removing a file system will permanently delete all of the "
            "data it contains."
        )

        # Only mention recoverable in-filesystem free space when at least
        # one unprotected device turned out to be resizable.
        if self._can_shrink_something:
            description += "\n\n"
            description += _("There is also free space available in pre-existing file systems. "
                             "While it's risky and we recommend you back up your data first, you "
                             "can recover that free disk space and make it available for this "
                             "installation below.")

        self._reclaim_desc_label.set_text(description)
        self._update_reclaim_button(Size(0))
    def _add_disk(self, device_name):
        """Append one disk row plus its partitions and free space to the
        store; return the disk's total reclaimable space as a Size."""
        # Get the device data.
        device_data = DeviceData.from_structure(
            self._device_tree.GetDeviceData(device_name)
        )
        format_data = DeviceFormatData.from_structure(
            self._device_tree.GetFormatData(device_name)
        )

        # First add the disk itself.
        is_partitioned = self._device_tree.IsDevicePartitioned(device_name)

        # An unpartitioned disk is reclaimable as a whole; a partitioned
        # one accumulates reclaimable space from its partitions below.
        if is_partitioned:
            fs_type = ""
            disk_reclaimable_space = Size(0)
        else:
            fs_type = format_data.description
            disk_reclaimable_space = Size(device_data.size)

        description = "{} {}".format(
            Size(device_data.size).human_readable(max_places=1),
            device_data.description
        )

        # RECLAIMABLE_COL holds a markup *template* here; the real total
        # is substituted into the "%s" at the end of this method, once
        # every partition has been scanned.
        itr = self._disk_store.append(None, [
            device_name,
            description,
            fs_type,
            "<span foreground='grey' style='italic'>%s total</span>",
            _(PRESERVE),
            not device_data.protected,
            TY_NORMAL,
            self._get_tooltip(device_data),
            int(device_data.size),
        ])

        # Then add all its partitions.
        partitions = self._device_tree.GetDevicePartitions(device_name)

        for child_name in partitions:
            free_size = self._add_partition(itr, child_name)
            disk_reclaimable_space += free_size

        # And then add another uneditable line that lists how much space is
        # already free in the disk.
        self._add_free_space(itr, device_name)

        # And then go back and fill in the total reclaimable space for the
        # disk, now that we know what each partition has reclaimable.
        self._disk_store[itr][RECLAIMABLE_COL] = \
            self._disk_store[itr][RECLAIMABLE_COL] % disk_reclaimable_space

        return disk_reclaimable_space
def _add_partition(self, itr, device_name):
# Get the device data.
device_data = DeviceData.from_structure(
self._device_tree.GetDeviceData(device_name)
)
format_data = DeviceFormatData.from_structure(
self._device_tree.GetFormatData(device_name)
)
# Calculate the free size.
# Devices that are not resizable are still deletable.
is_resizable = self._device_tree.IsDeviceResizable(device_name)
size_limits = self._device_tree.GetDeviceSizeLimits(device_name)
min_size = Size(size_limits[0])
device_size = Size(device_data.size)
if is_resizable:
free_size = device_size - min_size
resize_string = _("%(freeSize)s of %(devSize)s") % {
"freeSize": free_size.human_readable(max_places=1),
"devSize": device_size.human_readable(max_places=1)
}
if not device_data.protected:
self._can_shrink_something = True
else:
free_size = device_size
resize_string = "<span foreground='grey'>%s</span>" % \
escape_markup(_("Not resizeable"))
# Choose the type.
if device_data.protected:
ty = TY_PROTECTED
else:
ty = TY_NORMAL
# Generate the description.
description = self._get_partition_description(device_data, format_data)
# Add a new row.
self._disk_store.append(itr, [
device_name,
description,
format_data.description,
resize_string,
_(PRESERVE),
not device_data.protected,
ty,
self._get_tooltip(device_data),
int(device_size),
])
return free_size
    def _add_free_space(self, itr, device_name):
        """Append an uneditable informational row showing the space that
        is already free on the disk, and add it to the running total."""
        # Calculate the free space.
        disk_free = Size(self._device_tree.GetDiskFreeSpace([device_name]))

        # Skip negligible amounts.
        if disk_free < Size("1MiB"):
            return

        # Add a new row.
        free_space_string = "<span foreground='grey' style='italic'>{}</span>".format(
            escape_markup(_("Free space"))
        )

        disk_free_string = "<span foreground='grey' style='italic'>{}</span>".format(
            escape_markup(disk_free.human_readable(max_places=1))
        )

        # NOTE(review): RESIZE_TARGET_COL gets a Size here while other rows
        # store int; free-space rows are never acted upon (empty name, not
        # editable) so this appears harmless -- confirm.
        self._disk_store.append(itr, [
            "",
            free_space_string,
            "",
            disk_free_string,
            NOTHING,
            False,
            TY_FREE_SPACE,
            None,
            disk_free,
        ])

        # Update the total free space.
        self._initial_free_space += disk_free
    def _update_labels(self, num_disks=None, total_reclaimable=None, selected_reclaimable=None):
        """Refresh the summary labels; only the labels whose arguments are
        not None are updated."""
        # Reclaimable-space summary needs both the disk count and the total.
        if num_disks is not None and total_reclaimable is not None:
            text = P_(
                "<b>%(count)s disk; %(size)s reclaimable space</b> (in file systems)",
                "<b>%(count)s disks; %(size)s reclaimable space</b> (in file systems)",
                num_disks
            ) % {
                "count": escape_markup(str(num_disks)),
                "size": escape_markup(total_reclaimable)
            }
            self._reclaimable_label.set_markup(text)

        if selected_reclaimable is not None:
            text = _("Total selected space to reclaim: <b>%s</b>") \
                   % escape_markup(selected_reclaimable)
            self._selected_label.set_markup(text)
    def _setup_slider(self, min_size, max_size, default_size):
        """Set up the slider for the given device.

        Set up the slider for this device, pulling out any previously given
        shrink value as the default. This also sets up the ticks on the
        slider and keyboard support. Any devices that are not resizable
        will not have a slider displayed, so they do not need to be worried
        with here.

        :param min_size: min value to set
        :type min_size: Size
        :param max_size: max value to set
        :type max_size: Size
        :param default_size: default value to set
        :type default_size: Size
        """
        # Convert the Sizes to ints
        min_size = int(min_size)
        max_size = int(max_size)
        default_value = int(default_size)

        # The slider needs to be keyboard-accessible. We'll make small movements change in
        # 1% increments, and large movements in 5% increments.
        distance = max_size - min_size
        one_percent = int(distance / 100)
        five_percent = int(distance / 20)
        twenty_percent = int(distance / 5)

        # Block our own value-changed handler while reconfiguring the range,
        # so programmatic setup is not mistaken for user input.
        with blockedHandler(self._resize_slider, self.on_resize_value_changed):
            self._resize_slider.set_range(min_size, max_size)

        self._resize_slider.set_value(default_value)

        adjustment = self.builder.get_object("resizeAdjustment")
        adjustment.configure(default_value, min_size, max_size, one_percent, five_percent, 0)

        # And then the slider needs a couple tick marks for easier navigation.
        self._resize_slider.clear_marks()
        for i in range(1, 5):
            self._resize_slider.add_mark(
                min_size + i * twenty_percent, Gtk.PositionType.BOTTOM, None
            )

        # Finally, add tick marks for the ends.
        self._resize_slider.add_mark(min_size, Gtk.PositionType.BOTTOM, str(Size(min_size)))
        self._resize_slider.add_mark(max_size, Gtk.PositionType.BOTTOM, str(Size(max_size)))
    def _update_action_buttons(self):
        """Update the Preserve/Shrink/Delete buttons and the resize slider
        to match the currently selected row."""
        # Update buttons for the selected row.
        itr = self._selection.get_selected()[1]

        if not itr:
            return

        row = self._disk_store[itr]
        obj = PartStoreRow(*row)

        self._preserve_button.set_sensitive(obj.editable)
        self._shrink_button.set_sensitive(obj.editable)
        self._delete_button.set_sensitive(obj.editable)
        self._resize_slider.set_visible(False)

        if not obj.editable:
            return

        device_name = obj.name
        device_data = DeviceData.from_structure(
            self._device_tree.GetDeviceData(device_name)
        )

        # If the selected filesystem does not support shrinking, make that
        # button insensitive.
        is_resizable = self._device_tree.IsDeviceResizable(device_name)
        self._shrink_button.set_sensitive(is_resizable)

        if is_resizable:
            min_size = self._device_tree.GetDeviceSizeLimits(device_name)[0]
            self._setup_slider(min_size, device_data.size, Size(obj.target))

        # Then, disable the button for whatever action is currently selected.
        # It doesn't make a lot of sense to allow clicking that.
        if obj.action == _(PRESERVE):
            self._preserve_button.set_sensitive(False)
        elif obj.action == _(SHRINK):
            self._shrink_button.set_sensitive(False)
            self._resize_slider.set_visible(True)
        elif obj.action == _(DELETE):
            self._delete_button.set_sensitive(False)
def _update_reclaim_button(self, got):
self._resize_button.set_sensitive(got + self._initial_free_space >= self._required_size)
    # pylint: disable=arguments-differ
    def refresh(self):
        """Rebuild the dialog contents from the current device tree."""
        super().refresh()

        # clear out the store and repopulate it from the devicetree
        self._disk_store.clear()
        self.populate(self._disks)

        self._view.expand_all()
def run(self):
rc = self.window.run()
self.window.destroy()
return rc
# Signal handlers.
def on_key_pressed(self, window, event, *args):
# Handle any keyboard events. Right now this is just delete for
# removing a partition, but it could include more later.
if not event or event and event.type != Gdk.EventType.KEY_RELEASE:
return
if event.keyval == Gdk.KEY_Delete and self._delete_button.get_sensitive():
self._delete_button.emit("clicked")
    def _sum_reclaimable_space(self, model, path, itr, *args):
        """Gtk.TreeModel.foreach callback: accumulate the space reclaimed
        by the actions currently chosen into _selected_reclaimable_space."""
        obj = PartStoreRow(*model[itr])

        # Free-space rows have no device name; nothing to reclaim here.
        if not obj.name:
            return False

        device_name = obj.name
        is_partitioned = self._device_tree.IsDevicePartitioned(device_name)

        # Partitioned disks are counted through their partition rows.
        if is_partitioned:
            return False

        device_data = DeviceData.from_structure(
            self._device_tree.GetDeviceData(device_name)
        )

        if obj.action == _(PRESERVE):
            return False
        elif obj.action == _(SHRINK):
            self._selected_reclaimable_space += Size(device_data.size) - Size(obj.target)
        elif obj.action == _(DELETE):
            self._selected_reclaimable_space += Size(device_data.size)

        # Returning False keeps foreach iterating over all rows.
        return False
def on_preserve_clicked(self, button):
itr = self._selection.get_selected()[1]
self._on_action_changed(itr, PRESERVE)
def on_shrink_clicked(self, button):
itr = self._selection.get_selected()[1]
self._on_action_changed(itr, SHRINK)
def on_delete_clicked(self, button):
itr = self._selection.get_selected()[1]
self._on_action_changed(itr, DELETE)
    def _on_action_changed(self, itr, new_action):
        """Apply *new_action* (PRESERVE/SHRINK/DELETE) to the row at *itr*
        (and, for a disk row, to its partitions), then refresh the totals."""
        if not itr:
            return

        # Handle the row selected when a button was pressed.
        selected_row = self._disk_store[itr]
        selected_row[ACTION_COL] = _(new_action)

        # If that row is a disk header, we need to process all the partitions
        # it contains.
        device_name = selected_row[DEVICE_NAME_COL]
        is_partitioned = self._device_tree.IsDevicePartitioned(device_name)

        if is_partitioned:
            part_itr = self._disk_store.iter_children(itr)

            while part_itr:
                # Immutable entries are those that we can't do anything to - like
                # the free space lines. We just want to leave them in the display
                # for information, but you can't choose to preserve/delete/shrink
                # them.
                if self._disk_store[part_itr][TYPE_COL] in [TY_FREE_SPACE, TY_PROTECTED]:
                    part_itr = self._disk_store.iter_next(part_itr)
                    continue

                self._disk_store[part_itr][ACTION_COL] = _(new_action)

                # If the user marked a whole disk for deletion, they can't go in and
                # un-delete partitions under it.
                if new_action == DELETE:
                    self._disk_store[part_itr][EDITABLE_COL] = False
                elif new_action == PRESERVE:
                    part_name = self._disk_store[part_itr][DEVICE_NAME_COL]
                    part_data = DeviceData.from_structure(
                        self._device_tree.GetDeviceData(part_name)
                    )
                    self._disk_store[part_itr][EDITABLE_COL] = not part_data.protected

                part_itr = self._disk_store.iter_next(part_itr)

        # And then we're keeping a running tally of how much space the user
        # has selected to reclaim, so reflect that in the UI.
        self._selected_reclaimable_space = Size(0)
        self._disk_store.foreach(self._sum_reclaimable_space, None)
        self._update_labels(selected_reclaimable=self._selected_reclaimable_space)
        self._update_reclaim_button(self._selected_reclaimable_space)
        self._update_action_buttons()
def _collect_actionable_rows(self, model, path, itr, rows):
"""Collect rows that can be transformed into actions."""
obj = PartStoreRow(*model[itr])
if not obj.name:
return False
if not obj.editable:
return False
rows.append(obj)
return False
def _schedule_actions(self, obj):
"""Schedule actions for the given row object."""
if obj.action == _(PRESERVE):
log.debug("Preserve %s.", obj.name)
elif obj.action == _(SHRINK):
log.debug("Shrink %s to %s.", obj.name, Size(obj.target))
self._device_tree.ShrinkDevice(obj.name, obj.target)
elif obj.action == _(DELETE):
log.debug("Remove %s.", obj.name)
self._device_tree.RemoveDevice(obj.name)
    def on_resize_clicked(self, *args):
        """Schedule all selected shrink/delete actions in the device tree."""
        rows = []

        # Collect the rows.
        self._disk_store.foreach(self._collect_actionable_rows, rows)

        # Process rows in the reversed order. If there is a disk with
        # two logical partitions sda5 and sda6 and we remove sda5, Blivet
        # renames the partition sda6 to sda5, so the actions for sda6 are
        # no longer valid. See the bug 1856496.
        for obj in reversed(rows):
            self._schedule_actions(obj)
    def on_delete_all_clicked(self, button, *args):
        """Toggle between deleting and preserving every editable disk;
        the button label doubles as the toggle state."""
        if button.get_label() == C_("GUI|Reclaim Dialog", "Delete _all"):
            action = DELETE
            button.set_label(C_("GUI|Reclaim Dialog", "Preserve _all"))
        else:
            action = PRESERVE
            button.set_label(C_("GUI|Reclaim Dialog", "Delete _all"))

        # Walk the top-level rows and apply the action to each disk.
        itr = self._disk_store.get_iter_first()
        while itr:
            obj = PartStoreRow(*self._disk_store[itr])

            if not obj.editable:
                itr = self._disk_store.iter_next(itr)
                continue

            device_name = obj.name
            device_data = DeviceData.from_structure(
                self._device_tree.GetDeviceData(device_name)
            )

            if device_data.is_disk:
                self._on_action_changed(itr, action)

            itr = self._disk_store.iter_next(itr)
def on_row_clicked(self, view, path, column):
# This handles when the user clicks on a row in the view. We use it
# only for expanding/collapsing disk headers.
if view.row_expanded(path):
view.collapse_row(path)
else:
view.expand_row(path, True)
    def on_selection_changed(self, selection):
        """Refresh the action buttons for the newly selected row."""
        # This handles when the selection changes. It's very similar to what
        # on_row_clicked above does, but this handler only deals with changes in
        # selection. Thus, clicking on a disk header to collapse it and then
        # immediately clicking on it again to expand it would not work when
        # dealt with here.
        self._update_action_buttons()
    @timed_action(delay=200, threshold=500, busy_cursor=False)
    def on_resize_value_changed(self, rng):
        """Slider handler: update the row's shrink target and the running
        reclaimable total as the user drags the resize slider."""
        (model, itr) = self._selection.get_selected()

        # Back out the contribution of the previous target before
        # recording the new one.
        old_delta = Size(rng.get_adjustment().get_upper()) - int(model[itr][RESIZE_TARGET_COL])
        self._selected_reclaimable_space -= old_delta

        # Update the target size in the store.
        model[itr][RESIZE_TARGET_COL] = Size(rng.get_value())

        # Update the "Total selected space" label.
        delta = Size(rng.get_adjustment().get_upper()) - int(rng.get_value())
        self._selected_reclaimable_space += delta
        self._update_labels(selected_reclaimable=self._selected_reclaimable_space)

        # And then the reclaim button, in case they've made enough space.
        self._update_reclaim_button(self._selected_reclaimable_space)
    def resize_slider_format(self, scale, value):
        """Format the value shown under the slider as a human-readable size."""
        # This makes the value displayed under the slider prettier than just a
        # single number.
        return str(Size(value))
| gpl-2.0 |
amyliu345/zulip | zerver/management/commands/set_message_flags.py | 32 | 3220 | from __future__ import absolute_import
from __future__ import print_function
from typing import Any, Iterable
from optparse import make_option
import logging
import sys
from django.core.management.base import BaseCommand, CommandParser
from zerver.lib import utils
from zerver.models import UserMessage, get_user_profile_by_email
from django.db import models
class Command(BaseCommand):
    # NOTE(review): this help text looks truncated ("Marks all" trails
    # off) -- confirm the intended wording before further edits.
    help = """Sets user message flags. Used internally by actions.py. Marks all
Expects a comma-delimited list of user message ids via stdin, and an EOF to terminate."""

    def add_arguments(self, parser):
        # type: (CommandParser) -> None
        """Register the command-line options for this management command."""
        parser.add_argument('-r', '--for-real',
                            dest='for_real',
                            action='store_true',
                            default=False,
                            help="Actually change message flags. Default is a dry run.")

        # Fixed typo in the user-facing help: "add of remove" -> "add or remove".
        parser.add_argument('-f', '--flag',
                            dest='flag',
                            type=str,
                            help="The flag to add or remove")

        parser.add_argument('-o', '--op',
                            dest='op',
                            type=str,
                            help="The operation to do: 'add' or 'remove'")

        parser.add_argument('-u', '--until',
                            dest='all_until',
                            type=str,
                            help="Mark all messages <= specific usermessage id")

        parser.add_argument('-m', '--email',
                            dest='email',
                            type=str,
                            help="Email to set messages for")
    def handle(self, *args, **options):
        # type: (*Any, **Any) -> None
        """Add or remove a flag on a batch of UserMessage rows, either up
        to a given id or for ids read from stdin; dry run by default."""
        if not options["flag"] or not options["op"] or not options["email"]:
            print("Please specify an operation, a flag and an email")
            exit(1)

        op = options['op']
        flag = getattr(UserMessage.flags, options['flag'])
        all_until = options['all_until']
        email = options['email']

        user_profile = get_user_profile_by_email(email)

        # Either everything up to a given usermessage id, or an explicit
        # comma-delimited id list read from stdin.
        if all_until:
            filt = models.Q(id__lte=all_until)
        else:
            filt = models.Q(message__id__in=[mid.strip() for mid in sys.stdin.read().split(',')])
        mids = [m.id for m in
                UserMessage.objects.filter(filt, user_profile=user_profile).order_by('-id')]

        if options["for_real"]:
            # NOTE(review): closing the std streams here presumably detaches
            # the long-running update from the caller -- confirm intent.
            sys.stdin.close()
            sys.stdout.close()
            sys.stderr.close()

        def do_update(batch):
            # type: (Iterable[int]) -> None
            # Set or clear the flag bit on one batch of rows.
            msgs = UserMessage.objects.filter(id__in=batch)
            if op == 'add':
                msgs.update(flags=models.F('flags').bitor(flag))
            elif op == 'remove':
                msgs.update(flags=models.F('flags').bitand(~flag))

        if not options["for_real"]:
            logging.info("Updating %s by %s %s" % (mids, op, flag))
            logging.info("Dry run completed. Run with --for-real to change message flags.")
            exit(1)

        utils.run_in_batches(mids, 400, do_update, sleep_time=3)
        exit(0)
| apache-2.0 |
leorochael/odoo | addons/project_issue/__init__.py | 433 | 1131 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project_issue
import report
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dushu1203/chromium.src | chrome/common/extensions/docs/server2/caching_file_system.py | 16 | 8264 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import posixpath
import sys
from file_system import FileSystem, StatInfo, FileNotFoundError
from future import All, Future
from path_util import AssertIsDirectory, IsDirectory, ToDirectory
from third_party.json_schema_compiler.memoize import memoize
class CachingFileSystem(FileSystem):
'''FileSystem which implements a caching layer on top of |file_system|. If
|fail_on_miss| is True then cache misses throw a FileNotFoundError rather than
falling back onto the underlying FileSystem.
If the underlying FileSystem is versioned (i.e., it implements GetVersion to
return something other than None), this will create a persistent stat cache
(keyed on the FileSystem instance's version) as an additional optimization.
'''
  def __init__(self, file_system, object_store_creator, fail_on_miss=False):
    """Wrap *file_system* with stat/read/walk object-store caches.

    If |fail_on_miss| is True, cache misses raise FileNotFoundError
    instead of falling back to the underlying FileSystem.
    """
    self._file_system = file_system
    self._fail_on_miss = fail_on_miss
    def create_object_store(category, try_versioning=False, **optargs):
      # Key the store on the FS version when one is available, so a new
      # version naturally invalidates old cache entries.
      version = file_system.GetVersion()
      versioned = try_versioning and version is not None
      if versioned:
        identity = '%s/%s' % (file_system.GetIdentity(), version)
      else:
        identity = file_system.GetIdentity()
      # Unversioned stores default to starting empty (force refresh).
      optargs['start_empty'] = optargs.get('start_empty', not versioned)
      return object_store_creator.Create(
          CachingFileSystem,
          category='%s/%s' % (identity, category),
          **optargs)
    self._stat_cache = create_object_store('stat', try_versioning=True)
    # The read caches can start populated (start_empty=False) because file
    # updates are picked up by the stat, so it doesn't need the force-refresh
    # which starting empty is designed for. Without this optimisation, cron
    # runs are extra slow.
    self._read_cache = create_object_store('read', start_empty=False)
    self._walk_cache = create_object_store('walk', start_empty=False)
def Refresh(self):
return self._file_system.Refresh()
def StatAsync(self, path):
'''Stats the directory given, or if a file is given, stats the file's parent
directory to get info about the file.
'''
# Always stat the parent directory, since it will have the stat of the child
# anyway, and this gives us an entire directory's stat info at once.
dir_path, file_path = posixpath.split(path)
dir_path = ToDirectory(dir_path)
def make_stat_info(dir_stat):
'''Converts a dir stat into the correct resulting StatInfo; if the Stat
was for a file, the StatInfo should just contain that file.
'''
if path == dir_path:
return dir_stat
# Was a file stat. Extract that file.
file_version = dir_stat.child_versions.get(file_path)
if file_version is None:
raise FileNotFoundError('No stat found for %s in %s (found %s)' %
(path, dir_path, dir_stat.child_versions))
return StatInfo(file_version)
def raise_cache_miss(path):
raise FileNotFoundError('Got cache miss when trying to stat %s' % path)
dir_stat = self._stat_cache.Get(dir_path).Get()
if dir_stat is not None:
return Future(callback=lambda: make_stat_info(dir_stat))
if self._fail_on_miss:
logging.warning('Bailing on stat cache miss for %s' % dir_path)
return Future(callback=lambda: raise_cache_miss(dir_path))
def next(dir_stat):
assert dir_stat is not None # should have raised a FileNotFoundError
# We only ever need to cache the dir stat.
self._stat_cache.Set(dir_path, dir_stat)
return make_stat_info(dir_stat)
return self._MemoizedStatAsyncFromFileSystem(dir_path).Then(next)
@memoize
def _MemoizedStatAsyncFromFileSystem(self, dir_path):
'''This is a simple wrapper to memoize Futures to directory stats, since
StatAsync makes heavy use of it. Only cache directories so that the
memoized cache doesn't blow up.
'''
assert IsDirectory(dir_path)
return self._file_system.StatAsync(dir_path)
def Read(self, paths, skip_not_found=False):
'''Reads a list of files. If a file is cached and it is not out of
date, it is returned. Otherwise, the file is retrieved from the file system.
'''
# Files which aren't found are cached in the read object store as
# (path, None, None). This is to prevent re-reads of files we know
# do not exist.
cached_read_values = self._read_cache.GetMulti(paths).Get()
cached_stat_values = self._stat_cache.GetMulti(paths).Get()
# Populate a map of paths to Futures to their stat. They may have already
# been cached in which case their Future will already have been constructed
# with a value.
stat_futures = {}
def handle(error):
if isinstance(error, FileNotFoundError):
return None
raise error
for path in paths:
stat_value = cached_stat_values.get(path)
if stat_value is None:
stat_future = self.StatAsync(path)
if skip_not_found:
stat_future = stat_future.Then(lambda x: x, handle)
else:
stat_future = Future(value=stat_value)
stat_futures[path] = stat_future
# Filter only the cached data which is up to date by comparing to the latest
# stat. The cached read data includes the cached version. Remove it for
# the result returned to callers. |version| == None implies a non-existent
# file, so skip it.
up_to_date_data = dict(
(path, data) for path, (data, version) in cached_read_values.iteritems()
if version is not None and stat_futures[path].Get().version == version)
if skip_not_found:
# Filter out paths which we know do not exist, i.e. if |path| is in
# |cached_read_values| *and* has a None version, then it doesn't exist.
# See the above declaration of |cached_read_values| for more information.
paths = [path for path in paths
if cached_read_values.get(path, (None, True))[1]]
if len(up_to_date_data) == len(paths):
# Everything was cached and up-to-date.
return Future(value=up_to_date_data)
def next(new_results):
# Update the cache. This is a path -> (data, version) mapping.
self._read_cache.SetMulti(
dict((path, (new_result, stat_futures[path].Get().version))
for path, new_result in new_results.iteritems()))
# Update the read cache to include files that weren't found, to prevent
# constantly trying to read a file we now know doesn't exist.
self._read_cache.SetMulti(
dict((path, (None, None)) for path in paths
if stat_futures[path].Get() is None))
new_results.update(up_to_date_data)
return new_results
# Read in the values that were uncached or old.
return self._file_system.Read(set(paths) - set(up_to_date_data.iterkeys()),
skip_not_found=skip_not_found).Then(next)
def GetCommitID(self):
return self._file_system.GetCommitID()
def GetPreviousCommitID(self):
return self._file_system.GetPreviousCommitID()
def Walk(self, root, depth=-1):
'''Overrides FileSystem.Walk() to provide caching functionality.
'''
def file_lister(root):
res, root_stat = All((self._walk_cache.Get(root),
self.StatAsync(root))).Get()
if res and res[2] == root_stat.version:
dirs, files = res[0], res[1]
else:
# Wasn't cached, or not up to date.
dirs, files = [], []
for f in self.ReadSingle(root).Get():
if IsDirectory(f):
dirs.append(f)
else:
files.append(f)
# Update the cache. This is a root -> (dirs, files, version) mapping.
self._walk_cache.Set(root, (dirs, files, root_stat.version))
return dirs, files
return self._file_system.Walk(root, depth=depth, file_lister=file_lister)
def GetIdentity(self):
return self._file_system.GetIdentity()
def GetVersion(self):
return self._file_system.GetVersion()
def __repr__(self):
return '%s of <%s>' % (type(self).__name__, repr(self._file_system))
| bsd-3-clause |
theochem/horton | horton/part/stockholder.py | 4 | 5171 | # -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
'''Base classes for all stockholder partitioning schemes'''
import numpy as np
from horton.log import log
from horton.grid.cext import CubicSpline
from horton.part.base import WPart
from horton.grid.poisson import solve_poisson_becke
__all__ = [
'StockholderWPart',
]
class StockHolderMixin(object):
    '''Shared machinery for stockholder (proatom-based) partitioning schemes.

    Subclasses must provide ``get_rgrid``, ``get_proatom_rho`` and
    ``update_pro``.
    '''

    def get_rgrid(self, index):
        '''Return the radial grid for the proatom of atom ``index``.'''
        raise NotImplementedError

    def get_proatom_rho(self, index, *args, **kwargs):
        '''Return ``(rho, deriv)``: the radial proatom density of atom
        ``index`` and its derivative (``deriv`` may be None).
        '''
        raise NotImplementedError

    def fix_proatom_rho(self, index, rho, deriv):
        '''Check if the radial density for the proatom is correct and fix as needed.

        **Arguments:**

        index
            The atom for which this proatom rho is created.

        rho
            The radial density

        deriv
            the derivative of the radial density or None.
        '''
        rgrid = self.get_rgrid(index)
        # Check for negative parts
        original = rgrid.integrate(rho)
        if rho.min() < 0:
            # Clip negative density values in place. The derivative is no
            # longer consistent with the clipped density, so drop it.
            rho[rho<0] = 0.0
            deriv = None
            error = rgrid.integrate(rho) - original
            if log.do_medium:
                log(' Pro-atom not positive everywhere. Lost %.1e electrons' % error)
        return rho, deriv

    def get_proatom_spline(self, index, *args, **kwargs):
        '''Return a CubicSpline of the (validated) proatom density of atom ``index``.'''
        # Get the radial density
        rho, deriv = self.get_proatom_rho(index, *args, **kwargs)
        # Double check and fix if needed
        rho, deriv = self.fix_proatom_rho(index, rho, deriv)
        # Make a spline
        rtf = self.get_rgrid(index).rtransform
        return CubicSpline(rho, deriv, rtf)

    def eval_spline(self, index, spline, output, grid, label='noname'):
        '''Evaluate ``spline`` centered on atom ``index`` over ``grid`` into ``output``.'''
        center = self.coordinates[index]
        if log.do_debug:
            number = self.numbers[index]
            log(' Evaluating spline (%s) for atom %i (n=%i) on %i grid points' % (label, index, number, grid.size))
        grid.eval_spline(spline, center, output)

    def eval_proatom(self, index, output, grid):
        '''Evaluate the proatom density of atom ``index`` on ``grid`` into ``output``.'''
        spline = self.get_proatom_spline(index)
        output[:] = 0.0
        self.eval_spline(index, spline, output, grid, label='proatom')
        # Avoid exact zeros so later divisions by the promolecule are safe.
        output += 1e-100
        assert np.isfinite(output).all()

    def update_at_weights(self):
        '''Rebuild the promolecule density and all atomic weight functions.'''
        # This will reconstruct the promolecular density and atomic weights
        # based on the current proatomic splines.
        promoldens = self.cache.load('promoldens', alloc=self.grid.shape)[0]
        promoldens[:] = 0
        # update the promolecule density and store the proatoms in the at_weights
        # arrays for later.
        for index in xrange(self.natom):
            grid = self.get_grid(index)
            at_weights = self.cache.load('at_weights', index, alloc=grid.shape)[0]
            self.update_pro(index, at_weights, promoldens)
        # Compute the atomic weights by taking the ratios between proatoms and
        # promolecules.
        for index in xrange(self.natom):
            at_weights = self.cache.load('at_weights', index)
            at_weights /= self.to_atomic_grid(index, promoldens)
            np.clip(at_weights, 0, 1, out=at_weights)

    def update_pro(self, index, proatdens, promoldens):
        '''Fill ``proatdens`` with the proatom of atom ``index`` and add it to ``promoldens``.'''
        raise NotImplementedError

    def do_prosplines(self):
        '''Compute and cache proatom density and Hartree-potential splines.'''
        for index in xrange(self.natom):
            # density
            key = ('spline_prodensity', index)
            if key not in self.cache:
                # Fixed: was `if log.medium:` -- that is the (always truthy)
                # log *level* constant; every other check in this module tests
                # the `log.do_medium` flag instead.
                if log.do_medium:
                    log('Storing proatom density spline for atom %i.' % index)
                spline = self.get_proatom_spline(index)
                self.cache.dump(key, spline, tags='o')
            # hartree potential
            key = ('spline_prohartree', index)
            if key not in self.cache:
                if log.do_medium:
                    log('Computing proatom hartree potential spline for atom %i.' % index)
                rho_spline = self.cache.load('spline_prodensity', index)
                v_spline = solve_poisson_becke([rho_spline])[0]
                self.cache.dump(key, v_spline, tags='o')
class StockholderWPart(StockHolderMixin, WPart):
    '''Stockholder partitioning of a wavefunction-based molecular density.'''

    def update_pro(self, index, proatdens, promoldens):
        '''Evaluate the proatom of atom ``index`` on the molecular grid,
        store it (on the atomic grid) in ``proatdens`` and accumulate it
        into ``promoldens``.
        '''
        proatom_on_molgrid = self.grid.zeros()
        self.eval_proatom(index, proatom_on_molgrid, self.grid)
        proatdens[:] = self.to_atomic_grid(index, proatom_on_molgrid)
        promoldens += proatom_on_molgrid
| gpl-3.0 |
myerpengine/odoo | addons/delivery/__init__.py | 376 | 1103 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import delivery
import partner
import sale
import stock
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dataxu/ansible | lib/ansible/modules/cloud/openstack/os_network.py | 5 | 7636 | #!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_network
short_description: Creates/removes networks from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or remove network from OpenStack.
options:
name:
description:
- Name to be assigned to the network.
required: true
shared:
description:
- Whether this network is shared or not.
required: false
default: false
admin_state_up:
description:
- Whether the state should be marked as up or down.
required: false
default: true
external:
description:
- Whether this network is externally accessible.
required: false
default: false
state:
description:
- Indicate desired state of the resource.
choices: ['present', 'absent']
required: false
default: present
provider_physical_network:
description:
- The physical network where this network object is implemented.
required: false
default: None
version_added: "2.1"
provider_network_type:
description:
- The type of physical network that maps to this network resource.
required: false
default: None
version_added: "2.1"
provider_segmentation_id:
description:
- An isolated segment on the physical network. The I(network_type)
attribute defines the segmentation model. For example, if the
I(network_type) value is vlan, this ID is a vlan identifier. If
the I(network_type) value is gre, this ID is a gre key.
required: false
default: None
version_added: "2.1"
project:
description:
- Project name or ID containing the network (name admin-only)
required: false
default: None
version_added: "2.1"
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements: ["shade"]
'''
EXAMPLES = '''
# Create an externally accessible network named 'ext_network'.
- os_network:
cloud: mycloud
state: present
name: ext_network
external: true
'''
RETURN = '''
network:
description: Dictionary describing the network.
returned: On success when I(state) is 'present'.
type: complex
contains:
id:
description: Network ID.
type: string
sample: "4bb4f9a5-3bd2-4562-bf6a-d17a6341bb56"
name:
description: Network name.
type: string
sample: "ext_network"
shared:
description: Indicates whether this network is shared across all tenants.
type: bool
sample: false
status:
description: Network status.
type: string
sample: "ACTIVE"
mtu:
description: The MTU of a network resource.
type: integer
sample: 0
admin_state_up:
description: The administrative state of the network.
type: bool
sample: true
port_security_enabled:
description: The port security status
type: bool
sample: true
router:external:
description: Indicates whether this network is externally accessible.
type: bool
sample: true
tenant_id:
description: The tenant ID.
type: string
sample: "06820f94b9f54b119636be2728d216fc"
subnets:
description: The associated subnets.
type: list
sample: []
"provider:physical_network":
description: The physical network where this network object is implemented.
type: string
sample: my_vlan_net
"provider:network_type":
description: The type of physical network that maps to this network resource.
type: string
sample: vlan
"provider:segmentation_id":
description: An isolated segment on the physical network.
type: string
sample: 101
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def main():
    '''Module entry point: ensure an OpenStack network is present or absent.'''
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        shared=dict(default=False, type='bool'),
        admin_state_up=dict(default=True, type='bool'),
        external=dict(default=False, type='bool'),
        provider_physical_network=dict(required=False),
        provider_network_type=dict(required=False),
        provider_segmentation_id=dict(required=False),
        state=dict(default='present', choices=['absent', 'present']),
        project=dict(default=None)
    )
    module = AnsibleModule(argument_spec, **openstack_module_kwargs())

    params = module.params
    state = params['state']
    name = params['name']
    project = params.get('project')

    shade, cloud = openstack_cloud_from_module(module, min_version='1.6.0')
    try:
        # Resolve the optional project into a tenant filter for the lookup.
        if project is None:
            project_id = None
            filters = None
        else:
            proj = cloud.get_project(project)
            if proj is None:
                module.fail_json(msg='Project %s could not be found' % project)
            project_id = proj['id']
            filters = {'tenant_id': project_id}

        net = cloud.get_network(name, filters=filters)

        if state == 'present':
            if net:
                # Already exists: nothing to change.
                module.exit_json(changed=False, network=net, id=net['id'])
            # Only forward provider attributes that were actually supplied.
            provider = {}
            for attr in ('physical_network', 'network_type', 'segmentation_id'):
                value = params['provider_' + attr]
                if value:
                    provider[attr] = value
            if project_id is not None:
                net = cloud.create_network(name, params['shared'],
                                           params['admin_state_up'],
                                           params['external'], provider,
                                           project_id)
            else:
                net = cloud.create_network(name, params['shared'],
                                           params['admin_state_up'],
                                           params['external'], provider)
            module.exit_json(changed=True, network=net, id=net['id'])
        elif state == 'absent':
            if not net:
                module.exit_json(changed=False)
            cloud.delete_network(name)
            module.exit_json(changed=True)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
if __name__ == "__main__":
main()
| gpl-3.0 |
yuange250/diydrones | Tools/autotest/autotest.py | 66 | 15351 | #!/usr/bin/env python
# APM automatic test suite
# Andrew Tridgell, October 2011
import pexpect, os, sys, shutil, atexit
import optparse, fnmatch, time, glob, traceback, signal
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pysim'))
import util
os.environ['PYTHONUNBUFFERED'] = '1'
os.putenv('TMPDIR', util.reltopdir('tmp'))
def get_default_params(atype):
    '''Boot a SITL instance for vehicle type *atype*, let MAVProxy dump its
    default parameters and copy the parameter file into ../buildlogs.

    Returns True on success; pexpect timeouts surface as exceptions.
    '''
    # use rover simulator so SITL is not starved of input
    from pymavlink import mavutil
    HOME=mavutil.location(40.071374969556928,-105.22978898137808,1583.702759,246)
    if atype in ['APMrover2', 'ArduPlane']:
        frame = 'rover'
    else:
        frame = '+'
    home = "%f,%f,%u,%u" % (HOME.lat, HOME.lng, HOME.alt, HOME.heading)
    sil = util.start_SIL(atype, wipe=True, model=frame, home=home, speedup=10)
    mavproxy = util.start_MAVProxy_SIL(atype)
    print("Dumping defaults")
    idx = mavproxy.expect(['Please Run Setup', 'Saved [0-9]+ parameters to (\S+)'])
    if idx == 0:
        # we need to restart it after eeprom erase
        util.pexpect_close(mavproxy)
        util.pexpect_close(sil)
        sil = util.start_SIL(atype, model=frame, home=home, speedup=10)
        mavproxy = util.start_MAVProxy_SIL(atype)
        idx = mavproxy.expect('Saved [0-9]+ parameters to (\S+)')
    # mavproxy.match holds the regex match from the expect above; group(1)
    # is the path of the parameter file MAVProxy just wrote.
    parmfile = mavproxy.match.group(1)
    dest = util.reltopdir('../buildlogs/%s-defaults.parm' % atype)
    shutil.copy(parmfile, dest)
    util.pexpect_close(mavproxy)
    util.pexpect_close(sil)
    print("Saved defaults for %s to %s" % (atype, dest))
    return True
def build_all():
    '''Run Tools/scripts/build_all.sh; return True when it exits cleanly.'''
    print("Running build_all.sh")
    script = util.reltopdir('Tools/scripts/build_all.sh')
    rc = util.run_cmd(script, dir=util.reltopdir('.'))
    if rc != 0:
        print("Failed build_all.sh")
    return rc == 0
def build_binaries():
    '''Run the build_binaries.sh script.

    The script is copied out of the tree first because it checks out other
    git branches while running, which could otherwise rewrite the script
    underneath itself. Returns True on success, False on failure.
    '''
    print("Running build_binaries.sh")
    # copy the script as it changes git branch, which can change the script while running
    # (shutil is already imported at module level; the old local import was redundant)
    orig = util.reltopdir('Tools/scripts/build_binaries.sh')
    copy = util.reltopdir('./build_binaries.sh')
    shutil.copyfile(orig, copy)
    shutil.copymode(orig, copy)
    if util.run_cmd(copy, dir=util.reltopdir('.')) != 0:
        print("Failed build_binaries.sh")
        return False
    return True
def build_devrelease():
    '''Run the build_devrelease.sh script.

    The script is copied out of the tree first because it switches git
    branches while running, which could otherwise rewrite the script
    underneath itself. Returns True on success, False on failure.
    '''
    print("Running build_devrelease.sh")
    # copy the script as it changes git branch, which can change the script while running
    # (shutil is already imported at module level; the old local import was redundant)
    orig = util.reltopdir('Tools/scripts/build_devrelease.sh')
    copy = util.reltopdir('./build_devrelease.sh')
    shutil.copyfile(orig, copy)
    shutil.copymode(orig, copy)
    if util.run_cmd(copy, dir=util.reltopdir('.')) != 0:
        print("Failed build_devrelease.sh")
        return False
    return True
def build_examples():
    '''Run Tools/scripts/build_examples.sh; return True when it exits cleanly.'''
    print("Running build_examples.sh")
    script = util.reltopdir('Tools/scripts/build_examples.sh')
    rc = util.run_cmd(script, dir=util.reltopdir('.'))
    if rc != 0:
        print("Failed build_examples.sh")
    return rc == 0
def build_parameters():
    '''Regenerate parameter documentation via param_parse.py; True on success.'''
    print("Running param_parse.py")
    script = util.reltopdir('Tools/autotest/param_metadata/param_parse.py')
    rc = util.run_cmd(script, dir=util.reltopdir('.'))
    if rc != 0:
        print("Failed param_parse.py")
    return rc == 0
def convert_gpx():
    '''Convert accumulated .tlog telemetry logs in ../buildlogs into GPX and
    KML tracks, zipped KMZ archives and PNG flight-track images.

    Individual conversions are best-effort (checkfail=False) because the
    external tools (gpsbabel, zip) may be missing. Always returns True.
    '''
    # glob is already imported at module level; the old local import was redundant.
    mavlog = glob.glob(util.reltopdir("../buildlogs/*.tlog"))
    for m in mavlog:
        util.run_cmd(util.reltopdir("../mavlink/pymavlink/tools/mavtogpx.py") + " --nofixcheck " + m)
        gpx = m + '.gpx'
        kml = m + '.kml'
        util.run_cmd('gpsbabel -i gpx -f %s -o kml,units=m,floating=1,extrude=1 -F %s' % (gpx, kml), checkfail=False)
        util.run_cmd('zip %s.kmz %s.kml' % (m, m), checkfail=False)
        util.run_cmd("mavflightview.py --imagefile=%s.png %s" % (m,m))
    return True
def test_prerequisites():
    '''Make sure the directories needed for a test run exist.'''
    print("Testing prerequisites")
    buildlogs = util.reltopdir('../buildlogs')
    util.mkdir_p(buildlogs)
    return True
def alarm_handler(signum, frame):
    '''SIGALRM handler: record the timeout, write results and kill everything.'''
    global results, opts
    try:
        # Record the timeout in the HTML results before tearing down.
        results.add('TIMEOUT', '<span class="failed-text">FAILED</span>', opts.timeout)
        util.pexpect_close_all()
        convert_gpx()
        write_fullresults()
        # Kill the whole process group so no SITL/MAVProxy children survive.
        os.killpg(0, signal.SIGKILL)
    except Exception:
        # Deliberately broad: nothing may prevent the handler from exiting.
        pass
    sys.exit(1)
############## main program #############
# Command-line handling; `opts` and `args` are consumed by the rest of the
# script at import time (this file runs as a script, not a library).
parser = optparse.OptionParser("autotest")
parser.add_option("--skip", type='string', default='', help='list of steps to skip (comma separated)')
parser.add_option("--list", action='store_true', default=False, help='list the available steps')
parser.add_option("--viewerip", default=None, help='IP address to send MAVLink and fg packets to')
parser.add_option("--map", action='store_true', default=False, help='show map')
parser.add_option("--experimental", default=False, action='store_true', help='enable experimental tests')
parser.add_option("--timeout", default=3000, type='int', help='maximum runtime in seconds')
parser.add_option("-j", default=1, type='int', help='build CPUs')
opts, args = parser.parse_args()
import arducopter, arduplane, apmrover2
# Ordered list of every known step; --skip and any positional wildcard
# arguments filter this list before run_tests() is called.
steps = [
    'prerequisites',
    'build.All',
    'build.Binaries',
    # 'build.DevRelease',
    'build.Examples',
    'build.Parameters',
    'build.ArduPlane',
    'defaults.ArduPlane',
    'fly.ArduPlane',
    'build.APMrover2',
    'defaults.APMrover2',
    'drive.APMrover2',
    'build.ArduCopter',
    'defaults.ArduCopter',
    'fly.ArduCopter',
    'build.Helicopter',
    'fly.CopterAVC',
    'build.AntennaTracker',
    'convertgpx',
    ]
skipsteps = opts.skip.split(',')
# ensure we catch timeouts
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(opts.timeout)
if opts.list:
    # --list just prints the step names and exits without running anything.
    for step in steps:
        print(step)
    sys.exit(0)
def skip_step(step):
    '''Return True when *step* matches any --skip pattern (case-insensitive glob).'''
    lowered = step.lower()
    return any(fnmatch.fnmatch(lowered, pattern.lower()) for pattern in skipsteps)
def run_step(step):
    '''Dispatch a single named step; returns True on success.

    Raises RuntimeError for unknown step names.
    '''
    # remove old logs
    # (so check_logs() only picks up files produced by this step)
    util.run_cmd('/bin/rm -f logs/*.BIN logs/LASTLOG.TXT')
    if step == "prerequisites":
        return test_prerequisites()
    if step == 'build.ArduPlane':
        return util.build_SIL('ArduPlane', j=opts.j)
    if step == 'build.APMrover2':
        return util.build_SIL('APMrover2', j=opts.j)
    if step == 'build.ArduCopter':
        return util.build_SIL('ArduCopter', j=opts.j)
    if step == 'build.AntennaTracker':
        return util.build_SIL('AntennaTracker', j=opts.j)
    if step == 'build.Helicopter':
        return util.build_SIL('ArduCopter', target='sitl-heli', j=opts.j)
    if step == 'defaults.ArduPlane':
        return get_default_params('ArduPlane')
    if step == 'defaults.ArduCopter':
        return get_default_params('ArduCopter')
    if step == 'defaults.APMrover2':
        return get_default_params('APMrover2')
    if step == 'fly.ArduCopter':
        return arducopter.fly_ArduCopter(viewerip=opts.viewerip, map=opts.map)
    if step == 'fly.CopterAVC':
        return arducopter.fly_CopterAVC(viewerip=opts.viewerip, map=opts.map)
    if step == 'fly.ArduPlane':
        return arduplane.fly_ArduPlane(viewerip=opts.viewerip, map=opts.map)
    if step == 'drive.APMrover2':
        return apmrover2.drive_APMrover2(viewerip=opts.viewerip, map=opts.map)
    if step == 'build.All':
        return build_all()
    if step == 'build.Binaries':
        return build_binaries()
    if step == 'build.DevRelease':
        return build_devrelease()
    if step == 'build.Examples':
        return build_examples()
    if step == 'build.Parameters':
        return build_parameters()
    if step == 'convertgpx':
        return convert_gpx()
    raise RuntimeError("Unknown step %s" % step)
class TestResult(object):
    '''Outcome of a single test step for the HTML report.'''
    def __init__(self, name, result, elapsed):
        self.name = name
        self.result = result
        # wall-clock duration, pre-formatted to one decimal place
        self.elapsed = "{0:.1f}".format(elapsed)
class TestFile(object):
    '''A named artefact (log, parameter file, image, ...) linked from the
    HTML results page; `fname` is a path relative to ../buildlogs.'''
    def __init__(self, name, fname):
        self.name = name
        self.fname = fname
class TestResults(object):
    '''Collects step outcomes and artefact files for the HTML report.'''
    def __init__(self):
        # Timestamp and git revision identify this test run on the report page.
        self.date = time.asctime()
        self.githash = util.run_cmd('git rev-parse HEAD', output=True, dir=util.reltopdir('.')).strip()
        self.tests = []
        self.files = []
        self.images = []
    def add(self, name, result, elapsed):
        '''add a result'''
        self.tests.append(TestResult(name, result, elapsed))
    def addfile(self, name, fname):
        '''add a result file'''
        self.files.append(TestFile(name, fname))
    def addimage(self, name, fname):
        '''add a result image'''
        self.images.append(TestFile(name, fname))
    def addglob(self, name, pattern):
        '''add all files under ../buildlogs matching a glob pattern'''
        # glob is already imported at module level; the old per-call
        # local import was redundant.
        for f in glob.glob(util.reltopdir('../buildlogs/%s' % pattern)):
            self.addfile(name, os.path.basename(f))
    def addglobimage(self, name, pattern):
        '''add all images under ../buildlogs matching a glob pattern'''
        for f in glob.glob(util.reltopdir('../buildlogs/%s' % pattern)):
            self.addimage(name, os.path.basename(f))
def write_webresults(results):
    '''Render the HTML result templates and copy static assets to ../buildlogs.'''
    from pymavlink.generator import mavtemplate
    t = mavtemplate.MAVTemplate()
    for h in glob.glob(util.reltopdir('Tools/autotest/web/*.html')):
        html = util.loadfile(h)
        # Use a context manager so the output file is closed (and flushed)
        # even if template expansion raises; the old code leaked the handle
        # on error.
        with open(util.reltopdir("../buildlogs/%s" % os.path.basename(h)), mode='w') as f:
            t.write(f, html, results)
    for f in glob.glob(util.reltopdir('Tools/autotest/web/*.png')):
        shutil.copy(f, util.reltopdir('../buildlogs/%s' % os.path.basename(f)))
def write_fullresults():
    '''Attach every known artefact to the global results set and render
    the HTML report pages.
    '''
    global results
    # Cross-vehicle artefacts.
    results.addglob("Google Earth track", '*.kmz')
    results.addfile('Full Logs', 'autotest-output.txt')
    results.addglob('DataFlash Log', '*-log.bin')
    results.addglob("MAVLink log", '*.tlog')
    results.addglob("GPX track", '*.gpx')
    # Per-vehicle build logs, size reports, parameter defaults, flight
    # logs and binaries.
    results.addfile('ArduPlane build log', 'ArduPlane.txt')
    results.addfile('ArduPlane code size', 'ArduPlane.sizes.txt')
    results.addfile('ArduPlane stack sizes', 'ArduPlane.framesizes.txt')
    results.addfile('ArduPlane defaults', 'ArduPlane-defaults.parm')
    results.addglob("ArduPlane log", 'ArduPlane-*.BIN')
    results.addglob("ArduPlane core", 'ArduPlane.core')
    results.addglob("ArduPlane ELF", 'ArduPlane.elf')
    results.addfile('ArduCopter build log', 'ArduCopter.txt')
    results.addfile('ArduCopter code size', 'ArduCopter.sizes.txt')
    results.addfile('ArduCopter stack sizes', 'ArduCopter.framesizes.txt')
    results.addfile('ArduCopter defaults', 'ArduCopter-defaults.parm')
    results.addglob("ArduCopter log", 'ArduCopter-*.BIN')
    results.addglob("ArduCopter core", 'ArduCopter.core')
    results.addglob("ArduCopter elf", 'ArduCopter.elf')
    results.addglob("CopterAVC log", 'CopterAVC-*.BIN')
    results.addglob("CopterAVC core", 'CopterAVC.core')
    results.addfile('APMrover2 build log', 'APMrover2.txt')
    results.addfile('APMrover2 code size', 'APMrover2.sizes.txt')
    results.addfile('APMrover2 stack sizes', 'APMrover2.framesizes.txt')
    results.addfile('APMrover2 defaults', 'APMrover2-defaults.parm')
    results.addglob("APMrover2 log", 'APMrover2-*.BIN')
    results.addglob("APMrover2 core", 'APMrover2.core')
    results.addglob("APMrover2 ELF", 'APMrover2.elf')
    results.addfile('AntennaTracker build log', 'AntennaTracker.txt')
    results.addfile('AntennaTracker code size', 'AntennaTracker.sizes.txt')
    results.addfile('AntennaTracker stack sizes', 'AntennaTracker.framesizes.txt')
    results.addglob("AntennaTracker ELF", 'AntennaTracker.elf')
    # Generated documentation trees.
    results.addglob('APM:Libraries documentation', 'docs/libraries/index.html')
    results.addglob('APM:Plane documentation', 'docs/ArduPlane/index.html')
    results.addglob('APM:Copter documentation', 'docs/ArduCopter/index.html')
    results.addglob('APM:Rover documentation', 'docs/APMrover2/index.html')
    results.addglobimage("Flight Track", '*.png')
    write_webresults(results)
# Global collector shared by run_tests() and alarm_handler().
results = TestResults()
def check_logs(step):
    '''Move any flight/drive logs and core dumps produced by *step* into
    ../buildlogs, named after the vehicle the step exercised.

    Steps that are not fly.* or drive.* produce no logs and are ignored.
    '''
    print("check step: ", step)
    if step.startswith('fly.'):
        vehicle = step[4:]
    elif step.startswith('drive.'):
        vehicle = step[6:]
    else:
        return
    logs = glob.glob("logs/*.BIN")
    for log in logs:
        bname = os.path.basename(log)
        newname = util.reltopdir("../buildlogs/%s-%s" % (vehicle, bname))
        print("Renaming %s to %s" % (log, newname))
        os.rename(log, newname)
    corefile = "core"
    if os.path.exists(corefile):
        # Preserve the core dump together with the matching ELF binaries so
        # the crash can be analysed later.
        newname = util.reltopdir("../buildlogs/%s.core" % vehicle)
        print("Renaming %s to %s" % (corefile, newname))
        os.rename(corefile, newname)
        util.run_cmd('/bin/cp A*/A*.elf ../buildlogs', dir=util.reltopdir('.'))
def run_tests(steps):
    '''run a list of steps

    Runs each named step in order, recording PASSED/FAILED (with timing)
    into the global `results` object, archiving logs after each flight or
    drive step, and writing the final web results. Returns True only if
    every non-skipped step succeeded.
    '''
    global results

    passed = True
    failed = []
    for step in steps:
        # Kill any pexpect children left over from the previous step so a
        # hung simulator cannot poison the next test.
        util.pexpect_close_all()
        if skip_step(step):
            continue

        t1 = time.time()
        print(">>>> RUNNING STEP: %s at %s" % (step, time.asctime()))
        try:
            if not run_step(step):
                print(">>>> FAILED STEP: %s at %s" % (step, time.asctime()))
                passed = False
                failed.append(step)
                results.add(step, '<span class="failed-text">FAILED</span>', time.time() - t1)
                continue
        # NOTE: was `except Exception, msg:` -- legacy Python-2-only syntax.
        # The `as` form below is valid on Python 2.6+ and Python 3.
        except Exception as msg:
            # A crashing step must not abort the whole run: record the
            # failure, keep its logs, and move on to the next step.
            passed = False
            failed.append(step)
            print(">>>> FAILED STEP: %s at %s (%s)" % (step, time.asctime(), msg))
            traceback.print_exc(file=sys.stdout)
            results.add(step, '<span class="failed-text">FAILED</span>', time.time() - t1)
            check_logs(step)
            continue
        results.add(step, '<span class="passed-text">PASSED</span>', time.time() - t1)
        print(">>>> PASSED STEP: %s at %s" % (step, time.asctime()))
        check_logs(step)
    if not passed:
        print("FAILED %u tests: %s" % (len(failed), failed))

    util.pexpect_close_all()

    write_fullresults()

    return passed
# --- top-level driver ---------------------------------------------------

# Ensure the shared buildlogs output directory exists before any step
# tries to write into it.
util.mkdir_p(util.reltopdir('../buildlogs'))

# Only one autotest instance may run at a time; the lock file protects the
# shared buildlogs directory.
lck = util.lock_file(util.reltopdir('../buildlogs/autotest.lck'))
if lck is None:
    print("autotest is locked - exiting")
    sys.exit(0)

# Guarantee spawned pexpect children are killed when the interpreter exits,
# even on an unhandled error path.
atexit.register(util.pexpect_close_all)

if len(args) > 0:
    # allow a wildcard list of steps
    matched = []
    for a in args:
        for s in steps:
            # Case-insensitive fnmatch so e.g. "fly.*" selects all flights.
            if fnmatch.fnmatch(s.lower(), a.lower()):
                matched.append(s)
    steps = matched

try:
    # Exit non-zero if any step failed so CI can detect the failure.
    if not run_tests(steps):
        sys.exit(1)
except KeyboardInterrupt:
    util.pexpect_close_all()
    sys.exit(1)
except Exception:
    # make sure we kill off any children
    util.pexpect_close_all()
    raise
| gpl-3.0 |
40223209/2015cdbg5_0420 | static/Brython3.1.1-20150328-091302/Lib/configparser.py | 692 | 50025 | """Configuration file parser.
A configuration file consists of sections, lead by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True):
Create the parser. When `defaults' is given, it is initialized into the
dictionary or intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given file section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
from collections.abc import MutableMapping
from collections import OrderedDict as _default_dict, ChainMap as _ChainMap
import functools
import io
import itertools
import re
import sys
import warnings
__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError",
"NoOptionError", "InterpolationError", "InterpolationDepthError",
"InterpolationSyntaxError", "ParsingError",
"MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
    """Base class for ConfigParser exceptions."""

    def __init__(self, msg=''):
        self.message = msg
        Exception.__init__(self, msg)

    @property
    def message(self):
        """Getter for 'message'; needed only to override deprecation in
        BaseException.
        """
        return self.__message

    @message.setter
    def message(self, value):
        """Setter for 'message'; needed only to override deprecation in
        BaseException.
        """
        self.__message = value

    def __repr__(self):
        return self.message

    __str__ = __repr__
class NoSectionError(Error):
    """Raised when no section matches a requested option."""

    def __init__(self, section):
        msg = 'No section: %r' % (section,)
        Error.__init__(self, msg)
        # Expose the offending name; args is reset after base init because
        # Exception.__init__ would otherwise store (msg,) for pickling.
        self.section = section
        self.args = (section, )
class DuplicateSectionError(Error):
    """Raised when a section is repeated in an input source.

    Possible repetitions that raise this exception are: multiple creation
    using the API or in strict parsers when a section is found more than once
    in a single input file, string or dictionary.
    """

    def __init__(self, section, source=None, lineno=None):
        if source is not None:
            # Parser-driven duplicate: identify the input source and line.
            location = ""
            if lineno is not None:
                location = " [line {0:2d}]".format(lineno)
            text = "While reading from {0}{1}: section {2!r} already exists".format(
                source, location, section)
        else:
            # API-driven duplicate (add_section called twice).
            text = "Section {0!r} already exists".format(section)
        Error.__init__(self, text)
        self.section = section
        self.source = source
        self.lineno = lineno
        self.args = (section, source, lineno)
class DuplicateOptionError(Error):
    """Raised by strict parsers when an option is repeated in an input source.

    Current implementation raises this exception only when an option is found
    more than once in a single file, string or dictionary.
    """

    def __init__(self, section, option, source=None, lineno=None):
        if source is not None:
            # Parser-driven duplicate: identify the input source and line.
            location = ""
            if lineno is not None:
                location = " [line {0:2d}]".format(lineno)
            text = ("While reading from {0}{1}: option {2!r} in section "
                    "{3!r} already exists".format(
                        source, location, option, section))
        else:
            # API-driven duplicate.
            text = "Option {0!r} in section {1!r} already exists".format(
                option, section)
        Error.__init__(self, text)
        self.section = section
        self.option = option
        self.source = source
        self.lineno = lineno
        self.args = (section, option, source, lineno)
class NoOptionError(Error):
    """A requested option was not found."""

    def __init__(self, option, section):
        msg = "No option %r in section: %r" % (option, section)
        Error.__init__(self, msg)
        self.option = option
        self.section = section
        self.args = (option, section)
class InterpolationError(Error):
    """Base class for interpolation-related exceptions."""

    def __init__(self, option, section, msg):
        super().__init__(msg)
        # Record which option/section triggered the interpolation failure.
        self.option = option
        self.section = section
        self.args = (option, section, msg)
class InterpolationMissingOptionError(InterpolationError):
    """A string substitution required a setting which was not available."""

    def __init__(self, option, section, rawval, reference):
        # `reference' is the name of the missing key the %(...)s / ${...}
        # substitution pointed at.
        msg = ("Bad value substitution:\n"
               "\tsection: [%s]\n"
               "\toption : %s\n"
               "\tkey : %s\n"
               "\trawval : %s\n"
               % (section, option, reference, rawval))
        InterpolationError.__init__(self, option, section, msg)
        self.reference = reference
        self.args = (option, section, rawval, reference)
class InterpolationSyntaxError(InterpolationError):
    """Raised when the source text contains invalid syntax.

    Current implementation raises this exception when the source text into
    which substitutions are made does not conform to the required syntax.
    """
    # No extra state: the inherited (option, section, msg) is sufficient.
class InterpolationDepthError(InterpolationError):
    """Raised when substitutions are nested too deeply."""

    def __init__(self, option, section, rawval):
        # Raised once more than MAX_INTERPOLATION_DEPTH levels of
        # substitution have been attempted for a single value.
        msg = ("Value interpolation too deeply recursive:\n"
               "\tsection: [%s]\n"
               "\toption : %s\n"
               "\trawval : %s\n"
               % (section, option, rawval))
        InterpolationError.__init__(self, option, section, msg)
        self.args = (option, section, rawval)
class ParsingError(Error):
    """Raised when a configuration file does not follow legal syntax."""

    def __init__(self, source=None, filename=None):
        # Exactly one of `source'/`filename' arguments has to be given.
        # `filename' kept for compatibility.
        if filename and source:
            raise ValueError("Cannot specify both `filename' and `source'. "
                             "Use `source'.")
        elif not filename and not source:
            raise ValueError("Required argument `source' not given.")
        elif filename:
            source = filename
        Error.__init__(self, 'Source contains parsing errors: %s' % source)
        self.source = source
        self.errors = []    # accumulated (lineno, line) pairs, see append()
        self.args = (source, )

    @property
    def filename(self):
        """Deprecated, use `source'."""
        warnings.warn(
            "The 'filename' attribute will be removed in future versions. "
            "Use 'source' instead.",
            DeprecationWarning, stacklevel=2
        )
        return self.source

    @filename.setter
    def filename(self, value):
        """Deprecated, user `source'."""
        warnings.warn(
            "The 'filename' attribute will be removed in future versions. "
            "Use 'source' instead.",
            DeprecationWarning, stacklevel=2
        )
        self.source = value

    def append(self, lineno, line):
        # Record the bad line and extend the exception message in place
        # (one indented "[line NN]: text" entry per faulty line).
        self.errors.append((lineno, line))
        self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
    """Raised when a key-value pair is found before any section header."""

    def __init__(self, filename, lineno, line):
        msg = ('File contains no section headers.\nfile: %s, line: %d\n%r' %
               (filename, lineno, line))
        # Deliberately bypass ParsingError.__init__ (which expects only a
        # source and builds its own message) and initialise Error directly.
        Error.__init__(self, msg)
        self.source = filename
        self.lineno = lineno
        self.line = line
        self.args = (filename, lineno, line)
# Used in parser getters to indicate the default behaviour when a specific
# option is not found: to raise an exception. Created to enable `None' as
# a valid fallback value.
_UNSET = object()  # unique module-private sentinel; always compared with `is'
class Interpolation:
    """Dummy interpolation that passes the value through with no changes."""

    # Every hook receives the raw value and hands it back untouched;
    # concrete interpolation schemes override the hooks they care about.

    def before_get(self, parser, section, option, value, defaults):
        # No expansion: the stored value is returned verbatim.
        return value

    def before_set(self, parser, section, option, value):
        # No syntax validation on assignment.
        return value

    def before_read(self, parser, section, option, value):
        return value

    def before_write(self, parser, section, option, value):
        return value
class BasicInterpolation(Interpolation):
    """Interpolation as implemented in the classic ConfigParser.

    The option values can contain format strings which refer to other values in
    the same section, or values in the special default section.

    For example:

        something: %(dir)s/whatever

    would resolve the "%(dir)s" to the value of dir. All reference
    expansions are done late, on demand. If a user needs to use a bare % in
    a configuration file, she can escape it by writing %%. Other % usage
    is considered a user error and raises `InterpolationSyntaxError'."""

    # Matches a single %(name)s reference.
    _KEYCRE = re.compile(r"%\(([^)]+)\)s")

    def before_get(self, parser, section, option, value, defaults):
        # Expand all references and join the collected fragments.
        L = []
        self._interpolate_some(parser, option, L, value, section, defaults, 1)
        return ''.join(L)

    def before_set(self, parser, section, option, value):
        # Validate at assignment time: after removing escapes and valid
        # references, any remaining '%' is a syntax error.
        tmp_value = value.replace('%%', '')  # escaped percent signs
        tmp_value = self._KEYCRE.sub('', tmp_value)  # valid syntax
        if '%' in tmp_value:
            raise ValueError("invalid interpolation syntax in %r at "
                             "position %d" % (value, tmp_value.find('%')))
        return value

    def _interpolate_some(self, parser, option, accum, rest, section, map,
                          depth):
        # Recursively expand %(name)s references in `rest', appending
        # expanded fragments to `accum'.
        if depth > MAX_INTERPOLATION_DEPTH:
            raise InterpolationDepthError(option, section, rest)
        while rest:
            p = rest.find("%")
            if p < 0:
                # No more references: the remainder is literal text.
                accum.append(rest)
                return
            if p > 0:
                accum.append(rest[:p])
                rest = rest[p:]
            # p is no longer used
            c = rest[1:2]
            if c == "%":
                # '%%' -> literal percent sign.
                accum.append("%")
                rest = rest[2:]
            elif c == "(":
                m = self._KEYCRE.match(rest)
                if m is None:
                    raise InterpolationSyntaxError(option, section,
                        "bad interpolation variable reference %r" % rest)
                var = parser.optionxform(m.group(1))
                rest = rest[m.end():]
                try:
                    v = map[var]
                except KeyError:
                    raise InterpolationMissingOptionError(
                        option, section, rest, var)
                if "%" in v:
                    # The referenced value itself contains references.
                    self._interpolate_some(parser, option, accum, v,
                                           section, map, depth + 1)
                else:
                    accum.append(v)
            else:
                raise InterpolationSyntaxError(
                    option, section,
                    "'%%' must be followed by '%%' or '(', "
                    "found: %r" % (rest,))
class ExtendedInterpolation(Interpolation):
    """Advanced variant of interpolation, supports the syntax used by
    `zc.buildout'. Enables interpolation between sections."""

    # Matches a single ${name} or ${section:name} reference.
    _KEYCRE = re.compile(r"\$\{([^}]+)\}")

    def before_get(self, parser, section, option, value, defaults):
        # Expand all references and join the collected fragments.
        L = []
        self._interpolate_some(parser, option, L, value, section, defaults, 1)
        return ''.join(L)

    def before_set(self, parser, section, option, value):
        # Validate at assignment time: after removing escapes and valid
        # references, any remaining '$' is a syntax error.
        tmp_value = value.replace('$$', '')  # escaped dollar signs
        tmp_value = self._KEYCRE.sub('', tmp_value)  # valid syntax
        if '$' in tmp_value:
            # BUGFIX: position was previously computed with
            # tmp_value.find('%'), which reports -1 for '$'-syntax errors;
            # search for '$' as CPython does upstream.
            raise ValueError("invalid interpolation syntax in %r at "
                             "position %d" % (value, tmp_value.find('$')))
        return value

    def _interpolate_some(self, parser, option, accum, rest, section, map,
                          depth):
        # Recursively expand ${name} / ${section:name} references in
        # `rest', appending expanded fragments to `accum'.
        if depth > MAX_INTERPOLATION_DEPTH:
            raise InterpolationDepthError(option, section, rest)
        while rest:
            p = rest.find("$")
            if p < 0:
                # No more references: the remainder is literal text.
                accum.append(rest)
                return
            if p > 0:
                accum.append(rest[:p])
                rest = rest[p:]
            # p is no longer used
            c = rest[1:2]
            if c == "$":
                # '$$' -> literal dollar sign.
                accum.append("$")
                rest = rest[2:]
            elif c == "{":
                m = self._KEYCRE.match(rest)
                if m is None:
                    raise InterpolationSyntaxError(option, section,
                        "bad interpolation variable reference %r" % rest)
                path = m.group(1).split(':')
                rest = rest[m.end():]
                sect = section
                opt = option
                try:
                    if len(path) == 1:
                        # Same-section reference.
                        opt = parser.optionxform(path[0])
                        v = map[opt]
                    elif len(path) == 2:
                        # Cross-section reference: ${section:option}.
                        sect = path[0]
                        opt = parser.optionxform(path[1])
                        v = parser.get(sect, opt, raw=True)
                    else:
                        raise InterpolationSyntaxError(
                            option, section,
                            "More than one ':' found: %r" % (rest,))
                except (KeyError, NoSectionError, NoOptionError):
                    raise InterpolationMissingOptionError(
                        option, section, rest, ":".join(path))
                if "$" in v:
                    # The referenced value itself contains references;
                    # recurse in the context of the referenced section.
                    self._interpolate_some(parser, opt, accum, v, sect,
                                           dict(parser.items(sect, raw=True)),
                                           depth + 1)
                else:
                    accum.append(v)
            else:
                raise InterpolationSyntaxError(
                    option, section,
                    "'$' must be followed by '$' or '{', "
                    "found: %r" % (rest,))
class LegacyInterpolation(Interpolation):
    """Deprecated interpolation used in old versions of ConfigParser.
    Use BasicInterpolation or ExtendedInterpolation instead."""

    # Matches either a %(name)s reference or any single other character.
    _KEYCRE = re.compile(r"%\(([^)]*)\)s|.")

    def before_get(self, parser, section, option, value, vars):
        rawval = value
        depth = MAX_INTERPOLATION_DEPTH
        while depth:                    # Loop through this until it's done
            depth -= 1
            if value and "%(" in value:
                replace = functools.partial(self._interpolation_replace,
                                            parser=parser)
                # Normalise reference names via optionxform, then expand
                # one level with classic %-formatting.
                value = self._KEYCRE.sub(replace, value)
                try:
                    value = value % vars
                except KeyError as e:
                    raise InterpolationMissingOptionError(
                        option, section, rawval, e.args[0])
            else:
                break
        if value and "%(" in value:
            # Still unresolved after MAX_INTERPOLATION_DEPTH passes.
            raise InterpolationDepthError(option, section, rawval)
        return value

    def before_set(self, parser, section, option, value):
        # Legacy behaviour: no validation on assignment.
        return value

    @staticmethod
    def _interpolation_replace(match, parser):
        s = match.group(1)
        if s is None:
            # Single non-reference character: leave it untouched.
            return match.group()
        else:
            # Rewrite the reference with the case-normalised option name.
            return "%%(%s)s" % parser.optionxform(s)
class RawConfigParser(MutableMapping):
    """ConfigParser that does not do interpolation."""

    # Regular expressions for parsing section headers and options
    _SECT_TMPL = r"""
        \[ # [
        (?P<header>[^]]+) # very permissive!
        \] # ]
        """
    _OPT_TMPL = r"""
        (?P<option>.*?) # very permissive!
        \s*(?P<vi>{delim})\s* # any number of space/tab,
        # followed by any of the
        # allowed delimiters,
        # followed by any space/tab
        (?P<value>.*)$ # everything up to eol
        """
    _OPT_NV_TMPL = r"""
        (?P<option>.*?) # very permissive!
        \s*(?: # any number of space/tab,
        (?P<vi>{delim})\s* # optionally followed by
        # any of the allowed
        # delimiters, followed by any
        # space/tab
        (?P<value>.*))?$ # everything up to eol
        """
    # Interpolation algorithm to be used if the user does not specify another
    _DEFAULT_INTERPOLATION = Interpolation()
    # Compiled regular expression for matching sections
    SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
    # Compiled regular expression for matching options with typical separators
    OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
    # Compiled regular expression for matching options with optional values
    # delimited using typical separators
    OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
    # Compiled regular expression for matching leading whitespace in a line
    NONSPACECRE = re.compile(r"\S")
    # Possible boolean values in the configuration.
    BOOLEAN_STATES = {'1': True, 'yes': True, 'true': True, 'on': True,
                      '0': False, 'no': False, 'false': False, 'off': False}

    def __init__(self, defaults=None, dict_type=_default_dict,
                 allow_no_value=False, *, delimiters=('=', ':'),
                 comment_prefixes=('#', ';'), inline_comment_prefixes=None,
                 strict=True, empty_lines_in_values=True,
                 default_section=DEFAULTSECT,
                 interpolation=_UNSET):
        self._dict = dict_type
        self._sections = self._dict()    # section name -> options dict
        self._defaults = self._dict()    # options of the DEFAULT section
        self._proxies = self._dict()     # section name -> SectionProxy
        self._proxies[default_section] = SectionProxy(self, default_section)
        if defaults:
            for key, value in defaults.items():
                self._defaults[self.optionxform(key)] = value
        self._delimiters = tuple(delimiters)
        if delimiters == ('=', ':'):
            # Default delimiters: reuse the precompiled class-level patterns.
            self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
        else:
            # Custom delimiters: compile a pattern with them escaped.
            d = "|".join(re.escape(d) for d in delimiters)
            if allow_no_value:
                self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
                                          re.VERBOSE)
            else:
                self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
                                          re.VERBOSE)
        self._comment_prefixes = tuple(comment_prefixes or ())
        self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
        self._strict = strict
        self._allow_no_value = allow_no_value
        self._empty_lines_in_values = empty_lines_in_values
        self.default_section = default_section
        self._interpolation = interpolation
        if self._interpolation is _UNSET:
            self._interpolation = self._DEFAULT_INTERPOLATION
        if self._interpolation is None:
            # Explicit None means "no interpolation at all".
            self._interpolation = Interpolation()
    def defaults(self):
        # Expose the raw DEFAULT-section mapping (mutable!).
        return self._defaults

    def sections(self):
        """Return a list of section names, excluding [DEFAULT]"""
        # self._sections will never have [DEFAULT] in it
        return list(self._sections.keys())

    def add_section(self, section):
        """Create a new section in the configuration.

        Raise DuplicateSectionError if a section by the specified name
        already exists. Raise ValueError if name is DEFAULT.
        """
        if section == self.default_section:
            raise ValueError('Invalid section name: %r' % section)

        if section in self._sections:
            raise DuplicateSectionError(section)
        self._sections[section] = self._dict()
        self._proxies[section] = SectionProxy(self, section)

    def has_section(self, section):
        """Indicate whether the named section is present in the configuration.

        The DEFAULT section is not acknowledged.
        """
        return section in self._sections

    def options(self, section):
        """Return a list of option names for the given section name."""
        try:
            opts = self._sections[section].copy()
        except KeyError:
            raise NoSectionError(section)
        # Options inherited from [DEFAULT] are part of every section.
        opts.update(self._defaults)
        return list(opts.keys())
    def read(self, filenames, encoding=None):
        """Read and parse a filename or a list of filenames.

        Files that cannot be opened are silently ignored; this is
        designed so that you can specify a list of potential
        configuration file locations (e.g. current directory, user's
        home directory, systemwide directory), and all existing
        configuration files in the list will be read. A single
        filename may also be given.

        Return list of successfully read files.
        """
        if isinstance(filenames, str):
            filenames = [filenames]
        read_ok = []
        for filename in filenames:
            try:
                with open(filename, encoding=encoding) as fp:
                    self._read(fp, filename)
            except IOError:
                # Missing/unreadable files are skipped by design.
                continue
            read_ok.append(filename)
        return read_ok

    def read_file(self, f, source=None):
        """Like read() but the argument must be a file-like object.

        The `f' argument must be iterable, returning one line at a time.
        Optional second argument is the `source' specifying the name of the
        file being read. If not given, it is taken from f.name. If `f' has no
        `name' attribute, `<???>' is used.
        """
        if source is None:
            try:
                source = f.name
            except AttributeError:
                source = '<???>'
        self._read(f, source)

    def read_string(self, string, source='<string>'):
        """Read configuration from a given string."""
        sfile = io.StringIO(string)
        self.read_file(sfile, source)

    def read_dict(self, dictionary, source='<dict>'):
        """Read configuration from a dictionary.

        Keys are section names, values are dictionaries with keys and values
        that should be present in the section. If the used dictionary type
        preserves order, sections and their keys will be added in order.

        All types held in the dictionary are converted to strings during
        reading, including section names, option names and keys.

        Optional second argument is the `source' specifying the name of the
        dictionary being read.
        """
        elements_added = set()
        for section, keys in dictionary.items():
            section = str(section)
            try:
                self.add_section(section)
            except (DuplicateSectionError, ValueError):
                # In strict mode a section may appear only once per source.
                if self._strict and section in elements_added:
                    raise
            elements_added.add(section)
            for key, value in keys.items():
                key = self.optionxform(str(key))
                if value is not None:
                    value = str(value)
                if self._strict and (section, key) in elements_added:
                    raise DuplicateOptionError(section, key, source)
                elements_added.add((section, key))
                self.set(section, key, value)

    def readfp(self, fp, filename=None):
        """Deprecated, use read_file instead."""
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'parser.read_file()' instead.",
            DeprecationWarning, stacklevel=2
        )
        self.read_file(fp, source=filename)
    def get(self, section, option, *, raw=False, vars=None, fallback=_UNSET):
        """Get an option value for a given section.

        If `vars' is provided, it must be a dictionary. The option is looked up
        in `vars' (if provided), `section', and in `DEFAULTSECT' in that order.
        If the key is not found and `fallback' is provided, it is used as
        a fallback value. `None' can be provided as a `fallback' value.

        If interpolation is enabled and the optional argument `raw' is False,
        all interpolations are expanded in the return values.

        Arguments `raw', `vars', and `fallback' are keyword only.

        The section DEFAULT is special.
        """
        try:
            d = self._unify_values(section, vars)
        except NoSectionError:
            if fallback is _UNSET:
                raise
            else:
                return fallback
        option = self.optionxform(option)
        try:
            value = d[option]
        except KeyError:
            if fallback is _UNSET:
                raise NoOptionError(option, section)
            else:
                return fallback

        if raw or value is None:
            # Raw mode (or a valueless option): skip interpolation entirely.
            return value
        else:
            return self._interpolation.before_get(self, section, option, value,
                                                  d)

    def _get(self, section, conv, option, **kwargs):
        # Shared helper for the typed getters below: fetch then convert.
        return conv(self.get(section, option, **kwargs))

    def getint(self, section, option, *, raw=False, vars=None,
               fallback=_UNSET):
        try:
            return self._get(section, int, option, raw=raw, vars=vars)
        except (NoSectionError, NoOptionError):
            if fallback is _UNSET:
                raise
            else:
                return fallback

    def getfloat(self, section, option, *, raw=False, vars=None,
                 fallback=_UNSET):
        try:
            return self._get(section, float, option, raw=raw, vars=vars)
        except (NoSectionError, NoOptionError):
            if fallback is _UNSET:
                raise
            else:
                return fallback

    def getboolean(self, section, option, *, raw=False, vars=None,
                   fallback=_UNSET):
        try:
            return self._get(section, self._convert_to_boolean, option,
                             raw=raw, vars=vars)
        except (NoSectionError, NoOptionError):
            if fallback is _UNSET:
                raise
            else:
                return fallback
    def items(self, section=_UNSET, raw=False, vars=None):
        """Return a list of (name, value) tuples for each option in a section.

        All % interpolations are expanded in the return values, based on the
        defaults passed into the constructor, unless the optional argument
        `raw' is true. Additional substitutions may be provided using the
        `vars' argument, which must be a dictionary whose contents overrides
        any pre-existing defaults.

        The section DEFAULT is special.
        """
        if section is _UNSET:
            # No section given: behave as the MutableMapping items() view,
            # yielding (section_name, SectionProxy) pairs.
            return super().items()
        d = self._defaults.copy()
        try:
            d.update(self._sections[section])
        except KeyError:
            if section != self.default_section:
                raise NoSectionError(section)
        # Update with the entry specific variables
        if vars:
            for key, value in vars.items():
                d[self.optionxform(key)] = value
        value_getter = lambda option: self._interpolation.before_get(self,
            section, option, d[option], d)
        if raw:
            # Raw mode: return stored values without interpolation.
            value_getter = lambda option: d[option]
        return [(option, value_getter(option)) for option in d.keys()]

    def popitem(self):
        """Remove a section from the parser and return it as
        a (section_name, section_proxy) tuple. If no section is present, raise
        KeyError.

        The section DEFAULT is never returned because it cannot be removed.
        """
        # sections() excludes DEFAULT, so the loop body runs at most once.
        for key in self.sections():
            value = self[key]
            del self[key]
            return key, value
        raise KeyError

    def optionxform(self, optionstr):
        # Option names are case-insensitive by default; override to change.
        return optionstr.lower()
    def has_option(self, section, option):
        """Check for the existence of a given option in a given section.
        If the specified `section' is None or an empty string, DEFAULT is
        assumed. If the specified `section' does not exist, returns False."""
        if not section or section == self.default_section:
            option = self.optionxform(option)
            return option in self._defaults
        elif section not in self._sections:
            return False
        else:
            option = self.optionxform(option)
            # Options inherited from [DEFAULT] count as present.
            return (option in self._sections[section]
                    or option in self._defaults)

    def set(self, section, option, value=None):
        """Set an option."""
        if value:
            # Give the interpolation engine a chance to validate the syntax.
            value = self._interpolation.before_set(self, section, option,
                                                   value)
        if not section or section == self.default_section:
            sectdict = self._defaults
        else:
            try:
                sectdict = self._sections[section]
            except KeyError:
                raise NoSectionError(section)
        sectdict[self.optionxform(option)] = value
    def write(self, fp, space_around_delimiters=True):
        """Write an .ini-format representation of the configuration state.

        If `space_around_delimiters' is True (the default), delimiters
        between keys and values are surrounded by spaces.
        """
        if space_around_delimiters:
            d = " {} ".format(self._delimiters[0])
        else:
            d = self._delimiters[0]
        # DEFAULT is written first so the file round-trips correctly.
        if self._defaults:
            self._write_section(fp, self.default_section,
                                self._defaults.items(), d)
        for section in self._sections:
            self._write_section(fp, section,
                                self._sections[section].items(), d)

    def _write_section(self, fp, section_name, section_items, delimiter):
        """Write a single section to the specified `fp'."""
        fp.write("[{}]\n".format(section_name))
        for key, value in section_items:
            value = self._interpolation.before_write(self, section_name, key,
                                                     value)
            if value is not None or not self._allow_no_value:
                # Continuation lines are indented with a tab on output.
                value = delimiter + str(value).replace('\n', '\n\t')
            else:
                # Valueless option: write the bare key.
                value = ""
            fp.write("{}{}\n".format(key, value))
        fp.write("\n")

    def remove_option(self, section, option):
        """Remove an option."""
        if not section or section == self.default_section:
            sectdict = self._defaults
        else:
            try:
                sectdict = self._sections[section]
            except KeyError:
                raise NoSectionError(section)
        option = self.optionxform(option)
        existed = option in sectdict
        if existed:
            del sectdict[option]
        return existed

    def remove_section(self, section):
        """Remove a file section."""
        existed = section in self._sections
        if existed:
            del self._sections[section]
            del self._proxies[section]
        return existed
    def __getitem__(self, key):
        # parser[section] returns a live SectionProxy view, never a dict.
        if key != self.default_section and not self.has_section(key):
            raise KeyError(key)
        return self._proxies[key]

    def __setitem__(self, key, value):
        # To conform with the mapping protocol, overwrites existing values in
        # the section.

        # XXX this is not atomic if read_dict fails at any point. Then again,
        # no update method in configparser is atomic in this implementation.
        if key == self.default_section:
            self._defaults.clear()
        elif key in self._sections:
            self._sections[key].clear()
        self.read_dict({key: value})

    def __delitem__(self, key):
        if key == self.default_section:
            raise ValueError("Cannot remove the default section.")
        if not self.has_section(key):
            raise KeyError(key)
        self.remove_section(key)

    def __contains__(self, key):
        # DEFAULT is always considered present.
        return key == self.default_section or self.has_section(key)

    def __len__(self):
        return len(self._sections) + 1 # the default section

    def __iter__(self):
        # XXX does it break when underlying container state changed?
        return itertools.chain((self.default_section,), self._sections.keys())
def _read(self, fp, fpname):
    """Parse a sectioned configuration file.

    Each section in a configuration file contains a header, indicated by
    a name in square brackets (`[]'), plus key/value options, indicated by
    `name' and `value' delimited with a specific substring (`=' or `:' by
    default).

    Values can span multiple lines, as long as they are indented deeper
    than the first line of the value. Depending on the parser's mode, blank
    lines may be treated as parts of multiline values or ignored.

    Configuration files may include comments, prefixed by specific
    characters (`#' and `;' by default). Comments may appear on their own
    in an otherwise empty line or may be entered in lines holding values or
    section names.

    While reading, multiline values are accumulated as lists of lines in
    `cursect[optname]`; _join_multiline_values() collapses them to strings
    at the end.
    """
    elements_added = set()   # (section) and (section, option) keys seen, for strict mode
    cursect = None                        # None, or a dictionary
    sectname = None
    optname = None
    lineno = 0
    indent_level = 0
    e = None                              # None, or an exception
    for lineno, line in enumerate(fp, start=1):
        comment_start = sys.maxsize
        # strip inline comments
        # Iteratively find, for each inline prefix, the first occurrence
        # that is at line start or preceded by whitespace; others are part
        # of the value.
        inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
        while comment_start == sys.maxsize and inline_prefixes:
            next_prefixes = {}
            for prefix, index in inline_prefixes.items():
                index = line.find(prefix, index+1)
                if index == -1:
                    continue
                next_prefixes[prefix] = index
                if index == 0 or (index > 0 and line[index-1].isspace()):
                    comment_start = min(comment_start, index)
            inline_prefixes = next_prefixes
        # strip full line comments
        for prefix in self._comment_prefixes:
            if line.strip().startswith(prefix):
                comment_start = 0
                break
        if comment_start == sys.maxsize:
            comment_start = None
        value = line[:comment_start].strip()
        if not value:
            if self._empty_lines_in_values:
                # add empty line to the value, but only if there was no
                # comment on the line
                if (comment_start is None and
                        cursect is not None and
                        optname and
                        cursect[optname] is not None):
                    cursect[optname].append('')  # newlines added at join
            else:
                # empty line marks end of value
                indent_level = sys.maxsize
            continue
        # continuation line?
        first_nonspace = self.NONSPACECRE.search(line)
        cur_indent_level = first_nonspace.start() if first_nonspace else 0
        if (cursect is not None and optname and
                cur_indent_level > indent_level):
            cursect[optname].append(value)
        # a section header or option header?
        else:
            indent_level = cur_indent_level
            # is it a section header?
            mo = self.SECTCRE.match(value)
            if mo:
                sectname = mo.group('header')
                if sectname in self._sections:
                    if self._strict and sectname in elements_added:
                        raise DuplicateSectionError(sectname, fpname,
                                                    lineno)
                    cursect = self._sections[sectname]
                    elements_added.add(sectname)
                elif sectname == self.default_section:
                    cursect = self._defaults
                else:
                    cursect = self._dict()
                    self._sections[sectname] = cursect
                    self._proxies[sectname] = SectionProxy(self, sectname)
                    elements_added.add(sectname)
                # So sections can't start with a continuation line
                optname = None
            # no section header in the file?
            elif cursect is None:
                raise MissingSectionHeaderError(fpname, lineno, line)
            # an option line?
            else:
                mo = self._optcre.match(value)
                if mo:
                    # 'vi' (the delimiter that matched) is unused here.
                    optname, vi, optval = mo.group('option', 'vi', 'value')
                    if not optname:
                        e = self._handle_error(e, fpname, lineno, line)
                    optname = self.optionxform(optname.rstrip())
                    if (self._strict and
                            (sectname, optname) in elements_added):
                        raise DuplicateOptionError(sectname, optname,
                                                   fpname, lineno)
                    elements_added.add((sectname, optname))
                    # This check is fine because the OPTCRE cannot
                    # match if it would set optval to None
                    if optval is not None:
                        optval = optval.strip()
                        cursect[optname] = [optval]
                    else:
                        # valueless option handling
                        cursect[optname] = None
                else:
                    # a non-fatal parsing error occurred. set up the
                    # exception but keep going. the exception will be
                    # raised at the end of the file and will contain a
                    # list of all bogus lines
                    e = self._handle_error(e, fpname, lineno, line)
    # if any parsing errors occurred, raise an exception
    if e:
        raise e
    self._join_multiline_values()
def _join_multiline_values(self):
    """Collapse the per-line value lists built by _read() into strings.

    Every value (including those in the defaults pseudo-section) is run
    through the interpolation engine's before_read hook.
    """
    sections = itertools.chain(
        ((self.default_section, self._defaults),),
        self._sections.items())
    for section_name, option_map in sections:
        for option, raw in option_map.items():
            if isinstance(raw, list):
                # Multiline values were accumulated line-by-line; join
                # with newlines and drop trailing blank lines.
                raw = '\n'.join(raw).rstrip()
            option_map[option] = self._interpolation.before_read(
                self, section_name, option, raw)
def _handle_error(self, exc, fpname, lineno, line):
    """Record one bogus line on a (possibly new) ParsingError and return it.

    The caller keeps accumulating errors and raises the exception once,
    at the end of the file.
    """
    exc = exc or ParsingError(fpname)
    exc.append(lineno, repr(line))
    return exc
def _unify_values(self, section, vars):
    """Create a sequence of lookups with 'vars' taking priority over
    the 'section' which takes priority over the DEFAULTSECT.

    Returns a _ChainMap, so lookups fall through vars -> section ->
    defaults in that order without copying any of the dicts.
    """
    sectiondict = {}
    try:
        sectiondict = self._sections[section]
    except KeyError:
        # The default section is a valid target even though it is not
        # stored in _sections.
        if section != self.default_section:
            raise NoSectionError(section)
    # Update with the entry specific variables
    vardict = {}
    if vars:
        for key, value in vars.items():
            # Coerce to str so interpolation and the get*() converters
            # treat caller-supplied vars like parsed option values.
            if value is not None:
                value = str(value)
            vardict[self.optionxform(key)] = value
    return _ChainMap(vardict, sectiondict, self._defaults)
def _convert_to_boolean(self, value):
    """Map a configuration string onto True/False via BOOLEAN_STATES.

    Raises ValueError for strings not listed in BOOLEAN_STATES.
    """
    key = value.lower()
    if key in self.BOOLEAN_STATES:
        return self.BOOLEAN_STATES[key]
    raise ValueError('Not a boolean: %s' % value)
def _validate_value_types(self, *, section="", option="", value=""):
    """Raises a TypeError for non-string values.

    The only legal non-string value if we allow valueless
    options is None, so we need to check if the value is a
    string if:
    - we do not allow valueless options, or
    - we allow valueless options but the value is not None

    For compatibility reasons this method is not used in classic set()
    for RawConfigParsers. It is invoked in every case for mapping protocol
    access and in ConfigParser.set().
    """
    for label, candidate in (("section names", section),
                             ("option keys", option)):
        if not isinstance(candidate, str):
            raise TypeError("%s must be strings" % label)
    # Falsy values (None, "") are allowed through when valueless options
    # are enabled; everything else must be a string.
    if not self._allow_no_value or value:
        if not isinstance(value, str):
            raise TypeError("option values must be strings")
class ConfigParser(RawConfigParser):
    """ConfigParser implementing interpolation."""

    # BasicInterpolation expands %(name)s references in option values.
    _DEFAULT_INTERPOLATION = BasicInterpolation()

    def set(self, section, option, value=None):
        """Set an option.  Extends RawConfigParser.set by validating type and
        interpolation syntax on the value."""
        self._validate_value_types(option=option, value=value)
        super().set(section, option, value)

    def add_section(self, section):
        """Create a new section in the configuration.  Extends
        RawConfigParser.add_section by validating if the section name is
        a string."""
        self._validate_value_types(section=section)
        super().add_section(section)
class SafeConfigParser(ConfigParser):
    """ConfigParser alias for backwards compatibility purposes."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Behavior is identical to ConfigParser; only a deprecation
        # warning is emitted at construction time.
        warnings.warn(
            "The SafeConfigParser class has been renamed to ConfigParser "
            "in Python 3.2. This alias will be removed in future versions."
            " Use ConfigParser directly instead.",
            DeprecationWarning, stacklevel=2
        )
class SectionProxy(MutableMapping):
    """A proxy for a single section from a parser.

    All reads and writes are delegated to the owning parser, so the proxy
    always reflects the parser's current state (including defaults).
    """

    def __init__(self, parser, name):
        """Creates a view on a section of the specified `name` in `parser`."""
        self._parser = parser
        self._name = name

    def __repr__(self):
        return '<Section: {}>'.format(self._name)

    def __getitem__(self, key):
        if not self._parser.has_option(self._name, key):
            raise KeyError(key)
        return self._parser.get(self._name, key)

    def __setitem__(self, key, value):
        # Validate first so bad types fail before mutating the parser.
        self._parser._validate_value_types(option=key, value=value)
        return self._parser.set(self._name, key, value)

    def __delitem__(self, key):
        if not (self._parser.has_option(self._name, key) and
                self._parser.remove_option(self._name, key)):
            raise KeyError(key)

    def __contains__(self, key):
        return self._parser.has_option(self._name, key)

    def __len__(self):
        return len(self._options())

    def __iter__(self):
        return self._options().__iter__()

    def _options(self):
        # The default section is not listed by parser.options(); fall
        # back to defaults() for it.
        if self._name != self._parser.default_section:
            return self._parser.options(self._name)
        else:
            return self._parser.defaults()

    def get(self, option, fallback=None, *, raw=False, vars=None):
        # Unlike mapping-style access, missing options return `fallback`.
        return self._parser.get(self._name, option, raw=raw, vars=vars,
                                fallback=fallback)

    def getint(self, option, fallback=None, *, raw=False, vars=None):
        return self._parser.getint(self._name, option, raw=raw, vars=vars,
                                   fallback=fallback)

    def getfloat(self, option, fallback=None, *, raw=False, vars=None):
        return self._parser.getfloat(self._name, option, raw=raw, vars=vars,
                                     fallback=fallback)

    def getboolean(self, option, fallback=None, *, raw=False, vars=None):
        return self._parser.getboolean(self._name, option, raw=raw, vars=vars,
                                       fallback=fallback)

    @property
    def parser(self):
        # The parser object of the proxy is read-only.
        return self._parser

    @property
    def name(self):
        # The name of the section on a proxy is read-only.
        return self._name
| gpl-3.0 |
deepjets/deepjets | deepjets/samples.py | 1 | 20650 | from joblib import Parallel, delayed
import h5py
import numpy as np
from numpy.lib.recfunctions import append_fields
import dask.array as da
from dask.diagnostics import ProgressBar
import os
from .generate import generate_events, get_generator_input
from .preprocessing import preprocess, pixel_edges
from .extern.six import string_types
def eval_recarray(expr, rec):
    """Evaluate `expr` with the record array's fields as local variables.

    NOTE: uses eval(); `expr` must come from a trusted source.
    """
    field_locals = {field: rec[field] for field in rec.dtype.names}
    return eval(expr, globals(), field_locals)
def mask_nan_inf(arr, fill=0):
    """Replace NaN and +/-inf entries of `arr` in place with `fill`."""
    # ~isfinite covers exactly isnan | isinf.
    arr[~np.isfinite(arr)] = fill
# Common floating-point type for all HDF5 datasets in this module.
DTYPE = np.double

# Single jet four-momentum record.
dt_jet = np.dtype(
    [('pT', DTYPE), ('eta', DTYPE), ('phi', DTYPE), ('mass', DTYPE)])
# Variable-length array of jets (e.g. the subjets of one event).
dt_jets = h5py.special_dtype(vlen=dt_jet)
# Variable-length list of calorimeter-like jet constituents.
dt_constit = h5py.special_dtype(vlen=np.dtype(
    [('ET', DTYPE), ('eta', DTYPE), ('phi', DTYPE)]))
# Generator-level particle record; production vertex and PDG id are
# extra info needed by Delphes.
dt_particle = np.dtype(
    [('E', DTYPE), ('px', DTYPE), ('py', DTYPE), ('pz', DTYPE), ('mass', DTYPE),
     ('prodx', DTYPE), ('prody', DTYPE), ('prodz', DTYPE), ('prodt', DTYPE),
     ('pdgid', DTYPE)])
dt_particles = h5py.special_dtype(vlen=dt_particle)
# Delphes candidate four-vector, and a variable-length list thereof.
dt_candidate = np.dtype(
    [('E', DTYPE), ('px', DTYPE), ('py', DTYPE), ('pz', DTYPE)])
dt_candidates = h5py.special_dtype(vlen=dt_candidate)
def create_jets_datasets(h5file, events, jet_size, subjet_size_fraction):
    """Create the resizable per-jet datasets in `h5file`.

    Parameters
    ----------
    h5file : h5py.File
        Output file; datasets are created at its root.
    events : int
        Initial (and maximum) number of rows.
    jet_size, subjet_size_fraction : float
        Clustering configuration, stored as scalar metadata datasets.
    """
    def _make(name, dtype):
        # All per-event datasets share the same resizable, chunked layout.
        h5file.create_dataset(name, (events,), maxshape=(events,),
                              dtype=dtype, chunks=True)

    _make('jet', dt_jet)
    _make('trimmed_jet', dt_jet)
    _make('subjets', dt_jets)
    _make('constituents', dt_constit)
    _make('trimmed_constituents', dt_constit)
    for name in ('shrinkage', 'subjet_dr', 'tau_1', 'tau_2', 'tau_3'):
        _make(name, DTYPE)
    # Scalar metadata recording the jet clustering configuration.
    dset_jet_size = h5file.create_dataset('jet_size', (1,), dtype=DTYPE)
    dset_subjet_size_fraction = h5file.create_dataset(
        'subjet_size_fraction', (1,), dtype=DTYPE)
    dset_jet_size[0] = jet_size
    dset_subjet_size_fraction[0] = subjet_size_fraction
def create_event_datasets(h5file, events, delphes=False, nweights=0):
    """Create the event-level datasets (particle records and weights).

    Delphes output stores four-vector candidates; plain generator output
    stores full particle records (with production vertex and PDG id).
    """
    particle_dtype = dt_candidates if delphes else dt_particles
    h5file.create_dataset('events', (events,), maxshape=(events,),
                          dtype=particle_dtype, chunks=True)
    if nweights > 0:
        # One row of generator weight variations per event.
        h5file.create_dataset('weights', (events, nweights),
                              maxshape=(events, nweights),
                              dtype=DTYPE, chunks=True)
def get_images(generator_params, nevents, pt_min, pt_max,
               pixel_size=(0.1, 0.1), image_size=25, normalize=True,
               jet_size=1.0, subjet_size_fraction=0.5, zoom=True, **kwargs):
    """
    Generate `nevents` preprocessed jet images with trimmed jet pT in
    [pt_min, pt_max].

    Returns
    -------
    images : ndarray, shape (nevents, image_size, image_size)
        Preprocessed (optionally zoomed and normalized) jet images.
    auxvars : record array
        Per-event pt, pt_trimmed, mass, mass_trimmed, subjet_dr and the
        n-subjettiness values tau_1, tau_2, tau_3.
    """
    # Widen the generation phase space beyond the analysis window so
    # trimming does not sculpt the edges of the pT selection.
    params_dict = {
        'PhaseSpace:pTHatMin': pt_min - 20.,
        'PhaseSpace:pTHatMax': pt_max + 20.}
    # defensive copy so the caller's dict is not mutated
    generator_params = generator_params.copy()
    generator_params['params_dict'] = params_dict
    gen_input = get_generator_input('pythia', **generator_params)
    images = np.empty((nevents, image_size, image_size), dtype=np.double)
    pt = np.empty(nevents, dtype=np.double)
    pt_trimmed = np.empty(nevents, dtype=np.double)
    mass = np.empty(nevents, dtype=np.double)
    mass_trimmed = np.empty(nevents, dtype=np.double)
    subjet_dr = np.empty(nevents, dtype=np.double)
    tau_1 = np.empty(nevents, dtype=np.double)
    tau_2 = np.empty(nevents, dtype=np.double)
    tau_3 = np.empty(nevents, dtype=np.double)
    edges = pixel_edges(
        jet_size=jet_size,
        subjet_size_fraction=subjet_size_fraction,
        pixel_size=pixel_size)
    ievent = 0
    # generate_events is the generator entry point imported at the top of
    # this module; the bare name `generate` used previously is undefined
    # here and raised a NameError.
    for event in generate_events(gen_input, nevents, jet_size=jet_size,
                                 subjet_size_fraction=subjet_size_fraction,
                                 trimmed_pt_min=pt_min, trimmed_pt_max=pt_max,
                                 compute_auxvars=True,
                                 **kwargs):
        # Zoom factor undoes the shrinkage of the trimmed jet so all
        # images live on a comparable angular scale.
        image = preprocess(event.subjets, event.trimmed_constit, edges,
                           zoom=1. / event.shrinkage if zoom else False,
                           normalize=normalize,
                           out_width=image_size)
        images[ievent] = image
        pt[ievent] = event.jets[0]['pT']
        pt_trimmed[ievent] = event.jets[1]['pT']
        mass[ievent] = event.jets[0]['mass']
        mass_trimmed[ievent] = event.jets[1]['mass']
        subjet_dr[ievent] = event.subjet_dr
        tau_1[ievent] = event.tau_1
        tau_2[ievent] = event.tau_2
        tau_3[ievent] = event.tau_3
        ievent += 1
    auxvars = np.core.records.fromarrays(
        [pt, pt_trimmed, mass, mass_trimmed, subjet_dr, tau_1, tau_2, tau_3],
        names='pt,pt_trimmed,mass,mass_trimmed,subjet_dr,tau_1,tau_2,tau_3')
    return images, auxvars
def get_events(h5file, generator_params, nevents, pt_min, pt_max,
               offset=0, **kwargs):
    """Generate `nevents` events with trimmed pT in [pt_min, pt_max] and
    write them into the pre-created datasets of `h5file`, starting at row
    `offset` (see create_jets_datasets for the expected layout).
    """
    # Widen the generation phase space beyond the analysis window so
    # trimming does not sculpt the edges of the pT selection.
    params_dict = {
        'PhaseSpace:pTHatMin': pt_min - 20.,
        'PhaseSpace:pTHatMax': pt_max + 20.}
    # defensive copy so the caller's dict is not mutated
    generator_params = generator_params.copy()
    generator_params['params_dict'] = params_dict
    gen_input = get_generator_input('pythia', **generator_params)
    dset_jet = h5file['jet']
    dset_trimmed_jet = h5file['trimmed_jet']
    dset_subjets = h5file['subjets']
    dset_constit = h5file['constituents']
    dset_trimmed_constit = h5file['trimmed_constituents']
    dset_shrinkage = h5file['shrinkage']
    dset_dr_subjets = h5file['subjet_dr']
    dset_tau_1 = h5file['tau_1']
    dset_tau_2 = h5file['tau_2']
    dset_tau_3 = h5file['tau_3']
    ievent = offset
    # generate_events is the generator entry point imported at the top of
    # this module; the bare name `generate` used previously is undefined
    # here and raised a NameError.
    for event in generate_events(gen_input, nevents,
                                 trimmed_pt_min=pt_min,
                                 trimmed_pt_max=pt_max,
                                 compute_auxvars=True,
                                 **kwargs):
        dset_jet[ievent] = event.jets[0]
        dset_trimmed_jet[ievent] = event.jets[1]
        dset_subjets[ievent] = event.subjets
        dset_constit[ievent] = event.constit
        dset_trimmed_constit[ievent] = event.trimmed_constit
        dset_shrinkage[ievent] = event.shrinkage
        dset_dr_subjets[ievent] = event.subjet_dr
        dset_tau_1[ievent] = event.tau_1
        dset_tau_2[ievent] = event.tau_2
        dset_tau_3[ievent] = event.tau_3
        ievent += 1
def get_flat_weights(pt, pt_min, pt_max, pt_bins):
    """Return per-event weights that flatten the pT spectrum.

    The range [pt_min, pt_max] is divided into `pt_bins` equal-width bins;
    each event is weighted by the inverse of its bin's normalized
    population, and the weights are rescaled to have mean 1.
    """
    edges = np.linspace(pt_min, pt_max, pt_bins + 1)
    pt_hist, edges = np.histogram(pt, bins=edges)
    # Normalize to unit sum so the weights are inverse *fractions*.
    pt_hist = np.true_divide(pt_hist, pt_hist.sum())
    # Map each pt onto its bin index. side='right' with a clip reproduces
    # np.histogram binning (right-open bins, last bin closed on both
    # sides) and fixes the off-by-one where pt == pt_min produced index
    # -1 (i.e. the *last* bin) with the previous side='left' lookup.
    bin_idx = np.clip(np.searchsorted(edges, pt, side='right') - 1,
                      0, pt_bins - 1)
    weights = np.true_divide(1., np.take(pt_hist, bin_idx))
    return np.true_divide(weights, weights.mean())
def get_flat_images(generator_params, nevents_per_pt_bin,
                    pt_min, pt_max, pt_bins=10,
                    n_jobs=-1, **kwargs):
    """
    Construct a sample of images over a pT range by combining samples
    constructed in pT intervals in this range.

    Returns the shuffled images and their auxiliary variables (with a
    'weights' column that flattens the pT spectrum).
    """
    pt_bin_edges = np.linspace(pt_min, pt_max, pt_bins + 1)
    # One parallel job per pT interval.
    out = Parallel(n_jobs=n_jobs)(
        delayed(get_images)(
            generator_params, nevents_per_pt_bin, pt_lo, pt_hi, **kwargs)
        for pt_lo, pt_hi in zip(pt_bin_edges[:-1], pt_bin_edges[1:]))
    images = np.concatenate([x[0] for x in out])
    auxvars = np.concatenate([x[1] for x in out])
    pt = auxvars['pt_trimmed']
    # Use finer bins than the generation intervals so any residual slope
    # inside each interval is flattened as well.
    image_weights = get_flat_weights(pt, pt_min, pt_max, pt_bins * 4)
    # add weights column to auxvars
    auxvars = append_fields(auxvars, 'weights', data=image_weights)
    # Shuffle reproducibly, keyed on the generator seed.
    # (A dead `random_state = kwargs.get('random_state', None)` assignment
    # that was immediately shadowed here has been removed.)
    random_state = np.random.RandomState(generator_params.get('random_state', 0))
    permute_idx = random_state.permutation(images.shape[0])
    images = images[permute_idx]
    auxvars = auxvars[permute_idx]
    return images, auxvars
def make_flat_images(filename, pt_min, pt_max, pt_bins=20,
                     mass_min=None, mass_max=None):
    """ Crop and weight a dataset such that pt is within pt_min and pt_max
    and the pt distribution is approximately flat. Return the images and
    weights.

    Returns (images, auxvars, weights): images and auxvars are lazy
    dask/h5py handles backed by the input file, which is intentionally
    left open so they remain readable; weights are zero for rejected
    events and flatten the pT spectrum for accepted ones.
    """
    hfile = h5py.File(filename, 'r')
    images = da.from_array(hfile['images'], chunks=(10000, 25, 25))
    auxvars = hfile['auxvars']
    pt_trimmed = da.from_array(auxvars['pt_trimmed'], chunks=1000000)
    # Acceptance window on trimmed pT, optionally also on trimmed mass.
    accept = ((pt_trimmed >= pt_min) & (pt_trimmed < pt_max))
    if mass_min is not None or mass_max is not None:
        mass_trimmed = da.from_array(auxvars['mass_trimmed'], chunks=1000000)
        if mass_min is not None:
            accept &= mass_trimmed >= mass_min
        if mass_max is not None:
            accept &= mass_trimmed < mass_max
    accept = accept.compute()
    jet_pt = pt_trimmed[accept].compute()
    w = get_flat_weights(jet_pt, pt_min, pt_max, pt_bins)
    # combine accept and weights
    weights = accept.astype(float)
    weights[accept.nonzero()] *= w
    # weights are zero when accept == False
    return images, auxvars, weights
def dataset_append(h5output, datasetname, data,
                   dtype=None, chunked_read=False,
                   selection=None, indices=None):
    """ Append an array to an HDF5 dataset.

    Parameters
    ----------
    h5output : h5py.File or str
        Open output file, or a path opened in append mode and closed on
        return.
    datasetname : str
        Dataset to create (resizable) or extend.
    data : array or h5py.Dataset
        Rows to append.
    dtype : numpy dtype, optional
        Convert `data` to this dtype while appending.
    chunked_read : int or False
        If nonzero, read `data` in chunks of approximately this many
        bytes (only effective when `data` is an h5py.Dataset).
    selection : bool array, optional
        Mask selecting which rows of `data` to append.
    indices : int array, optional
        Explicit row indices of `data` to append; mutually exclusive
        with `selection`.
    """
    if indices is not None and selection is not None:
        raise NotImplementedError(
            "handling both selection and indices is not implemented")
    if isinstance(h5output, string_types):
        h5file = h5py.File(h5output, 'a')
        own_file = True
    else:
        own_file = False
        h5file = h5output
    if dtype is None:
        dtype = data.dtype
        convert_type = False
    else:
        convert_type = True
    if datasetname not in h5file:
        dset = h5file.create_dataset(
            datasetname, data.shape,
            maxshape=[None,] + list(data.shape)[1:],
            dtype=dtype, chunks=True)
        prev_size = 0
    else:
        dset = h5file[datasetname]
        prev_size = dset.shape[0]
    # Resize to the final number of rows before writing (shrinks a fresh
    # dataset when a selection/index subset is being appended).
    if selection is not None:
        dset.resize(prev_size + selection.sum(), axis=0)
    elif indices is not None:
        dset.resize(prev_size + indices.shape[0], axis=0)
    else:
        dset.resize(prev_size + data.shape[0], axis=0)
    if chunked_read and isinstance(data, h5py.Dataset):
        # read dataset in chunks of at most `chunked_read` bytes
        if len(data.shape) > 1:
            elem_size = np.prod(data.shape[1:]) * data.dtype.itemsize
        else:
            elem_size = data.dtype.itemsize
        # Integer (floor) division: chunk_size counts whole rows per
        # read. Plain '/' produced a float under Python 3 and broke the
        # slicing below.
        chunk_size = int(chunked_read) // int(elem_size)
        if chunk_size == 0:
            raise RuntimeError(
                "chunked_read is smaller than a single "
                "element along first axis of input")
        start = 0
        offset = prev_size
        if indices is not None:
            end = indices.shape[0]
        else:
            end = len(data)
        while start < end:
            stop = min(end, start + chunk_size)
            if indices is not None:
                indices_chunk = indices[start:stop]
                # index list must be sorted in h5py
                indices_argsort = np.argsort(indices_chunk)
                data_chunk = data[(indices_chunk[indices_argsort]).tolist()]
                # unsort
                data_chunk = np.take(data_chunk, indices_argsort, axis=0)
            else:
                data_chunk = data[start:stop]
            if selection is not None:
                data_chunk = np.take(data_chunk,
                                     np.where(selection[start:stop]),
                                     axis=0)[0]
            if convert_type:
                data_chunk = data_chunk.astype(dtype)
            dset[offset:offset + data_chunk.shape[0]] = data_chunk
            start = stop
            offset += data_chunk.shape[0]
    else:
        if selection is not None:
            data = np.take(data, np.where(selection), axis=0)[0]
        elif indices is not None:
            data = np.take(data, indices, axis=0)
        if convert_type:
            data = data.astype(dtype)
        dset[prev_size:] = data
    if own_file:
        h5file.close()
    else:
        h5file.flush()
def get_flat_events(h5file, generator_params, nevents_per_pt_bin,
                    pt_min, pt_max, pt_bins=10, **kwargs):
    """
    Construct a sample of events over a pT range by combining samples
    constructed in pT intervals in this range.
    """
    bin_edges = np.linspace(pt_min, pt_max, pt_bins + 1)
    for ibin, (lo, hi) in enumerate(zip(bin_edges[:-1], bin_edges[1:])):
        # Each interval writes its events starting at its own offset.
        get_events(h5file, generator_params, nevents_per_pt_bin,
                   lo, hi, offset=ibin * nevents_per_pt_bin, **kwargs)
    # Reweight with finer bins than the generation intervals so the
    # spectrum is flat within each interval as well.
    pt = h5file['trimmed_jet']['pT']
    event_weights = get_flat_weights(pt, pt_min, pt_max, pt_bins * 4)
    h5file.create_dataset('weights', data=event_weights)
class Sample(object):
    """Load the W-signal and QCD-background jet images for one sample
    configuration and provide average images, network outputs and ROC
    curves for it.
    """

    def __init__(self, name, path,
                 prefix_w='w_', prefix_qcd='qcd_',
                 jet_size=1.0, subjet_size=0.3,
                 delphes=True, pileup=False, zoomed=False,
                 pt_min=250, pt_max=300, pt_bins=5,
                 mass_min=None, mass_max=None, jet_suffix=None):
        self.name = name
        self.path = path
        self.prefix_w = prefix_w
        self.prefix_qcd = prefix_qcd
        # Encode the jet definition in the filename stem, e.g. j1p0_sj0p30
        self.filename = 'j{0:.1f}_sj{1:.2f}'.format(jet_size, subjet_size).replace('.', 'p')
        if jet_suffix is not None:
            self.filename += '_{0}'.format(jet_suffix)
        if delphes:
            self.filename += '_delphes'
        self.filename += '_jets'
        if pileup:
            self.filename += '_pileup'
        if zoomed:
            self.filename += '_zoomed'
        print("reading in W images for sample " + self.name)
        # Each images_* attribute is an (images, auxvars, weights) triple
        # as returned by make_flat_images.
        self.images_w = make_flat_images(
            os.path.join(self.path, self.prefix_w + '{0}_images.h5'.format(self.filename)),
            pt_min, pt_max, pt_bins, mass_min=mass_min, mass_max=mass_max)
        print("reading in QCD images for sample " + self.name)
        self.images_qcd = make_flat_images(
            os.path.join(self.path, self.prefix_qcd + '{0}_images.h5'.format(self.filename)),
            pt_min, pt_max, pt_bins, mass_min=mass_min, mass_max=mass_max)
        self.roc = None

    def _avg_image(self, images):
        """Return the weighted average image of one (images, auxvars,
        weights) triple."""
        images, auxvars, weights = images
        print("{0}: plotting {1} images".format(self.name, images.shape[0]))
        print("min weight: {0} max weight: {1}".format(weights.min(), weights.max()))
        with ProgressBar():
            # Weighted sum over the event axis, normalized by total weight.
            avg_image = da.tensordot(images, weights, axes=(0, 0)).compute() / weights.sum()
        return avg_image

    @property
    def avg_w_image(self):
        """Weighted average signal image (computed once, then cached)."""
        try:
            return self._avg_w_image
        except AttributeError:
            self._avg_w_image = self._avg_image(self.images_w)
            return self._avg_w_image

    @property
    def avg_qcd_image(self):
        """Weighted average background image (computed once, then cached)."""
        try:
            return self._avg_qcd_image
        except AttributeError:
            self._avg_qcd_image = self._avg_image(self.images_qcd)
            return self._avg_qcd_image

    def _get_proba(self, prefix, only_proba=True):
        """Load the saved network output for the given sample prefix."""
        network_output_file = os.path.join(self.path, prefix + '{0}_images_proba.h5'.format(self.filename))
        print("getting network output for sample {0} in {1}".format(self.name, network_output_file))
        with h5py.File(network_output_file, 'r') as h5file:
            if only_proba:
                return h5file['Y_proba'].value
            return h5file['Y_test'].value, h5file['Y_proba'].value, h5file['weights'].value

    def get_w_proba(self, only_proba=True):
        """Network output for the signal sample."""
        return self._get_proba(self.prefix_w, only_proba=only_proba)

    def get_qcd_proba(self, only_proba=True):
        """Network output for the background sample."""
        return self._get_proba(self.prefix_qcd, only_proba=only_proba)

    def get_roc(self, fields=None, generator_weight=None,
                nb_per_bin=30, always_use_likelihood=False):
        """Return an inverse ROC curve for the given discriminant(s).

        fields : None, str, or list of at most two of them
            None selects the network output; a string selects an
            auxiliary variable (or an expression of them).
        """
        from .utils import default_inv_roc_curve, lklhd_inv_roc_curve, lklhd_inv_roc_curve2d
        images_w, auxvars_w, weights_w = self.images_w
        images_qcd, auxvars_qcd, weights_qcd = self.images_qcd
        y_true = np.concatenate([np.repeat([[1, 0]], images_w.shape[0], axis=0),
                                 np.repeat([[0, 1]], images_qcd.shape[0], axis=0)])
        if not isinstance(fields, (tuple, list)):
            fields = [fields]
        if len(fields) > 2:
            # Was `raise NotImplemented(...)`: NotImplemented is a constant,
            # not an exception type, and calling it raised a TypeError.
            raise NotImplementedError("cannot combine more than two parameters")
        # Sort with None (the network output) first; a plain sorted() on a
        # None/str mix raises TypeError under Python 3.
        fields = sorted(fields, key=lambda f: (f is not None, f or ''))
        has_aux = False
        preds = []
        for field in fields:
            if field is None:
                # network output
                y_pred = np.concatenate([self.get_w_proba(), self.get_qcd_proba()])
            elif field not in auxvars_w.dtype.names:
                # expression of auxiliary variables
                has_aux = True
                y_pred = np.concatenate([eval_recarray(field, auxvars_w), eval_recarray(field, auxvars_qcd)])
            else:
                has_aux = True
                y_pred = np.concatenate([auxvars_w[field], auxvars_qcd[field]])
            mask_nan_inf(y_pred)
            preds.append(y_pred)
        weights = np.concatenate([weights_w, weights_qcd])
        if generator_weight is not None:
            w_weights = auxvars_w['generator_weights']
            qcd_weights = auxvars_qcd['generator_weights']
            weights *= np.concatenate([w_weights[:, generator_weight],
                                       qcd_weights[:, generator_weight]])
        # remove entries with no weight
        take_weights = weights != 0
        weights = weights[take_weights]
        y_true = y_true[take_weights]
        for idx in range(len(preds)):
            preds[idx] = preds[idx][take_weights]
        if len(preds) > 1:
            return lklhd_inv_roc_curve2d(y_true, preds[0], preds[1], sample_weight=weights, nb_per_bin=nb_per_bin)
        elif has_aux or always_use_likelihood:
            return lklhd_inv_roc_curve(y_true, preds[0], sample_weight=weights, nb_per_bin=nb_per_bin)
        # fall back on default ROC curve function
        return default_inv_roc_curve(y_true, preds[0], sample_weight=weights)

    def plot(self, ax, auxvar, generator_weight=None, fill_invalid=0, **kwargs):
        """Plot normalized signal and background distributions of `auxvar`
        on `ax`; return the (signal, background) bin contents."""
        images_w, auxvars_w, weights_w = self.images_w
        images_qcd, auxvars_qcd, weights_qcd = self.images_qcd
        # TODO: support 2-tuple for 2D likelihood ROC
        if auxvar not in auxvars_w.dtype.names:
            var_w, var_qcd = eval_recarray(auxvar, auxvars_w), eval_recarray(auxvar, auxvars_qcd)
        else:
            var_w, var_qcd = auxvars_w[auxvar], auxvars_qcd[auxvar]
        # replace nan and inf with fill_invalid value
        mask_nan_inf(var_w, fill=fill_invalid)
        mask_nan_inf(var_qcd, fill=fill_invalid)
        # multiply in generator weights if requested
        if generator_weight is not None:
            w_gen_weights = auxvars_w['generator_weights']
            qcd_gen_weights = auxvars_qcd['generator_weights']
            weights_w = weights_w * w_gen_weights[:, generator_weight]
            weights_qcd = weights_qcd * qcd_gen_weights[:, generator_weight]
        # remove 0-weights to help histogram range
        noweight_w = weights_w == 0
        noweight_qcd = weights_qcd == 0
        var_w = var_w[~noweight_w]
        weights_w = weights_w[~noweight_w]
        var_qcd = var_qcd[~noweight_qcd]
        weights_qcd = weights_qcd[~noweight_qcd]
        # histogram signal and background
        sig, _, _ = ax.hist(
            var_w, weights=weights_w, label='Signal {0}'.format(self.name),
            histtype='stepfilled', normed=1,
            facecolor='none', linestyle='-',
            **kwargs)
        bkd, _, _ = ax.hist(
            var_qcd, weights=weights_qcd, label='Background {0}'.format(self.name),
            histtype='stepfilled', normed=1,
            facecolor='none', linestyle='dotted',
            **kwargs)
        # return bin contents
        return sig, bkd
| bsd-3-clause |
edx/edx-e2e-tests | edxapp_acceptance/pages/studio/users.py | 1 | 10620 | """
Page classes to test either the Course Team page or the Library Team page.
"""
import os
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from opaque_keys.edx.locator import CourseLocator
from edxapp_acceptance.pages.studio import BASE_URL
from edxapp_acceptance.pages.studio.course_page import CoursePage
from edxapp_acceptance.pages.studio.utils import HelpMixin
from edxapp_acceptance.pages.common.utils import disable_animations
def wait_for_ajax_or_reload(browser):
    """
    Wait for all ajax requests to finish, OR for the page to reload.

    Normal wait_for_ajax() chokes on occasion if the pages reloads,
    giving "WebDriverException: Message: u'jQuery is not defined'"
    """
    script = "return typeof(jQuery) == 'undefined' || jQuery.active == 0"

    def _ajax_done():
        # jQuery being undefined means the page reloaded (or never had
        # jQuery) -- treat that the same as "no pending requests".
        return browser.execute_script(script)

    EmptyPromise(_ajax_done, "Finished waiting for ajax requests.").fulfill()
class UsersPageMixin(PageObject):
    """ Common functionality for course/library team pages """
    # Selector for the "add user" form's email input; also used to detect
    # whether the form is visible. Use this constant instead of repeating
    # the literal so the selectors cannot drift apart.
    new_user_form_selector = '.form-create.create-user .user-email-input'

    def url(self):
        """
        URL to this page - override in subclass
        """
        raise NotImplementedError

    def is_browser_on_page(self):
        """
        Returns True if the browser has loaded the page.
        """
        return self.q(css='body.view-team').present and not self.q(css='.ui-loading').present

    @property
    def users(self):
        """
        Return a list of users listed on this page.
        """
        return self.q(css='.user-list .user-item').map(
            lambda el: UserWrapper(self.browser, el.get_attribute('data-email'))
        ).results

    @property
    def usernames(self):
        """
        Returns a list of user names for users listed on this page
        """
        return [user.name for user in self.users]

    @property
    def has_add_button(self):
        """
        Is the "New Team Member" button present?
        """
        return self.q(css='.create-user-button').present

    def click_add_button(self):
        """
        Click on the "New Team Member" button
        """
        self.q(css='.create-user-button').first.click()
        self.wait_for(lambda: self.new_user_form_visible, "Add user form is visible")

    @property
    def new_user_form_visible(self):
        """ Is the new user form visible? """
        # Previously duplicated the literal selector; use the class
        # constant so it stays in sync with set_new_user_email.
        return self.q(css=self.new_user_form_selector).visible

    def set_new_user_email(self, email):
        """ Set the value of the "New User Email Address" field. """
        self.q(css=self.new_user_form_selector).fill(email)

    def click_submit_new_user_form(self):
        """ Submit the "New User" form """
        self.q(css='.form-create.create-user .action-primary').click()
        wait_for_ajax_or_reload(self.browser)
        self.wait_for_element_visibility('.user-list', 'wait for team to load')

    def get_user(self, email):
        """ Gets user wrapper by email """
        target_users = [user for user in self.users if user.email == email]
        assert len(target_users) == 1
        return target_users[0]

    def add_user_to_course(self, email):
        """ Adds user to a course/library """
        self.wait_for_element_visibility('.create-user-button', "Add team member button is available")
        self.click_add_button()
        self.set_new_user_email(email)
        self.click_submit_new_user_form()
        self.wait_for_page()

    def delete_user_from_course(self, email):
        """ Deletes user from course/library """
        target_user = self.get_user(email)
        target_user.click_delete()
        self.wait_for_page()

    def modal_dialog_visible(self, dialog_type):
        """ Checks if modal dialog of specified class is displayed """
        return self.q(css=f'.prompt.{dialog_type}').visible

    def modal_dialog_text(self, dialog_type):
        """ Gets modal dialog text """
        return self.q(css=f'.prompt.{dialog_type} .message').text[0]

    def wait_until_no_loading_indicator(self):
        """
        When the page first loads, there is a loading indicator and most
        functionality is not yet available. This waits for that loading to finish
        and be removed from the DOM.

        This method is different from wait_until_ready because the loading element
        is removed from the DOM, rather than hidden.

        It also disables animations for improved test reliability.
        """
        self.wait_for(
            lambda: not self.q(css='.ui-loading').present,
            "Wait for page to complete its initial loading"
        )
        disable_animations(self)

    def wait_until_ready(self):
        """
        When the page first loads, there is a loading indicator and most
        functionality is not yet available. This waits for that loading to
        finish.

        This method is different from wait_until_no_loading_indicator because this expects
        the loading indicator to still exist on the page; it is just hidden.

        It also disables animations for improved test reliability.
        """
        self.wait_for_element_invisibility(
            '.ui-loading',
            'Wait for the page to complete its initial loading'
        )
        disable_animations(self)
class LibraryUsersPage(UsersPageMixin, HelpMixin):
    """
    Library Team page in Studio
    """

    def __init__(self, browser, locator):
        """`locator` identifies the library whose team page this is."""
        super().__init__(browser)
        self.locator = locator

    @property
    def url(self):
        """
        URL to the "User Access" page for the given library.
        """
        return f"{BASE_URL}/library/{str(self.locator)}/team/"
class CourseTeamPage(UsersPageMixin, CoursePage):
    """
    Course Team page in Studio.
    """
    url_path = "course_team"

    @property
    def url(self):
        """
        Construct a URL to the page within the course.
        """
        # TODO - is there a better way to make this agnostic to the underlying default module store?
        # NOTE(review): DEFAULT_STORE defaults to 'draft', which makes the
        # locator deprecated-style -- confirm this matches the test
        # environment's module store configuration.
        default_store = os.environ.get('DEFAULT_STORE', 'draft')
        course_key = CourseLocator(
            self.course_info['course_org'],
            self.course_info['course_num'],
            self.course_info['course_run'],
            deprecated=(default_store == 'draft')
        )
        return "/".join([BASE_URL, self.url_path, str(course_key)])
class UserWrapper(PageObject):
    """
    A PageObject representing a wrapper around a user listed on the course/library team page.
    """
    # This wrapper has no standalone URL; it scopes queries to one list item.
    url = None

    # NOTE(review): this mapping appears unused within this class --
    # confirm whether anything references it before removing.
    COMPONENT_BUTTONS = {
        'basic_tab': '.editor-tabs li.inner_tab_wrap:nth-child(1) > a',
        'advanced_tab': '.editor-tabs li.inner_tab_wrap:nth-child(2) > a',
        'save_settings': '.action-save',
    }

    def __init__(self, browser, email):
        super().__init__(browser)
        self.email = email
        # All queries are scoped to the list item whose data-email matches.
        self.selector = f'.user-list .user-item[data-email="{self.email}"]'

    def is_browser_on_page(self):
        """
        Sanity check that our wrapper element is on the page.
        """
        return self.q(css=self.selector).present

    def _bounded_selector(self, selector):
        """
        Return `selector`, but limited to this particular user entry's context
        """
        return f'{self.selector} {selector}'

    @property
    def name(self):
        """ Get this user's username, as displayed. """
        text = self.q(css=self._bounded_selector('.user-username')).text
        return text[0] if text else None

    @property
    def role_label(self):
        """ Get this user's role, as displayed. """
        text = self.q(css=self._bounded_selector('.flag-role .value')).text
        return text[0] if text else None

    @property
    def is_current_user(self):
        """ Does the UI indicate that this is the current user? """
        return self.q(css=self._bounded_selector('.flag-role .msg-you')).present

    @property
    def can_promote(self):
        """ Can this user be promoted to a more powerful role? """
        return self.q(css=self._bounded_selector('.add-admin-role')).present

    @property
    def promote_button_text(self):
        """ What does the promote user button say? """
        text = self.q(css=self._bounded_selector('.add-admin-role')).text
        return text[0] if text else None

    def click_promote(self):
        """ Click on the button to promote this user to the more powerful role """
        self.q(css=self._bounded_selector('.add-admin-role')).click()
        wait_for_ajax_or_reload(self.browser)

    @property
    def can_demote(self):
        """ Can this user be demoted to a less powerful role? """
        return self.q(css=self._bounded_selector('.remove-admin-role')).present

    @property
    def demote_button_text(self):
        """ What does the demote user button say? """
        text = self.q(css=self._bounded_selector('.remove-admin-role')).text
        return text[0] if text else None

    def click_demote(self):
        """ Click on the button to demote this user to the less powerful role """
        self.q(css=self._bounded_selector('.remove-admin-role')).click()
        wait_for_ajax_or_reload(self.browser)

    @property
    def can_delete(self):
        """ Can this user be deleted? """
        # Delete is only offered when the action is enabled for this user.
        return self.q(css=self._bounded_selector('.action-delete:not(.is-disabled) .remove-user')).present
def click_delete(self):
""" Click the button to delete this user. """
disable_animations(self)
self.q(css=self._bounded_selector('.remove-user')).click()
# We can't use confirm_prompt because its wait_for_ajax is flaky when the page is expected to reload.
self.wait_for_element_visibility('.prompt', 'Prompt is visible')
self.wait_for_element_visibility('.prompt .action-primary', 'Confirmation button is visible')
self.q(css='.prompt .action-primary').click()
self.wait_for_element_absence('.page-prompt .is-shown', 'Confirmation prompt is hidden')
wait_for_ajax_or_reload(self.browser)
@property
def has_no_change_warning(self):
""" Does this have a warning in place of the promote/demote buttons? """
return self.q(css=self._bounded_selector('.notoggleforyou')).present
@property
def no_change_warning_text(self):
""" Text of the warning seen in place of the promote/demote buttons. """
return self.q(css=self._bounded_selector('.notoggleforyou')).text[0]
| agpl-3.0 |
RonSadlier/gr-qitkat | python/qa_sdc_decode_bb.py | 1 | 5568 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Ronald Sadlier - Oak Ridge National Laboratory
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import qitkat_swig as qitkat
class qa_sdc_decode_bb(gr_unittest.TestCase):
    """Unit tests for qitkat.sdc_decode_bb at superdense-coding arities 2-7.

    For input, bit pair ordering is from right to left, while the bit
    ordering is from left to right.  So for example (if n=2):
    (00000001) => (01,00,00,00).
    """

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        # Drop the flowgraph so each test starts from a clean slate.
        self.tb = None

    def _run_decode(self, n, src_data, expected_result):
        """Run src_data through sdc_decode_bb(n) and assert the sink output.

        Args:
            n: superdense-coding arity passed to the decoder block.
            src_data: tuple of input bytes; its length must be a multiple of
                the decoder's group size for this n.
            expected_result: tuple of expected decoded bytes.
        """
        src = blocks.vector_source_b(src_data)
        sdc_decode = qitkat.sdc_decode_bb(n)
        dst = blocks.vector_sink_b()
        self.tb.connect(src, sdc_decode)
        self.tb.connect(sdc_decode, dst)
        self.tb.run()
        self.assertEqual(expected_result, dst.data())

    def test_001_n2(self):
        # Input length must be a multiple of 4; output decodes to
        # (00000000, 00000001, 01010101, 10101010, 00001111, 11111111).
        self._run_decode(
            2,
            (0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 0, 0, 3, 3, 3, 3),
            (0, 1, 85, 170, 15, 255))

    def test_002_n3(self):
        # Input length must be a multiple of 8.
        self._run_decode(
            3,
            (5, 2, 5, 0, 0, 0, 0, 0, 7, 7, 7, 7, 0, 4, 2, 5),
            (0, 1, 85, 170, 15, 255))

    def test_003_n4(self):
        # Input length must be a multiple of 2.
        self._run_decode(
            4,
            (0, 0, 1, 0, 5, 5, 10, 10, 15, 0, 15, 15),
            (0, 1, 85, 170, 15, 255))

    def test_004_n5(self):
        # Same source data as n=4: one symbol still fits in half a byte.
        self._run_decode(
            5,
            (0, 0, 1, 0, 5, 5, 10, 10, 15, 0, 15, 15),
            (0, 1, 85, 170, 15, 255))

    def test_005_n6(self):
        # Input length must be a multiple of 8; output decodes to
        # (00000000, 00000001, 01010101, 10101010, 00001111, 01010101,
        #  10101010, 00001111, 00001111, 11111111).
        self._run_decode(
            6,
            (15, 16, 10, 11, 21, 0, 0, 0, 31, 31, 3, 30, 0, 21, 22, 10),
            (0, 1, 85, 170, 15, 85, 170, 15, 15, 255))

    def test_006_n7(self):
        # Same source data as n=6: one symbol still fits in five bits.
        self._run_decode(
            7,
            (15, 16, 10, 11, 21, 0, 0, 0, 31, 31, 3, 30, 0, 21, 22, 10),
            (0, 1, 85, 170, 15, 85, 170, 15, 15, 255))
if __name__ == '__main__':
    # Run the suite under GNU Radio's unittest wrapper; results go to XML.
    gr_unittest.run(qa_sdc_decode_bb, "qa_sdc_decode_bb.xml")
| gpl-3.0 |
gooddata/openstack-nova | nova/tests/functional/notification_sample_tests/test_libvirt.py | 4 | 2153 | # Copyright 2018 NTT Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
import nova.conf
from nova import exception
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
from nova.tests.unit import fake_notifier
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.virt.libvirt import host
CONF = nova.conf.CONF
class TestLibvirtErrorNotificationSample(
        notification_sample_base.NotificationSampleTestBase):
    """Checks the versioned notification emitted when the libvirt
    connection cannot be established on compute start-up."""

    def setUp(self):
        self.flags(compute_driver='libvirt.LibvirtDriver')
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.useFixture(fixtures.MockPatchObject(host.Host, 'initialize'))
        super(TestLibvirtErrorNotificationSample, self).setUp()

    @mock.patch('nova.virt.libvirt.host.Host._get_connection')
    def test_libvirt_connect_error(self, mock_get_conn):
        mock_get_conn.side_effect = fakelibvirt.libvirtError(
            'Sample exception for versioned notification test.')
        # restart the compute service
        self.assertRaises(exception.HypervisorUnavailable,
                          self.restart_compute_service, self.compute)
        notifications = fake_notifier.VERSIONED_NOTIFICATIONS
        self.assertEqual(1, len(notifications))
        expected_replacements = {
            'ip': CONF.my_ip,
            'reason.function_name': self.ANY,
            'reason.module_name': self.ANY,
            'reason.traceback': self.ANY
        }
        self._verify_notification(
            'libvirt-connect-error',
            replacements=expected_replacements,
            actual=notifications[0])
| apache-2.0 |
chriskmanx/qmole | QMOLEDEV64/nmap-4.76/zenmap/zenmapGUI/ProfileCombo.py | 2 | 2102 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Insecure.Com LLC.
#
# Author: Adriano Monteiro Marques <py.adriano@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import gtk
from zenmapCore.UmitConf import CommandProfile
from zenmapCore.I18N import _
class ProfileCombo(gtk.ComboBoxEntry, object):
    """Editable combo box listing the saved scan profiles, with text
    completion over the profile names."""

    def __init__(self):
        gtk.ComboBoxEntry.__init__(self, gtk.ListStore(str), 0)
        self.completion = gtk.EntryCompletion()
        self.child.set_completion(self.completion)
        self.completion.set_model(self.get_model())
        self.completion.set_text_column(0)
        self.update()

    def set_profiles(self, profiles):
        """Replace the combo's contents with the given profile names."""
        # Was a loop deleting rows one by one (via a `list` local that
        # shadowed the builtin); ListStore.clear() drops all rows at once.
        model = self.get_model()
        model.clear()
        for command in profiles:
            model.append([command])

    def update(self):
        """Reload the profile names from the configuration, sorted."""
        profile = CommandProfile()
        self.set_profiles(sorted(profile.sections()))

    def get_selected_profile(self):
        """Return the profile name currently in the entry."""
        return self.child.get_text()

    def set_selected_profile(self, profile):
        """Put `profile` into the entry."""
        self.child.set_text(profile)

    selected_profile = property(get_selected_profile, set_selected_profile)
w = gtk.Window()
p = ProfileCombo()
p.update()
w.add(p)
w.show_all()
gtk.main()
| gpl-3.0 |
sarvex/tensorflow | tensorflow/lite/testing/op_tests/unique.py | 17 | 2365 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for unique."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_unique_tests(options):
  """Make a set of tests for Unique op."""

  # Each entry is one parameter combination fed to the zip-test generator.
  test_parameters = [{
      "input_shape": [[1]],
      "index_type": [tf.int32, tf.int64, None],
      "input_values": [3]
  }, {
      "input_shape": [[5]],
      "index_type": [tf.int32, tf.int64],
      "input_values": [[3, 2, 1, 2, 3]]
  }, {
      "input_shape": [[7]],
      "index_type": [tf.int32, tf.int64],
      "input_values": [[1, 1, 1, 1, 1, 1, 1]]
  }, {
      "input_shape": [[5]],
      "index_type": [tf.int32, tf.int64],
      "input_values": [[3, 2, 1, 0, -1]]
  }]

  def build_graph(parameters):
    """Build the tf.unique graph for one parameter combination."""
    input_tensor = tf.compat.v1.placeholder(
        dtype=tf.int32, name="input", shape=parameters["input_shape"])
    index_type = parameters["index_type"]
    if index_type is None:
      output = tf.unique(input_tensor)
    else:
      output = tf.unique(input_tensor, index_type)
    return [input_tensor], output

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random int32 data through the graph and capture the outputs."""
    feed_values = [create_tensor_data(tf.int32, parameters["input_shape"])]
    return feed_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, feed_values)))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| apache-2.0 |
zhouzhenghui/python-for-android | python3-alpha/python3-src/Lib/test/inspect_fodder2.py | 179 | 1419 | # line 1
def wrap(foo=None):
def wrapper(func):
return func
return wrapper
# line 7
def replace(func):
def insteadfunc():
print('hello')
return insteadfunc
# line 13
@wrap()
@wrap(wrap)
def wrapped():
pass
# line 19
@replace
def gone():
pass
# line 24
oll = lambda m: m
# line 27
tll = lambda g: g and \
g and \
g
# line 32
tlli = lambda d: d and \
d
# line 36
def onelinefunc(): pass
# line 39
def manyargs(arg1, arg2,
arg3, arg4): pass
# line 43
def twolinefunc(m): return m and \
m
# line 47
a = [None,
lambda x: x,
None]
# line 52
def setfunc(func):
globals()["anonymous"] = func
setfunc(lambda x, y: x*y)
# line 57
def with_comment(): # hello
world
# line 61
multiline_sig = [
lambda x, \
y: x+y,
None,
]
# line 68
def func69():
class cls70:
def func71():
pass
return cls70
extra74 = 74
# line 76
def func77(): pass
(extra78, stuff78) = 'xy'
extra79 = 'stop'
# line 81
class cls82:
def func83(): pass
(extra84, stuff84) = 'xy'
extra85 = 'stop'
# line 87
def func88():
# comment
return 90
# line 92
def f():
class X:
def g():
"doc"
return 42
return X
method_in_dynamic_class = f().g
#line 101
def keyworded(*arg1, arg2=1):
pass
#line 105
def annotated(arg1: list):
pass
#line 109
def keyword_only_arg(*, arg):
pass
| apache-2.0 |
wgcv/SWW-Crashphone | lib/python2.7/site-packages/PIL/PyAccess.py | 13 | 8645 | #
# The Python Imaging Library
# Pillow fork
#
# Python implementation of the PixelAccess Object
#
# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved.
# Copyright (c) 1995-2009 by Fredrik Lundh.
# Copyright (c) 2013 Eric Soroos
#
# See the README file for information on usage and redistribution
#
# Notes:
#
# * Implements the pixel access object following Access.
# * Does not implement the line functions, as they don't appear to be used
# * Taking only the tuple form, which is used from python.
# * Fill.c uses the integer form, but it's still going to use the old
# Access.c implementation.
#
from __future__ import print_function
from cffi import FFI
import sys
DEBUG = 0
defs = """
struct Pixel_RGBA {
unsigned char r,g,b,a;
};
struct Pixel_I16 {
unsigned char l,r;
};
"""
ffi = FFI()
ffi.cdef(defs)
class PyAccess(object):
    """Pure-python pixel access object, backed by the image's raw memory
    through cffi pointers.  Subclasses cast the pointers to their pixel
    layout in _post_init and implement get_pixel/set_pixel."""

    def __init__(self, img, readonly=False):
        vals = dict(img.im.unsafe_ptrs)
        self.readonly = readonly
        self.image8 = ffi.cast('unsigned char **', vals['image8'])
        self.image32 = ffi.cast('int **', vals['image32'])
        self.image = ffi.cast('unsigned char **', vals['image'])
        self.xsize = vals['xsize']
        self.ysize = vals['ysize']
        if DEBUG:
            print(vals)
        self._post_init()

    def _post_init(self):
        # Hook for subclasses to cast the raw pointers into their pixel
        # struct type.  Bug fix: the original definition omitted `self`,
        # so self._post_init() raised TypeError on the base class.
        pass

    def __setitem__(self, xy, color):
        """
        Modifies the pixel at x,y. The color is given as a single
        numerical value for single band images, and a tuple for
        multi-band images

        :param xy: The pixel coordinate, given as (x, y).
        :param value: The pixel value.
        """
        if self.readonly:
            raise ValueError('Attempt to putpixel a read only image')
        (x, y) = self.check_xy(xy)
        return self.set_pixel(x, y, color)

    def __getitem__(self, xy):
        """
        Returns the pixel at x,y. The pixel is returned as a single
        value for single band images or a tuple for multiple band
        images

        :param xy: The pixel coordinate, given as (x, y).
        :returns: a pixel value for single band images, a tuple of
           pixel values for multiband images.
        """
        (x, y) = self.check_xy(xy)
        return self.get_pixel(x, y)

    putpixel = __setitem__
    getpixel = __getitem__

    def check_xy(self, xy):
        """Validate that xy lies inside the image and return it unchanged.

        :raises ValueError: if the coordinate is out of range.
        """
        (x, y) = xy
        if not (0 <= x < self.xsize and 0 <= y < self.ysize):
            raise ValueError('pixel location out of range')
        return xy
class _PyAccess32_2(PyAccess):
    """ PA, LA, stored in first and last bytes of a 32 bit word """

    def _post_init(self, *args, **kwargs):
        self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32)

    def get_pixel(self, x, y):
        px = self.pixels[y][x]
        return px.r, px.a

    def set_pixel(self, x, y, color):
        # Clamp each channel of the (value, alpha) tuple to 8 bits.
        px = self.pixels[y][x]
        px.r = min(color[0], 255)
        px.a = min(color[1], 255)
class _PyAccess32_3(PyAccess):
    """ RGB and friends, stored in the first three bytes of a 32 bit word """

    def _post_init(self, *args, **kwargs):
        self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32)

    def get_pixel(self, x, y):
        px = self.pixels[y][x]
        return px.r, px.g, px.b

    def set_pixel(self, x, y, color):
        # Clamp each channel of the (r, g, b) tuple to 8 bits.
        px = self.pixels[y][x]
        px.r = min(color[0], 255)
        px.g = min(color[1], 255)
        px.b = min(color[2], 255)
class _PyAccess32_4(PyAccess):
    """ RGBA etc, all 4 bytes of a 32 bit word """

    def _post_init(self, *args, **kwargs):
        self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32)

    def get_pixel(self, x, y):
        px = self.pixels[y][x]
        return px.r, px.g, px.b, px.a

    def set_pixel(self, x, y, color):
        # Clamp each channel of the (r, g, b, a) tuple to 8 bits.
        px = self.pixels[y][x]
        px.r = min(color[0], 255)
        px.g = min(color[1], 255)
        px.b = min(color[2], 255)
        px.a = min(color[3], 255)
class _PyAccess8(PyAccess):
    """ 1, L, P, 8 bit images stored as uint8 """

    def _post_init(self, *args, **kwargs):
        self.pixels = self.image8

    def get_pixel(self, x, y):
        return self.pixels[y][x]

    def set_pixel(self, x, y, color):
        try:
            # integer
            self.pixels[y][x] = min(color, 255)
        except TypeError:
            # Narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; TypeError is what the tuple
            # path raises from min()/the cdata assignment.
            self.pixels[y][x] = min(color[0], 255)
class _PyAccessI16_N(PyAccess):
    """ I;16 access, native bitendian without conversion """

    def _post_init(self, *args, **kwargs):
        self.pixels = ffi.cast('unsigned short **', self.image)

    def get_pixel(self, x, y):
        return self.pixels[y][x]

    def set_pixel(self, x, y, color):
        try:
            # integer
            self.pixels[y][x] = min(color, 65535)
        except TypeError:
            # Narrowed from a bare `except:`; TypeError is what the tuple
            # path raises from min()/the cdata assignment.
            self.pixels[y][x] = min(color[0], 65535)
class _PyAccessI16_L(PyAccess):
    """ I;16L access, with conversion """

    def _post_init(self, *args, **kwargs):
        self.pixels = ffi.cast('struct Pixel_I16 **', self.image)

    def get_pixel(self, x, y):
        pixel = self.pixels[y][x]
        # little-endian: low byte first
        return pixel.l + pixel.r * 256

    def set_pixel(self, x, y, color):
        pixel = self.pixels[y][x]
        try:
            color = min(color, 65535)
        except TypeError:
            # Narrowed from a bare `except:`; a single-value tuple was
            # passed instead of a plain integer.
            color = min(color[0], 65535)
        pixel.l = color & 0xFF
        pixel.r = color >> 8
class _PyAccessI16_B(PyAccess):
    """ I;16B access, with conversion """

    def _post_init(self, *args, **kwargs):
        self.pixels = ffi.cast('struct Pixel_I16 **', self.image)

    def get_pixel(self, x, y):
        pixel = self.pixels[y][x]
        # big-endian: high byte first
        return pixel.l * 256 + pixel.r

    def set_pixel(self, x, y, color):
        pixel = self.pixels[y][x]
        try:
            color = min(color, 65535)
        except TypeError:
            # Narrowed from a bare `except:`; a single-value tuple was
            # passed instead of a plain integer.
            color = min(color[0], 65535)
        pixel.l = color >> 8
        pixel.r = color & 0xFF
class _PyAccessI32_N(PyAccess):
    """ Signed Int32 access, native endian """
    def _post_init(self, *args, **kwargs):
        # Native layout: the int** view from the base class is already correct.
        self.pixels = self.image32
    def get_pixel(self, x, y):
        return self.pixels[y][x]
    def set_pixel(self, x, y, color):
        # No clamping: the full signed 32-bit range is stored as-is.
        self.pixels[y][x] = color
class _PyAccessI32_Swap(PyAccess):
    """ I;32L/B access, with byteswapping conversion """

    def _post_init(self, *args, **kwargs):
        self.pixels = self.image32

    def reverse(self, i):
        # Reverse the byte order of a 32-bit int through a scratch buffer.
        scratch = ffi.new('int *', i)
        raw = ffi.cast('unsigned char *', scratch)
        raw[0], raw[1], raw[2], raw[3] = raw[3], raw[2], raw[1], raw[0]
        return ffi.cast('int *', raw)[0]

    def get_pixel(self, x, y):
        return self.reverse(self.pixels[y][x])

    def set_pixel(self, x, y, color):
        self.pixels[y][x] = self.reverse(color)
class _PyAccessF(PyAccess):
    """ 32 bit float access """

    def _post_init(self, *args, **kwargs):
        self.pixels = ffi.cast('float **', self.image32)

    def get_pixel(self, x, y):
        return self.pixels[y][x]

    def set_pixel(self, x, y, color):
        try:
            # not a tuple
            self.pixels[y][x] = color
        except TypeError:
            # Narrowed from a bare `except:`; TypeError is what assigning
            # a tuple to a float cdata slot raises.
            self.pixels[y][x] = color[0]
# Image mode -> access implementation, for modes whose layout does not
# depend on host byte order.
mode_map = {'1': _PyAccess8,
            'L': _PyAccess8,
            'P': _PyAccess8,
            'LA': _PyAccess32_2,
            'PA': _PyAccess32_2,
            'RGB': _PyAccess32_3,
            'LAB': _PyAccess32_3,
            'HSV': _PyAccess32_3,
            'YCbCr': _PyAccess32_3,
            'RGBA': _PyAccess32_4,
            'RGBa': _PyAccess32_4,
            'RGBX': _PyAccess32_4,
            'CMYK': _PyAccess32_4,
            'F': _PyAccessF,
            'I': _PyAccessI32_N,
            }
# Endian-sensitive I;16/I;32 modes: pick direct access when the stored byte
# order matches the host, the converting/byteswapping variant otherwise.
if sys.byteorder == 'little':
    mode_map['I;16'] = _PyAccessI16_N
    mode_map['I;16L'] = _PyAccessI16_N
    mode_map['I;16B'] = _PyAccessI16_B
    mode_map['I;32L'] = _PyAccessI32_N
    mode_map['I;32B'] = _PyAccessI32_Swap
else:
    mode_map['I;16'] = _PyAccessI16_L
    mode_map['I;16L'] = _PyAccessI16_L
    mode_map['I;16B'] = _PyAccessI16_N
    mode_map['I;32L'] = _PyAccessI32_Swap
    mode_map['I;32B'] = _PyAccessI32_N
def new(img, readonly=False):
    """Return a pixel access object for img's mode, or None when the mode
    has no PyAccess implementation."""
    access_type = mode_map.get(img.mode)
    if access_type is None:
        if DEBUG:
            print("PyAccess Not Implemented: %s" % img.mode)
        return None
    if DEBUG:
        print("New PyAccess: %s" % img.mode)
    return access_type(img, readonly)
# End of file
| apache-2.0 |
henryiii/rootpy | rootpy/extern/six.py | 2715 | 30098 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
    """Attach *doc* as the docstring of *func*."""
    func.__doc__ = doc
def _import_module(name):
    """Import *name* and return the module object registered for it
    (i.e. the module after the last dot)."""
    __import__(name)
    module = sys.modules[name]
    return module
class _LazyDescr(object):
    # Data descriptor that resolves its value on first access via the
    # subclass-provided _resolve(), caches it on the instance, and then
    # deletes itself from the class so later lookups are plain attribute
    # reads.
    def __init__(self, name):
        self.name = name
    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result)      # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
class MovedModule(_LazyDescr):
    # Lazy placeholder for a module that lives under a different name on
    # Python 2 ("old") vs. Python 3 ("new", defaulting to the public name).

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            self.mod = name if new is None else new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)

    def __getattr__(self, attr):
        # Resolve the real module on demand and cache the attribute
        # locally so this only runs once per attribute.
        resolved = self._resolve()
        value = getattr(resolved, attr)
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
    # Module stand-in whose attributes are lazy descriptors listed in
    # _moved_attributes.

    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        # Mirror the (subclass) docstring onto the module instance.
        self.__doc__ = self.__class__.__doc__

    def __dir__(self):
        base = ["__doc__", "__name__"]
        return base + [attr.name for attr in self._moved_attributes]

    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    # Lazy placeholder for an attribute that moved between modules across
    # Python versions.  On PY3, new_mod/new_attr default to the public
    # name (new_attr falling back to old_attr first).

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            self.mod = name if new_mod is None else new_mod
            if new_attr is not None:
                self.attr = new_attr
            elif old_attr is not None:
                self.attr = old_attr
            else:
                self.attr = name
        else:
            self.mod = old_mod
            self.attr = name if old_attr is None else old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):
    """
    A meta path importer to import six.moves and its submodules.
    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """
    def __init__(self, six_module_name):
        # six_module_name is the package prefix (normally "six") under
        # which the lazy modules are registered.
        self.name = six_module_name
        self.known_modules = {}
    def _add_module(self, mod, *fullnames):
        # Register `mod` under one or more dotted names below self.name.
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod
    def _get_module(self, fullname):
        # Lookup by name relative to self.name; raises KeyError if unknown.
        return self.known_modules[self.name + "." + fullname]
    def find_module(self, fullname, path=None):
        # PEP302 finder: only claim the modules we registered ourselves.
        if fullname in self.known_modules:
            return self
        return None
    def __get_module(self, fullname):
        # Absolute-name lookup, converting KeyError to the ImportError
        # the import machinery expects.
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)
    def load_module(self, fullname):
        # PEP302 loader: return the cached module, resolving lazy
        # MovedModule placeholders to the real module first.
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod
    def is_package(self, fullname):
        """
        Return true, if the named module is a package.
        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")
    def get_code(self, fullname):
        """Return None
        Required, if is_package is implemented"""
        self.__get_module(fullname)  # eventually raises ImportError
        return None
    get_source = get_code  # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
# Lazy stand-in for the Python 3 ``urllib.parse`` module; attributes are
# resolved on first access by _LazyModule (defined earlier in six.py).
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
# Relocation table: (public name, Python 2 home module, Python 3 home module).
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
# Expose each table entry as a class attribute so _LazyModule can resolve it.
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
# Register under both the flat (underscore) and dotted (package-style) names.
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
# Lazy stand-in for the Python 3 ``urllib.error`` module.
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
# Relocation table: (public name, Python 2 home module, Python 3 home module).
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
# Expose each table entry as a class attribute so _LazyModule can resolve it.
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
# Register under both the flat (underscore) and dotted (package-style) names.
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
# Lazy stand-in for the Python 3 ``urllib.request`` module.
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
# Relocation table: (public name, Python 2 home module, Python 3 home module).
# Handlers/openers come from urllib2; the legacy helpers come from urllib.
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
# Expose each table entry as a class attribute so _LazyModule can resolve it.
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
# Register under both the flat (underscore) and dotted (package-style) names.
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
# Lazy stand-in for the Python 3 ``urllib.response`` module.
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
# Relocation table: (public name, Python 2 home module, Python 3 home module).
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
# Expose each table entry as a class attribute so _LazyModule can resolve it.
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
# Register under both the flat (underscore) and dotted (package-style) names.
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
# Lazy stand-in for the Python 3 ``urllib.robotparser`` module.
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
# Relocation table: the whole py2 ``robotparser`` module became
# ``urllib.robotparser`` in Python 3.
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
# Expose each table entry as a class attribute so _LazyModule can resolve it.
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
# Register under both the flat (underscore) and dotted (package-style) names.
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
# Umbrella package tying the five lazy submodules together so that
# ``six.moves.urllib.parse`` etc. mirror the Python 3 package layout.
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
# Submodules are fetched from the importer registry populated above.
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
# Advertise only the five public submodules.
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
    """Register *move* (a MovedAttribute or MovedModule) on ``six.moves``."""
    # Attaching it to the _MovedItems class makes it resolvable as
    # six.moves.<name> through the lazy-attribute machinery.
    name = move.name
    setattr(_MovedItems, name, move)
def remove_move(name):
    """Remove the item called *name* from ``six.moves``.

    Raises AttributeError if no such move exists.
    """
    # A move may live on the _MovedItems class (normal registration) or
    # directly in the instantiated module's __dict__; try both homes.
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
# Names of the function/method introspection attributes, which were renamed
# between Python 2 (im_*/func_*) and Python 3 (dunder forms).  They feed the
# get_method_* / get_function_* attrgetters defined later in six.py.
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
# ``next()`` became a builtin in Python 2.6; fall back to calling the
# iterator's .next() method on older interpreters.
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
# ``callable()`` was removed in Python 3.0/3.1; emulate it via the MRO
# when the builtin is absent.
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
# Unbound/bound-method helpers: Python 3 has no unbound methods, so the
# "unbound" helpers are identity functions there; Python 2 must go through
# types.MethodType and the im_func attribute.
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
# Python 3 iterators need no base class; __next__ is the protocol.
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
# Base class letting py2 code define only __next__; .next() delegates to it.
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
# Version-neutral accessors built from the attribute names chosen above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
# Dictionary iteration/view helpers: py3 .keys()/.values()/.items() are
# already views, so the iter* helpers wrap them in iter(); py2 delegates to
# the iter*/view* methods that existed on dicts (and Django-style
# multi-dicts, for iterlists) at the time.
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
# Byte/text literal helpers and byte-indexing shims.  On py3, b() encodes to
# latin-1 (a 1:1 byte mapping) and bytes already index as ints; on py2, str
# is bytes so most helpers are identities or ord()-based wrappers.
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
# unittest method names; the Regexp spellings were deprecated in 3.2.
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
    """Call the version-appropriate unittest assertCountEqual on *self*."""
    method = getattr(self, _assertCountEqual)
    return method(*args, **kwargs)


def assertRaisesRegex(self, *args, **kwargs):
    """Call the version-appropriate unittest assertRaisesRegex on *self*."""
    method = getattr(self, _assertRaisesRegex)
    return method(*args, **kwargs)


def assertRegex(self, *args, **kwargs):
    """Call the version-appropriate unittest assertRegex on *self*."""
    method = getattr(self, _assertRegex)
    return method(*args, **kwargs)
# exec / raise syntax differs between py2 and py3, so the py2 variants are
# built with exec over string literals to keep this file importable on both.
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
# Default to the caller's globals/locals, mimicking py2 bare exec.
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
# raise ... from ... exists only on py3; 3.2 needs the None guard because
# "raise x from None" was not accepted there.
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
# print function: reuse the builtin where available, otherwise provide a
# pure-Python emulation for py2.4/2.5 (which lack "from __future__ import
# print_function").
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
# Track whether any piece is unicode so sep/end get a matching type.
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
# Python < 3.3 lacks the flush= keyword on print; wrap to emulate it.
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
# functools.wraps only sets __wrapped__ itself from Python 3.4 on; for older
# versions wrap it so decorated functions still expose the original callable.
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with metaclass *meta* and the given *bases*.

    Usage: ``class C(with_metaclass(Meta, Base)): ...`` — works identically
    under Python 2 and Python 3 class syntax.
    """
    # Trick: return a throw-away class whose metaclass is a one-shot shim.
    # When the user's class statement subclasses it, the shim's __new__ fires
    # exactly once and builds the real class with *meta* from the *bases*
    # captured in this closure, discarding the temporary class entirely.
    class metaclass(meta):

        def __new__(mcls, name, this_bases, namespace):
            return meta(name, bases, namespace)

    return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator that rebuilds the decorated class with *metaclass*.

    Equivalent to py3 ``class C(metaclass=Meta)`` / py2 ``__metaclass__``,
    but usable from a single code base.
    """
    def wrapper(cls):
        # Recreate the class from its own namespace so the metaclass takes
        # effect; drop the descriptors the first creation injected.
        body = dict(cls.__dict__)
        slots = body.get('__slots__')
        if slots is not None:
            if isinstance(slots, str):
                slots = [slots]
            # Slot names exist in __dict__ as member descriptors; the new
            # class creation will regenerate them, so remove the stale ones.
            for slot_name in slots:
                body.pop(slot_name)
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.

    To support Python 2 and 3 with a single code base, define a __str__
    method returning text and apply this decorator to the class.
    """
    if not PY2:
        # Python 3: str already returns text, nothing to patch.
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    # Move the text-returning __str__ to __unicode__ and make __str__
    # return the UTF-8 encoded bytes py2 expects.
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| gpl-3.0 |
exis-io/node | runner/backend.py | 1 | 1819 | '''
Autobahn examples using vanilla WAMP
'''
from os import environ
from twisted.internet.defer import inlineCallbacks
from twisted.internet import reactor
from autobahn.wamp.types import CallResult
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
# Python 2 / Twisted demo callbacks registered with the WAMP router below.

# RPC endpoint: logs the call and returns the sum of its two arguments.
def callAdd(a, b):
print 'Server called Add with ', a, b
return a + b
# Subscription handler: just logs the received publish payload.
def pub(a):
print 'Server received a pub with ', a
# RPC endpoint: stops the Twisted reactor, shutting the process down.
def kill():
print 'Quitting'
reactor.stop()
# RPC endpoint: permission-query stub that always grants access.
def hasPermission():
print 'Query for a permission'
return True
class Component(ApplicationSession):
"""
Application component that provides procedures which
return complex results.
"""
# onJoin runs once the WAMP session is established; registrations and
# publishes are sequenced by the inlineCallbacks generator.
@inlineCallbacks
def onJoin(self, details):
print "session attached"
# yield self.publish('pd/hello')
yield self.register(callAdd, 'pd.damouse/add')
# yield self.register(hasPermission, 'pd.bouncer/checkPerm')
yield self.register(kill, 'pd.damouse/kill')
print 'Publishing to pd.pub'
yield self.publish('pd/hello')
yield self.subscribe(pub, 'pd.damouse/pub')
# Testing internal no-register:
# The router always attempts to publish on "/pong"
# when it sees a "/ping." Expect the call to go nowhere this first time,
# and the frontend to receive a blind pub on the second one.
yield self.publish('pd/ping')
# Trigger node-internal testing
yield self.publish('pd/hello')
print "procedures registered"
# Script entry point: connect to the local node router and run the Component
# session until the reactor is stopped (e.g. via the pd.damouse/kill RPC).
if __name__ == '__main__':
runner = ApplicationRunner(
"ws://127.0.0.1:8000/ws",
# "ws://paradrop.io:8000/ws",
u"pd.damouse",
debug_wamp=False, # optional; log many WAMP details
debug=False, # optional; log even more details
)
runner.run(Component)
| mit |
odoomrp/odoomrp-wip | purchase_landed_cost/models/stock_picking.py | 12 | 1334 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, api
class StockPicking(models.Model):
    """Extend pickings with a shortcut to their landed-cost distributions."""
    _inherit = 'stock.picking'

    @api.multi
    def action_open_landed_cost(self):
        """Return an act_window on the cost distribution(s) referencing
        this picking, or ``None`` when there are none.

        A single distribution is opened directly in its form view; several
        distributions are shown through the list action filtered by id.
        """
        self.ensure_one()
        dist_lines = self.env['purchase.cost.distribution.line'].search(
            [('picking_id', '=', self.id)])
        if not dist_lines:
            return
        data_model = self.env['ir.model.data']
        model, action_id = tuple(data_model.get_object_reference(
            'purchase_landed_cost', 'action_purchase_cost_distribution'))
        action = self.env[model].browse(action_id).read()[0]
        dist_ids = set(line.distribution.id for line in dist_lines)
        if len(dist_ids) == 1:
            view_ref = data_model.get_object_reference(
                'purchase_landed_cost', 'purchase_cost_distribution_form')
            action['views'] = [(view_ref and view_ref[1] or False, 'form')]
            action['res_id'] = next(iter(dist_ids))
        else:
            action['domain'] = "[('id', 'in', %s)]" % list(dist_ids)
        return action
| agpl-3.0 |
lumig242/Hue-Integration-with-CDAP | desktop/core/ext-py/cffi-1.5.2/doc/source/conf.py | 8 | 6333 | # -*- coding: utf-8 -*-
#
# CFFI documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 14 16:37:47 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CFFI'
copyright = u'2012-2015, Armin Rigo, Maciej Fijalkowski'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.5'
# The full version, including alpha/beta/rc tags.
release = '1.5.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'CFFIdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'CFFI.tex', u'CFFI Documentation',
u'Armin Rigo, Maciej Fijalkowski', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| apache-2.0 |
ferabra/edx-platform | common/lib/capa/capa/tests/test_shuffle.py | 196 | 13736 | """Tests the capa shuffle and name-masking."""
import unittest
import textwrap
from . import test_capa_system, new_loncapa_problem
from capa.responsetypes import LoncapaProblemError
class CapaShuffleTest(unittest.TestCase):
"""Capa problem tests for shuffling and choice-name masking."""
def setUp(self):
"""Create a fresh mock capa system before each test."""
super(CapaShuffleTest, self).setUp()
self.system = test_capa_system()
def test_shuffle_4_choices(self):
"""A seeded 4-choice problem shuffles deterministically and unmasks."""
xml_str = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="false">Apple</choice>
<choice correct="false">Banana</choice>
<choice correct="false">Chocolate</choice>
<choice correct ="true">Donut</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=0)
# shuffling 4 things with seed of 0 yields: B A C D
# Check that the choices are shuffled
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'Banana'.*'Apple'.*'Chocolate'.*'Donut'.*\].*</div>")
# Check that choice name masking is enabled and that unmasking works
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertEqual(response.unmask_order(), ['choice_1', 'choice_0', 'choice_2', 'choice_3'])
# get_html must be idempotent: the shuffle order is fixed by the seed.
self.assertEqual(the_html, problem.get_html(), 'should be able to call get_html() twice')
def test_shuffle_custom_names(self):
xml_str = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="false" name="aaa">Apple</choice>
<choice correct="false">Banana</choice>
<choice correct="false">Chocolate</choice>
<choice correct ="true" name="ddd">Donut</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=0)
# B A C D
# Check that the custom name= names come through
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertTrue(response.has_shuffle())
self.assertEqual(response.unmask_order(), ['choice_0', 'choice_aaa', 'choice_1', 'choice_ddd'])
def test_shuffle_different_seed(self):
xml_str = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="false">Apple</choice>
<choice correct="false">Banana</choice>
<choice correct="false">Chocolate</choice>
<choice correct ="true">Donut</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=341) # yields D A B C
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'Donut'.*'Apple'.*'Banana'.*'Chocolate'.*\].*</div>")
def test_shuffle_1_choice(self):
xml_str = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="true">Apple</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=0)
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'Apple'.*\].*</div>")
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertTrue(response.has_shuffle())
self.assertEqual(response.unmask_order(), ['choice_0'])
def test_shuffle_6_choices(self):
xml_str = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="false">Apple</choice>
<choice correct="false">Banana</choice>
<choice correct="false">Chocolate</choice>
<choice correct ="true">Zonut</choice>
<choice correct ="false">Eggplant</choice>
<choice correct ="false">Filet Mignon</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=0) # yields: C E A B D F
# Donut -> Zonut to show that there is not some hidden alphabetic ordering going on
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'Chocolate'.*'Eggplant'.*'Apple'.*'Banana'.*'Zonut'.*'Filet Mignon'.*\].*</div>")
def test_shuffle_false(self):
xml_str = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="false">
<choice correct="false">Apple</choice>
<choice correct="false">Banana</choice>
<choice correct="false">Chocolate</choice>
<choice correct ="true">Donut</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
problem = new_loncapa_problem(xml_str)
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'Apple'.*'Banana'.*'Chocolate'.*'Donut'.*\].*</div>")
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertFalse(response.has_shuffle())
def test_shuffle_fixed_head_end(self):
xml_str = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="false" fixed="true">Alpha</choice>
<choice correct="false" fixed="true">Beta</choice>
<choice correct="false">A</choice>
<choice correct="false">B</choice>
<choice correct="false">C</choice>
<choice correct ="true">D</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=0)
the_html = problem.get_html()
# Alpha Beta held back from shuffle (head end)
self.assertRegexpMatches(the_html, r"<div>.*\[.*'Alpha'.*'Beta'.*'B'.*'A'.*'C'.*'D'.*\].*</div>")
def test_shuffle_fixed_tail_end(self):
xml_str = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="false">A</choice>
<choice correct="false">B</choice>
<choice correct="false">C</choice>
<choice correct ="true">D</choice>
<choice correct="false" fixed="true">Alpha</choice>
<choice correct="false" fixed="true">Beta</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=0)
the_html = problem.get_html()
# Alpha Beta held back from shuffle (tail end)
self.assertRegexpMatches(the_html, r"<div>.*\[.*'B'.*'A'.*'C'.*'D'.*'Alpha'.*'Beta'.*\].*</div>")
def test_shuffle_fixed_both_ends(self):
xml_str = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="false" fixed="true">Alpha</choice>
<choice correct="false" fixed="true">Beta</choice>
<choice correct="false">A</choice>
<choice correct="false">B</choice>
<choice correct="false">C</choice>
<choice correct ="true">D</choice>
<choice correct="false" fixed="true">Psi</choice>
<choice correct="false" fixed="true">Omega</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=0)
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'Alpha'.*'Beta'.*'B'.*'A'.*'C'.*'D'.*'Psi'.*'Omega'.*\].*</div>")
    def test_shuffle_fixed_both_ends_thin(self):
        """Minimal case: one fixed head choice, one shuffleable middle, one fixed tail."""
        xml_str = textwrap.dedent("""
            <problem>
            <multiplechoiceresponse>
              <choicegroup type="MultipleChoice" shuffle="true">
                <choice correct="false" fixed="true">Alpha</choice>
                <choice correct="false">A</choice>
                <choice correct="true" fixed="true">Omega</choice>
              </choicegroup>
            </multiplechoiceresponse>
            </problem>
        """)
        problem = new_loncapa_problem(xml_str, seed=0)
        the_html = problem.get_html()
        # With a single shuffleable choice, the final order is fully determined.
        self.assertRegexpMatches(the_html, r"<div>.*\[.*'Alpha'.*'A'.*'Omega'.*\].*</div>")
    def test_shuffle_fixed_all(self):
        """If every choice is fixed="true", the authored order is preserved."""
        xml_str = textwrap.dedent("""
            <problem>
            <multiplechoiceresponse>
              <choicegroup type="MultipleChoice" shuffle="true">
                <choice correct="false" fixed="true">A</choice>
                <choice correct="false" fixed="true">B</choice>
                <choice correct="true" fixed="true">C</choice>
              </choicegroup>
            </multiplechoiceresponse>
            </problem>
        """)
        problem = new_loncapa_problem(xml_str, seed=0)
        the_html = problem.get_html()
        self.assertRegexpMatches(the_html, r"<div>.*\[.*'A'.*'B'.*'C'.*\].*</div>")
    def test_shuffle_island(self):
        """A fixed 'island' choice not at the head or tail end gets lumped into the tail end."""
        xml_str = textwrap.dedent("""
            <problem>
            <multiplechoiceresponse>
              <choicegroup type="MultipleChoice" shuffle="true">
                <choice correct="false" fixed="true">A</choice>
                <choice correct="false">Mid</choice>
                <choice correct="true" fixed="true">C</choice>
                <choice correct="False">Mid</choice>
                <choice correct="false" fixed="true">D</choice>
              </choicegroup>
            </multiplechoiceresponse>
            </problem>
        """)
        problem = new_loncapa_problem(xml_str, seed=0)
        the_html = problem.get_html()
        # A stays at the head; the island (C) and the tail (D) both end up at the tail.
        self.assertRegexpMatches(the_html, r"<div>.*\[.*'A'.*'Mid'.*'Mid'.*'C'.*'D'.*\].*</div>")
def test_multiple_shuffle_responses(self):
xml_str = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="false">Apple</choice>
<choice correct="false">Banana</choice>
<choice correct="false">Chocolate</choice>
<choice correct ="true">Donut</choice>
</choicegroup>
</multiplechoiceresponse>
<p>Here is some text</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="false">A</choice>
<choice correct="false">B</choice>
<choice correct="false">C</choice>
<choice correct ="true">D</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=0)
orig_html = problem.get_html()
self.assertEqual(orig_html, problem.get_html(), 'should be able to call get_html() twice')
html = orig_html.replace('\n', ' ') # avoid headaches with .* matching
print html
self.assertRegexpMatches(html, r"<div>.*\[.*'Banana'.*'Apple'.*'Chocolate'.*'Donut'.*\].*</div>.*" +
r"<div>.*\[.*'C'.*'A'.*'D'.*'B'.*\].*</div>")
# Look at the responses in their authored order
responses = sorted(problem.responders.values(), key=lambda resp: int(resp.id[resp.id.rindex('_') + 1:]))
self.assertFalse(responses[0].has_mask())
self.assertTrue(responses[0].has_shuffle())
self.assertTrue(responses[1].has_shuffle())
self.assertEqual(responses[0].unmask_order(), ['choice_1', 'choice_0', 'choice_2', 'choice_3'])
self.assertEqual(responses[1].unmask_order(), ['choice_2', 'choice_0', 'choice_3', 'choice_1'])
    def test_shuffle_not_with_answerpool(self):
        """Raise error if shuffle and answer-pool are both used."""
        xml_str = textwrap.dedent("""
            <problem>
            <multiplechoiceresponse>
              <choicegroup type="MultipleChoice" shuffle="true" answer-pool="4">
                <choice correct="false" fixed="true">A</choice>
                <choice correct="false">Mid</choice>
                <choice correct="true" fixed="true">C</choice>
                <choice correct="False">Mid</choice>
                <choice correct="false" fixed="true">D</choice>
              </choicegroup>
            </multiplechoiceresponse>
            </problem>
        """)
        # Combining shuffle with answer-pool is unsupported: constructing the
        # problem must raise LoncapaProblemError mentioning both features.
        with self.assertRaisesRegexp(LoncapaProblemError, "shuffle and answer-pool"):
            new_loncapa_problem(xml_str)
| agpl-3.0 |
kawamon/hue | desktop/core/ext-py/eventlet-0.24.1/eventlet/greenio/py3.py | 6 | 6384 | import _pyio as _original_pyio
import errno
import os as _original_os
import socket as _original_socket
from io import (
BufferedRandom as _OriginalBufferedRandom,
BufferedReader as _OriginalBufferedReader,
BufferedWriter as _OriginalBufferedWriter,
DEFAULT_BUFFER_SIZE,
TextIOWrapper as _OriginalTextIOWrapper,
IOBase as _OriginalIOBase,
)
from types import FunctionType
from eventlet.greenio.base import (
_operation_on_closed_file,
greenpipe_doc,
set_nonblocking,
SOCKET_BLOCKING,
)
from eventlet.hubs import notify_close, notify_opened, IOClosed, trampoline
from eventlet.support import get_errno
import six
# Public names exported by this module.
__all__ = ['_fileobject', 'GreenPipe']

# TODO get rid of this, it only seems like the original _fileobject
# NOTE(review): presumably kept so existing importers of `_fileobject`
# keep working -- confirm before removing.
_fileobject = _original_socket.SocketIO
# Large part of the following code is copied from the original
# eventlet.greenio module
class GreenFileIO(_OriginalIOBase):
    """Cooperative (green) replacement for ``io.FileIO``.

    The underlying descriptor is switched to non-blocking mode; any read or
    write that would block instead yields to the eventlet hub (via
    ``trampoline``) until the descriptor becomes ready, so other greenthreads
    keep running in the meantime.
    """

    def __init__(self, name, mode='r', closefd=True, opener=None):
        if isinstance(name, int):
            # Adopt an existing file descriptor.
            fileno = name
            self._name = "<fd:%d>" % fileno
        else:
            assert isinstance(name, six.string_types)
            # Open the path with the regular (blocking) open() just long
            # enough to grab a duplicate of its descriptor.
            with open(name, mode) as fd:
                self._name = fd.name
                fileno = _original_os.dup(fd.fileno())

        notify_opened(fileno)
        self._fileno = fileno
        self._mode = mode
        self._closed = False
        set_nonblocking(self)
        # Lazily determined by seekable().
        self._seekable = None

    @property
    def closed(self):
        return self._closed

    def seekable(self):
        # Probe once with a no-op lseek; pipes/FIFOs/sockets raise ESPIPE.
        if self._seekable is None:
            try:
                _original_os.lseek(self._fileno, 0, _original_os.SEEK_CUR)
            except IOError as e:
                if get_errno(e) == errno.ESPIPE:
                    self._seekable = False
                else:
                    raise
            else:
                self._seekable = True
        return self._seekable

    def readable(self):
        return 'r' in self._mode or '+' in self._mode

    def writable(self):
        return 'w' in self._mode or '+' in self._mode

    def fileno(self):
        return self._fileno

    def read(self, size=-1):
        """Read up to *size* bytes, yielding to the hub while no data is
        available.  size == -1 reads until EOF."""
        if size == -1:
            return self.readall()
        while True:
            try:
                return _original_os.read(self._fileno, size)
            except OSError as e:
                if get_errno(e) not in SOCKET_BLOCKING:
                    raise IOError(*e.args)
            self._trampoline(self, read=True)

    def readall(self):
        """Read until EOF; an empty chunk from os.read() signals EOF."""
        buf = []
        while True:
            try:
                chunk = _original_os.read(self._fileno, DEFAULT_BUFFER_SIZE)
                if chunk == b'':
                    return b''.join(buf)
                buf.append(chunk)
            except OSError as e:
                if get_errno(e) not in SOCKET_BLOCKING:
                    raise IOError(*e.args)
                self._trampoline(self, read=True)

    def readinto(self, b):
        """Read into buffer *b*; returns the number of bytes read."""
        up_to = len(b)
        data = self.read(up_to)
        bytes_read = len(data)
        b[:bytes_read] = data
        return bytes_read

    def isatty(self):
        try:
            return _original_os.isatty(self.fileno())
        except OSError as e:
            raise IOError(*e.args)

    def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
        """Yield to the hub until *fd* is ready, refusing to wait on a
        descriptor that has already been closed or obsoleted."""
        if self._closed:
            # Don't trampoline if we're already closed.
            raise IOClosed()
        try:
            return trampoline(fd, read=read, write=write, timeout=timeout,
                              timeout_exc=timeout_exc,
                              mark_as_closed=self._mark_as_closed)
        except IOClosed:
            # Our fileno has been obsoleted. Defang ourselves to
            # prevent spurious closes.
            self._mark_as_closed()
            raise

    def _mark_as_closed(self):
        """ Mark this socket as being closed """
        self._closed = True

    def write(self, data):
        """Write all of *data*, yielding to the hub whenever the descriptor
        is not ready; returns the number of bytes written."""
        view = memoryview(data)
        datalen = len(data)
        offset = 0
        while offset < datalen:
            try:
                written = _original_os.write(self._fileno, view[offset:])
            except OSError as e:
                if get_errno(e) not in SOCKET_BLOCKING:
                    raise IOError(*e.args)
                # FIX: previously called the module-level trampoline()
                # directly, bypassing the closed-descriptor guard that the
                # read paths get via self._trampoline().
                self._trampoline(self, write=True)
            else:
                offset += written
        return offset

    def close(self):
        if not self._closed:
            self._closed = True
            _original_os.close(self._fileno)
        notify_close(self._fileno)
        # Defang every I/O method so late calls raise a clear error instead
        # of operating on a recycled descriptor.
        for method in [
                'fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
                'readline', 'readlines', 'seek', 'tell', 'truncate',
                'write', 'xreadlines', '__iter__', '__next__', 'writelines']:
            setattr(self, method, _operation_on_closed_file)

    def truncate(self, size=-1):
        """Truncate to *size* bytes (default: current position)."""
        if size == -1:
            size = self.tell()
        try:
            rv = _original_os.ftruncate(self._fileno, size)
        except OSError as e:
            raise IOError(*e.args)
        else:
            self.seek(size)  # move position&clear buffer
            return rv

    def seek(self, offset, whence=_original_os.SEEK_SET):
        try:
            return _original_os.lseek(self._fileno, offset, whence)
        except OSError as e:
            raise IOError(*e.args)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
# Build a copy of this module's namespace in which the names the pure-python
# _pyio.open() looks up resolve to the stock buffered/text wrappers, but with
# FileIO replaced by GreenFileIO.
_open_environment = dict(globals())
_open_environment.update(dict(
    BufferedRandom=_OriginalBufferedRandom,
    BufferedWriter=_OriginalBufferedWriter,
    BufferedReader=_OriginalBufferedReader,
    TextIOWrapper=_OriginalTextIOWrapper,
    FileIO=GreenFileIO,
    os=_original_os,
))

# Re-create the pure-python io.open() with the patched globals above, so any
# file it opens is backed by GreenFileIO (and therefore non-blocking).
_open = FunctionType(
    six.get_function_code(_original_pyio.open),
    _open_environment,
)
# Green drop-in for pipe/file objects.  Accepts an int fd, a path, or an
# existing file-like object; the signature mirrors io.open().
def GreenPipe(name, mode="r", buffering=-1, encoding=None, errors=None,
              newline=None, closefd=True, opener=None):
    try:
        fileno = name.fileno()
    except AttributeError:
        # Not file-like: assume an int fd or a path and let _open decide.
        pass
    else:
        # File-like object: take ownership of a duplicate of its descriptor
        # and close the original so only the green wrapper remains.
        fileno = _original_os.dup(fileno)
        name.close()
        name = fileno

    return _open(name, mode, buffering, encoding, errors, newline, closefd, opener)

# The real docstring lives in eventlet.greenio.base.greenpipe_doc.
GreenPipe.__doc__ = greenpipe_doc
| apache-2.0 |
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationldappolicy_systemglobal_binding.py | 1 | 5329 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationldappolicy_systemglobal_binding(base_resource):
    """ Binding class showing the systemglobal that can be bound to authenticationldappolicy.

    FIX: removed the no-op ``try/except Exception as e: raise e`` wrappers
    that surrounded every method body.  They added no handling, and on
    Python 2 ``raise e`` resets the traceback to the wrapper line, hiding
    the real failure point.  Behavior and the public interface are
    otherwise unchanged.
    """

    def __init__(self):
        self._boundto = ""
        self._priority = 0
        self._activepolicy = 0
        self._name = ""
        self.___count = 0

    @property
    def boundto(self):
        """The entity name to which policy is bound."""
        return self._boundto

    @boundto.setter
    def boundto(self, boundto):
        """The entity name to which policy is bound."""
        self._boundto = boundto

    @property
    def name(self):
        """Name of the LDAP policy.<br/>Minimum length = 1."""
        return self._name

    @name.setter
    def name(self, name):
        """Name of the LDAP policy.<br/>Minimum length = 1."""
        self._name = name

    @property
    def priority(self):
        # Read-only: bind priority reported by the appliance.
        return self._priority

    @property
    def activepolicy(self):
        # Read-only: whether the bound policy is currently active.
        return self._activepolicy

    def _get_nitro_response(self, service, response):
        """Convert a NITRO response string into resource objects.

        Returns the array of binding objects for GET requests; raises
        nitro_exception when the response reports an error.
        """
        result = service.payload_formatter.string_to_resource(
            authenticationldappolicy_systemglobal_binding_response, response,
            self.__class__.__name__)
        if result.errorcode != 0:
            if result.errorcode == 444:
                # Session expired on the appliance; drop the cached session.
                service.clear_session(self)
            if result.severity:
                if result.severity == "ERROR":
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else:
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.authenticationldappolicy_systemglobal_binding

    def _get_object_name(self):
        """Return the object identifier (the policy name), or None."""
        if self.name:
            return str(self.name)
        return None

    @classmethod
    def get(cls, service, name):
        """ Use this API to fetch authenticationldappolicy_systemglobal_binding resources.
        """
        obj = authenticationldappolicy_systemglobal_binding()
        obj.name = name
        return obj.get_resources(service)

    @classmethod
    def get_filtered(cls, service, name, filter_):
        """ Use this API to fetch filtered set of authenticationldappolicy_systemglobal_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        obj = authenticationldappolicy_systemglobal_binding()
        obj.name = name
        option_ = options()
        option_.filter = filter_
        return obj.getfiltered(service, option_)

    @classmethod
    def count(cls, service, name):
        """ Use this API to count authenticationldappolicy_systemglobal_binding resources configured on NetScaler.
        """
        obj = authenticationldappolicy_systemglobal_binding()
        obj.name = name
        option_ = options()
        option_.count = True
        response = obj.get_resources(service, option_)
        if response:
            # Raw __dict__ access reads the literal '___count' key
            # (presumably set by the payload deserializer), sidestepping
            # class-level name mangling of the triple-underscore attribute.
            return response[0].__dict__['___count']
        return 0

    @classmethod
    def count_filtered(cls, service, name, filter_):
        """ Use this API to count the filtered set of authenticationldappolicy_systemglobal_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        obj = authenticationldappolicy_systemglobal_binding()
        obj.name = name
        option_ = options()
        option_.count = True
        option_.filter = filter_
        response = obj.getfiltered(service, option_)
        if response:
            return response[0].__dict__['___count']
        return 0
class authenticationldappolicy_systemglobal_binding_response(base_response):
    """Response envelope for authenticationldappolicy_systemglobal_binding
    API calls: error metadata plus one binding object per expected row.

    FIX: dropped a dead store -- the binding-list attribute was assigned an
    empty list and then unconditionally reassigned a few lines later.
    """
    def __init__(self, length=1):
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        self.authenticationldappolicy_systemglobal_binding = [
            authenticationldappolicy_systemglobal_binding() for _ in range(length)]
| apache-2.0 |
aristotle-tek/cuny-bdif | AWS/ec2/lib/boto-2.34.0/tests/integration/rds2/__init__.py | 645 | 1175 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
| mit |
DinoCow/airflow | tests/www/test_utils.py | 4 | 8869 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import datetime
from urllib.parse import parse_qs
from bs4 import BeautifulSoup
from parameterized import parameterized
from airflow.www import utils
from airflow.www.utils import wrapped_markdown
from tests.test_utils.config import conf_vars
class TestUtils(unittest.TestCase):
    """Tests for airflow.www.utils helpers: sensitive-value masking,
    pagination HTML, query-string building/escaping, and model links.

    FIX: in test_state_token the assertIn and assertNotIn asserted the SAME
    literal (the two could never both pass); the expected HTML-escaped form
    (&lt;script&gt;...) had evidently been entity-decoded at some point and
    is restored here.
    """

    def test_empty_variable_should_not_be_hidden(self):
        self.assertFalse(utils.should_hide_value_for_key(""))
        self.assertFalse(utils.should_hide_value_for_key(None))

    def test_normal_variable_should_not_be_hidden(self):
        self.assertFalse(utils.should_hide_value_for_key("key"))

    def test_sensitive_variable_should_be_hidden(self):
        self.assertTrue(utils.should_hide_value_for_key("google_api_key"))

    def test_sensitive_variable_should_be_hidden_ic(self):
        # "_ic" = ignore case: matching must be case-insensitive.
        self.assertTrue(utils.should_hide_value_for_key("GOOGLE_API_KEY"))

    @parameterized.expand(
        [
            ('key', 'TRELLO_KEY', True),
            ('key', 'TRELLO_API_KEY', True),
            ('key', 'GITHUB_APIKEY', True),
            ('key, token', 'TRELLO_TOKEN', True),
            ('mysecretword, mysensitivekey', 'GITHUB_mysecretword', True),
        ],
    )
    def test_sensitive_variable_fields_should_be_hidden(
        self, sensitive_variable_fields, key, expected_result
    ):
        with conf_vars({('admin', 'sensitive_variable_fields'): str(sensitive_variable_fields)}):
            self.assertEqual(expected_result, utils.should_hide_value_for_key(key))

    @parameterized.expand(
        [
            (None, 'TRELLO_API', False),
            ('token', 'TRELLO_KEY', False),
            ('token, mysecretword', 'TRELLO_KEY', False),
        ],
    )
    def test_normal_variable_fields_should_not_be_hidden(
        self, sensitive_variable_fields, key, expected_result
    ):
        with conf_vars({('admin', 'sensitive_variable_fields'): str(sensitive_variable_fields)}):
            self.assertEqual(expected_result, utils.should_hide_value_for_key(key))

    def check_generate_pages_html(self, current_page, total_pages, window=7, check_middle=False):
        # Shared assertions for the pagination widget: the search string must
        # be escaped (never raw), the item count must match, and each page
        # link must be page-relative and carry the right query string.
        extra_links = 4  # first, prev, next, last
        search = "'>\"/><img src=x onerror=alert(1)>"
        html_str = utils.generate_pages(current_page, total_pages, search=search)

        self.assertNotIn(search, html_str, "The raw search string shouldn't appear in the output")
        self.assertIn('search=%27%3E%22%2F%3E%3Cimg+src%3Dx+onerror%3Dalert%281%29%3E', html_str)

        self.assertTrue(callable(html_str.__html__), "Should return something that is HTML-escaping aware")

        dom = BeautifulSoup(html_str, 'html.parser')
        self.assertIsNotNone(dom)

        ulist = dom.ul
        ulist_items = ulist.find_all('li')
        self.assertEqual(min(window, total_pages) + extra_links, len(ulist_items))

        page_items = ulist_items[2:-2]
        mid = int(len(page_items) / 2)
        for i, item in enumerate(page_items):
            a_node = item.a
            href_link = a_node['href']
            node_text = a_node.string
            if node_text == str(current_page + 1):
                # The current page renders as an inert, highlighted entry.
                if check_middle:
                    self.assertEqual(mid, i)
                self.assertEqual('javascript:void(0)', href_link)
                self.assertIn('active', item['class'])
            else:
                self.assertRegex(href_link, r'^\?', 'Link is page-relative')
                query = parse_qs(href_link[1:])
                self.assertListEqual(query['page'], [str(int(node_text) - 1)])
                self.assertListEqual(query['search'], [search])

    def test_generate_pager_current_start(self):
        self.check_generate_pages_html(current_page=0, total_pages=6)

    def test_generate_pager_current_middle(self):
        self.check_generate_pages_html(current_page=10, total_pages=20, check_middle=True)

    def test_generate_pager_current_end(self):
        self.check_generate_pages_html(current_page=38, total_pages=39)

    def test_params_no_values(self):
        """Should return an empty string if no params are passed"""
        self.assertEqual('', utils.get_params())

    def test_params_search(self):
        self.assertEqual('search=bash_', utils.get_params(search='bash_'))

    def test_params_none_and_zero(self):
        query_str = utils.get_params(a=0, b=None, c='true')
        # The order won't be consistent, but that doesn't affect behaviour of a browser
        pairs = list(sorted(query_str.split('&')))
        self.assertListEqual(['a=0', 'c=true'], pairs)

    def test_params_all(self):
        query = utils.get_params(status='active', page=3, search='bash_')
        self.assertEqual({'page': ['3'], 'search': ['bash_'], 'status': ['active']}, parse_qs(query))

    def test_params_escape(self):
        self.assertEqual(
            'search=%27%3E%22%2F%3E%3Cimg+src%3Dx+onerror%3Dalert%281%29%3E',
            utils.get_params(search="'>\"/><img src=x onerror=alert(1)>"),
        )

    def test_state_token(self):
        # It's shouldn't possible to set these odd values anymore, but lets
        # ensure they are escaped!
        html = str(utils.state_token('<script>alert(1)</script>'))
        self.assertIn(
            '&lt;script&gt;alert(1)&lt;/script&gt;',
            html,
        )
        self.assertNotIn(
            '<script>alert(1)</script>',
            html,
        )

    def test_task_instance_link(self):
        from airflow.www.app import cached_app

        with cached_app(testing=True).test_request_context():
            html = str(
                utils.task_instance_link(
                    {'dag_id': '<a&1>', 'task_id': '<b2>', 'execution_date': datetime.now()}
                )
            )

        # IDs must appear URL-encoded, never raw.
        self.assertIn('%3Ca%261%3E', html)
        self.assertIn('%3Cb2%3E', html)
        self.assertNotIn('<a&1>', html)
        self.assertNotIn('<b2>', html)

    def test_dag_link(self):
        from airflow.www.app import cached_app

        with cached_app(testing=True).test_request_context():
            html = str(utils.dag_link({'dag_id': '<a&1>', 'execution_date': datetime.now()}))

        self.assertIn('%3Ca%261%3E', html)
        self.assertNotIn('<a&1>', html)

    def test_dag_run_link(self):
        from airflow.www.app import cached_app

        with cached_app(testing=True).test_request_context():
            html = str(
                utils.dag_run_link({'dag_id': '<a&1>', 'run_id': '<b2>', 'execution_date': datetime.now()})
            )

        self.assertIn('%3Ca%261%3E', html)
        self.assertIn('%3Cb2%3E', html)
        self.assertNotIn('<a&1>', html)
        self.assertNotIn('<b2>', html)
class TestAttrRenderer(unittest.TestCase):
    """Tests for the attribute renderers returned by utils.get_attr_renderer()."""

    def setUp(self):
        self.attr_renderer = utils.get_attr_renderer()

    def test_python_callable(self):
        def example_callable(unused_self):
            print("example")

        output = self.attr_renderer["python_callable"](example_callable)
        self.assertIn('"example"', output)

    def test_python_callable_none(self):
        output = self.attr_renderer["python_callable"](None)
        self.assertEqual("", output)

    def test_markdown(self):
        output = self.attr_renderer["doc_md"]("* foo\n* bar")
        self.assertIn("<li>foo</li>", output)
        self.assertIn("<li>bar</li>", output)

    def test_markdown_none(self):
        # NOTE(review): despite the name, this exercises the
        # "python_callable" renderer, not "doc_md" -- looks like
        # copy-paste; confirm the intent before changing.
        output = self.attr_renderer["python_callable"](None)
        self.assertEqual("", output)
class TestWrappedMarkdown(unittest.TestCase):
    """Tests for wrapped_markdown(): rendering markdown inside a css-classed div."""

    def test_wrapped_markdown_with_docstring_curly_braces(self):
        html = wrapped_markdown("{braces}", css_class="a_class")
        self.assertEqual('<div class="a_class" ><p>{braces}</p></div>', html)

    def test_wrapped_markdown_with_some_markdown(self):
        html = wrapped_markdown("*italic*\n**bold**\n", css_class="a_class")
        expected = '''<div class="a_class" ><p><em>italic</em>
<strong>bold</strong></p></div>'''
        self.assertEqual(expected, html)
| apache-2.0 |
mancoast/CPythonPyc_test | cpython/262_test_hash.py | 55 | 4631 | # test the invariant that
# iff a==b then hash(a)==hash(b)
#
# Also test that hash implementations are inherited as expected
import unittest
from test import test_support
from collections import Hashable
class HashEqualityTestCase(unittest.TestCase):
    """a == b must imply hash(a) == hash(b), across Python 2's numeric
    types (int, long, float, complex)."""

    def same_hash(self, *objlist):
        # Hash each object given and fail if
        # the hash values are not all the same.
        hashed = map(hash, objlist)
        for h in hashed[1:]:
            if h != hashed[0]:
                self.fail("hashed values differ: %r" % (objlist,))

    def test_numeric_literals(self):
        self.same_hash(1, 1L, 1.0, 1.0+0.0j)
        self.same_hash(0, 0L, 0.0, 0.0+0.0j)
        self.same_hash(-1, -1L, -1.0, -1.0+0.0j)
        self.same_hash(-2, -2L, -2.0, -2.0+0.0j)

    def test_coerced_integers(self):
        self.same_hash(int(1), long(1), float(1), complex(1),
                       int('1'), float('1.0'))
        # Values around the 32-bit boundaries.
        self.same_hash(int(-2**31), long(-2**31), float(-2**31))
        self.same_hash(int(1-2**31), long(1-2**31), float(1-2**31))
        self.same_hash(int(2**31-1), long(2**31-1), float(2**31-1))
        # for 64-bit platforms
        self.same_hash(int(2**31), long(2**31), float(2**31))
        self.same_hash(int(-2**63), long(-2**63), float(-2**63))
        self.same_hash(int(1-2**63), long(1-2**63))
        self.same_hash(int(2**63-1), long(2**63-1))

    def test_coerced_floats(self):
        self.same_hash(long(1.23e300), float(1.23e300))
        self.same_hash(float(0.5), complex(0.5, 0.0))
# Fixture classes for the inheritance tests below: each combination of
# __eq__/__ne__/__cmp__ with or without an inherited fixed __hash__.
_default_hash = object.__hash__

class DefaultHash(object): pass

_FIXED_HASH_VALUE = 42

class FixedHash(object):
    def __hash__(self):
        return _FIXED_HASH_VALUE

class OnlyEquality(object):
    def __eq__(self, other):
        return self is other
    # Trick to suppress Py3k warning in 2.x
    __hash__ = None
del OnlyEquality.__hash__

class OnlyInequality(object):
    def __ne__(self, other):
        return self is not other

class OnlyCmp(object):
    def __cmp__(self, other):
        return cmp(id(self), id(other))
    # Trick to suppress Py3k warning in 2.x
    __hash__ = None
del OnlyCmp.__hash__

# Multiple inheritance: the FixedHash.__hash__ should win.
class InheritedHashWithEquality(FixedHash, OnlyEquality): pass
class InheritedHashWithInequality(FixedHash, OnlyInequality): pass
class InheritedHashWithCmp(FixedHash, OnlyCmp): pass

class NoHash(object):
    # Explicitly unhashable.
    __hash__ = None
class HashInheritanceTestCase(unittest.TestCase):
    """Hash implementations must be inherited as expected (see the fixture
    classes above)."""

    # Instances whose classes end up with object's default __hash__.
    default_expected = [object(),
                        DefaultHash(),
                        OnlyEquality(),
                        OnlyInequality(),
                        OnlyCmp(),
                        ]
    # Instances that inherit FixedHash.__hash__ (always _FIXED_HASH_VALUE).
    fixed_expected = [FixedHash(),
                      InheritedHashWithEquality(),
                      InheritedHashWithInequality(),
                      InheritedHashWithCmp(),
                      ]
    # Instances that must be unhashable.
    error_expected = [NoHash()]

    def test_default_hash(self):
        for obj in self.default_expected:
            self.assertEqual(hash(obj), _default_hash(obj))

    def test_fixed_hash(self):
        for obj in self.fixed_expected:
            self.assertEqual(hash(obj), _FIXED_HASH_VALUE)

    def test_error_hash(self):
        for obj in self.error_expected:
            self.assertRaises(TypeError, hash, obj)

    def test_hashable(self):
        # Hashable ABC membership must agree with hash() succeeding.
        objects = (self.default_expected +
                   self.fixed_expected)
        for obj in objects:
            self.assert_(isinstance(obj, Hashable), repr(obj))

    def test_not_hashable(self):
        for obj in self.error_expected:
            self.assertFalse(isinstance(obj, Hashable), repr(obj))
# Issue #4701: Check that some builtin types are correctly hashable
# (This test only used to fail in Python 3.0, but has been included
# in 2.x along with the lazy call to PyType_Ready in PyObject_Hash)
class DefaultIterSeq(object):
    """A sequence exposing only __len__/__getitem__, so iteration falls
    back to the default old-style sequence protocol."""

    seq = range(10)

    def __getitem__(self, position):
        return self.seq[position]

    def __len__(self):
        return len(self.seq)
# Issue #4701 regression test: these builtin iterator types must use the
# default object hash.
class HashBuiltinsTestCase(unittest.TestCase):
    hashes_to_check = [xrange(10),
                       enumerate(xrange(10)),
                       iter(DefaultIterSeq()),
                       iter(lambda: 0, 0),
                       ]

    def test_hashes(self):
        _default_hash = object.__hash__
        for obj in self.hashes_to_check:
            self.assertEqual(hash(obj), _default_hash(obj))
def test_main():
    # Standard entry point used by the Python 2 regrtest machinery.
    test_support.run_unittest(HashEqualityTestCase,
                              HashInheritanceTestCase,
                              HashBuiltinsTestCase)

if __name__ == "__main__":
    test_main()
| gpl-3.0 |
mark-ignacio/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/config/committers.py | 121 | 11526 | # Copyright (c) 2011, Apple Inc. All rights reserved.
# Copyright (c) 2009, 2011, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# WebKit's Python module for committer and reviewer validation.
import fnmatch
import json
from webkitpy.common.editdistance import edit_distance
from webkitpy.common.memoized import memoized
from webkitpy.common.system.filesystem import FileSystem
# The list of contributors have been moved to contributors.json
class Contributor(object):
    """A project contributor: a full name, one or more (lower-cased) email
    addresses, and optional IRC nicknames.

    Base class for Committer and Reviewer; a plain Contributor can neither
    commit nor review.
    """

    def __init__(self, name, email_or_emails, irc_nickname_or_nicknames=None):
        assert(name)
        assert(email_or_emails)
        self.full_name = name
        emails = [email_or_emails] if isinstance(email_or_emails, str) else email_or_emails
        # Emails are case-insensitive.
        self.emails = [email.lower() for email in emails]
        if isinstance(irc_nickname_or_nicknames, str):
            self.irc_nicknames = [irc_nickname_or_nicknames]
        else:
            self.irc_nicknames = irc_nickname_or_nicknames
        self.can_commit = False
        self.can_review = False

    def bugzilla_email(self):
        # FIXME: We're assuming the first email is a valid bugzilla email,
        # which might not be right.
        return self.emails[0]

    def __str__(self):
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        return '"%s" <%s>' % (self.full_name, self.emails[0])

    def contains_string(self, search_string):
        """Case-insensitive substring match against name, nicknames and emails."""
        needle = search_string.lower()
        if needle in self.full_name.lower():
            return True
        for nickname in (self.irc_nicknames or []):
            if needle in nickname.lower():
                return True
        for email in self.emails:
            if needle in email:
                return True
        return False

    def matches_glob(self, glob_string):
        """fnmatch-style glob match against name, nicknames and emails."""
        for candidate in [self.full_name] + list(self.irc_nicknames or []) + list(self.emails):
            if fnmatch.fnmatch(candidate, glob_string):
                return True
        return False
class Committer(Contributor):
    """A contributor with commit rights (can_commit=True)."""
    def __init__(self, name, email_or_emails, irc_nickname=None):
        Contributor.__init__(self, name, email_or_emails, irc_nickname)
        self.can_commit = True
class Reviewer(Committer):
    """A committer who can also review patches (can_review=True)."""
    def __init__(self, name, email_or_emails, irc_nickname=None):
        Committer.__init__(self, name, email_or_emails, irc_nickname)
        self.can_review = True
class CommitterList(object):
# Committers and reviewers are passed in to allow easy testing
def __init__(self,
committers=[],
reviewers=[],
contributors=[]):
# FIXME: These arguments only exist for testing. Clean it up.
if not (committers or reviewers or contributors):
loaded_data = self.load_json()
contributors = loaded_data['Contributors']
committers = loaded_data['Committers']
reviewers = loaded_data['Reviewers']
self._contributors = contributors + committers + reviewers
self._committers = committers + reviewers
self._reviewers = reviewers
self._contributors_by_name = {}
self._accounts_by_email = {}
self._accounts_by_login = {}
    @staticmethod
    @memoized
    def load_json():
        # Read contributors.json (which lives next to this package) and wrap
        # each entry in the matching Contributor/Committer/Reviewer class.
        # Memoized: the file is read at most once per process.
        filesystem = FileSystem()
        json_path = filesystem.join(filesystem.dirname(filesystem.path_to_module('webkitpy.common.config')), 'contributors.json')
        contributors = json.loads(filesystem.read_text_file(json_path))
        return {
            'Contributors': [Contributor(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Contributors'].iteritems()],
            'Committers': [Committer(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Committers'].iteritems()],
            'Reviewers': [Reviewer(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Reviewers'].iteritems()],
        }
    def contributors(self):
        # All known contributors (includes committers and reviewers).
        return self._contributors

    def committers(self):
        # Contributors with commit rights (includes reviewers).
        return self._committers

    def reviewers(self):
        # Contributors with review rights.
        return self._reviewers
    def _name_to_contributor_map(self):
        # Lazily build (and cache) the lower-cased-full-name -> contributor map.
        if not len(self._contributors_by_name):
            for contributor in self._contributors:
                assert(contributor.full_name)
                assert(contributor.full_name.lower() not in self._contributors_by_name)  # We should never have duplicate names.
                self._contributors_by_name[contributor.full_name.lower()] = contributor
        return self._contributors_by_name
def _email_to_account_map(self):
if not len(self._accounts_by_email):
for account in self._contributors:
for email in account.emails:
assert(email not in self._accounts_by_email) # We should never have duplicate emails.
self._accounts_by_email[email] = account
return self._accounts_by_email
def _login_to_account_map(self):
if not len(self._accounts_by_login):
for account in self._contributors:
if account.emails:
login = account.bugzilla_email()
assert(login not in self._accounts_by_login) # We should never have duplicate emails.
self._accounts_by_login[login] = account
return self._accounts_by_login
def _committer_only(self, record):
if record and not record.can_commit:
return None
return record
def _reviewer_only(self, record):
if record and not record.can_review:
return None
return record
def committer_by_name(self, name):
return self._committer_only(self.contributor_by_name(name))
def contributor_by_irc_nickname(self, irc_nickname):
for contributor in self.contributors():
# FIXME: This should do case-insensitive comparison or assert that all IRC nicknames are in lowercase
if contributor.irc_nicknames and irc_nickname in contributor.irc_nicknames:
return contributor
return None
def contributors_by_search_string(self, string):
glob_matches = filter(lambda contributor: contributor.matches_glob(string), self.contributors())
return glob_matches or filter(lambda contributor: contributor.contains_string(string), self.contributors())
def contributors_by_email_username(self, string):
string = string + '@'
result = []
for contributor in self.contributors():
for email in contributor.emails:
if email.startswith(string):
result.append(contributor)
break
return result
def _contributor_name_shorthands(self, contributor):
if ' ' not in contributor.full_name:
return []
split_fullname = contributor.full_name.split()
first_name = split_fullname[0]
last_name = split_fullname[-1]
return first_name, last_name, first_name + last_name[0], first_name + ' ' + last_name[0]
def _tokenize_contributor_name(self, contributor):
full_name_in_lowercase = contributor.full_name.lower()
tokens = [full_name_in_lowercase] + full_name_in_lowercase.split()
if contributor.irc_nicknames:
return tokens + [nickname.lower() for nickname in contributor.irc_nicknames if len(nickname) > 5]
return tokens
def contributors_by_fuzzy_match(self, string):
string_in_lowercase = string.lower()
# 1. Exact match for fullname, email and irc_nicknames
account = self.contributor_by_name(string_in_lowercase) or self.contributor_by_email(string_in_lowercase) or self.contributor_by_irc_nickname(string_in_lowercase)
if account:
return [account], 0
# 2. Exact match for email username (before @)
accounts = self.contributors_by_email_username(string_in_lowercase)
if accounts and len(accounts) == 1:
return accounts, 0
# 3. Exact match for first name, last name, and first name + initial combinations such as "Dan B" and "Tim H"
accounts = [contributor for contributor in self.contributors() if string in self._contributor_name_shorthands(contributor)]
if accounts and len(accounts) == 1:
return accounts, 0
# 4. Finally, fuzzy-match using edit-distance
string = string_in_lowercase
contributorWithMinDistance = []
minDistance = len(string) / 2 - 1
for contributor in self.contributors():
tokens = self._tokenize_contributor_name(contributor)
editdistances = [edit_distance(token, string) for token in tokens if abs(len(token) - len(string)) <= minDistance]
if not editdistances:
continue
distance = min(editdistances)
if distance == minDistance:
contributorWithMinDistance.append(contributor)
elif distance < minDistance:
contributorWithMinDistance = [contributor]
minDistance = distance
if not len(contributorWithMinDistance):
return [], len(string)
return contributorWithMinDistance, minDistance
def contributor_by_email(self, email):
return self._email_to_account_map().get(email.lower()) if email else None
def contributor_by_name(self, name):
return self._name_to_contributor_map().get(name.lower()) if name else None
def committer_by_email(self, email):
return self._committer_only(self.contributor_by_email(email))
def reviewer_by_email(self, email):
return self._reviewer_only(self.contributor_by_email(email))
| bsd-3-clause |
negritobomb/edxplat | playbooks/roles/gh_mirror/files/repos_from_orgs.py | 79 | 3314 | #!/usr/bin/python
# Given a list of repos in a yaml
# file will create or update mirrors
#
# Generates /var/tmp/repos.json from
# a yaml file containing a list of
# github organizations
import yaml
import sys
import requests
import json
import subprocess
import os
import logging
import fcntl
from os.path import dirname, abspath, join
from argparse import ArgumentParser
# File handles for acquired pid-file locks.  They must stay referenced for
# the life of the process: a local handle would be garbage-collected when
# check_running() returned, closing the file and silently releasing the lock.
PID_FILE_HANDLES = []


def check_running(run_type=''):
    """Exit immediately if another instance of this script is running.

    Opens (creating if needed) a pid file named after this script and
    *run_type* and takes a non-blocking exclusive lock on it.  If the lock
    is already held by another process, exits with status 0.
    """
    pid_file = '{}-{}.pid'.format(
        os.path.basename(__file__), run_type)
    fp = open(pid_file, 'w')
    try:
        fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        # another instance is running
        sys.exit(0)
    PID_FILE_HANDLES.append(fp)  # hold the lock until the process exits
def run_cmd(cmd):
    """Run *cmd* through the shell, streaming its stdout to the debug log.

    Returns the command's exit status.  Two fixes over the original:
    the child is reaped with wait() (no zombie processes), and the
    readline sentinel is b"" -- process.stdout yields bytes on Python 3,
    so the old "" sentinel never matched and the loop never terminated.
    (On Python 2, b"" == "" so behaviour is unchanged.)
    """
    logging.debug('running: {}\n'.format(cmd))
    process = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        shell=True)
    for line in iter(process.stdout.readline, b""):
        logging.debug(line)
    return process.wait()
def parse_args():
    """Build the command-line interface and parse sys.argv."""
    parser = ArgumentParser()
    parser.add_argument(
        '-r', '--refresh',
        action='store_true',
        default=False,
        help="Refresh the list of repos")
    parser.add_argument('-d', '--datadir', help="repo directory")
    return parser.parse_args()
def refresh_cache():
    """Rebuild /var/tmp/repos.json from the GitHub orgs listed in orgs.yml.

    orgs.yml is expected next to this script and to contain a YAML list of
    organization/user names.  Pages through the GitHub API for each one.
    """
    path = dirname(abspath(__file__))
    try:
        with open(join(path, 'orgs.yml')) as f:
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # on untrusted input; orgs.yml is operator-controlled here.
            orgs = yaml.load(f)
    except IOError:
        print "Unable to read {}/orgs.yml, does it exist?".format(path)
        sys.exit(1)

    repos = []

    for org in orgs:
        page = 1
        while True:
            r = requests.get('https://api.github.com/users/{}/repos?page={}'.format(org, page))
            org_data = r.json()
            # request pages until we get zero results
            # (a non-list response means an API error, e.g. rate limiting)
            if not isinstance(org_data, list) or len(org_data) == 0:
                break
            for repo_data in org_data:
                if 'html_url' in repo_data:
                    repos.append({'html_url': repo_data['html_url'],
                                  'name': repo_data['name'],
                                  'org': repo_data['owner']['login']})
            page += 1
    with open('/var/tmp/repos.json', 'wb') as f:
        f.write(json.dumps(repos))
def update_repos():
    """Create or update a bare mirror for every repo in /var/tmp/repos.json.

    New repos are cloned with --mirror; existing ones are fetched.  After
    either operation, git update-server-info prepares the mirror for dumb
    HTTP serving.

    NOTE(review): relies on the module-global 'args' assigned in the
    __main__ block below; calling this before parse_args() raises NameError.
    """
    with open('/var/tmp/repos.json') as f:
        repos = json.load(f)
    for repo in repos:
        repo_path = os.path.join(args.datadir, repo['org'], repo['name'] + '.git')
        if not os.path.exists(repo_path):
            run_cmd('mkdir -p {}'.format(repo_path))
            run_cmd('git clone --mirror {} {}'.format(repo['html_url'], repo_path))
            run_cmd('cd {} && git update-server-info'.format(repo_path))
        else:
            run_cmd('cd {} && git fetch --all --tags'.format(repo_path))
            run_cmd('cd {} && git update-server-info'.format(repo_path))
if __name__ == '__main__':
args = parse_args()
logging.basicConfig(filename='/var/log/repos-from-orgs.log',
level=logging.DEBUG)
if args.refresh:
check_running('refresh')
refresh_cache()
else:
check_running()
if not args.datadir:
print "Please specificy a repository directory"
sys.exit(1)
if not os.path.exists('/var/tmp/repos.json'):
refresh_cache()
update_repos()
| agpl-3.0 |
sorrison/django-pbs | django_pbs/servers/models.py | 1 | 5165 | # Copyright 2008 VPAC
#
# This file is part of django-pbs.
#
# django-pbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# django-pbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with django-pbs If not, see <http://www.gnu.org/licenses/>.
from PBSQuery import PBSQuery
from django_pbs.jobs.models import Job
class Server(object):
    """Read-only snapshot of a PBS server queried through PBSQuery.

    Server attributes reported by PBS are flattened onto the instance;
    nested 'resources*' dicts become attributes named '<key>_<resource>'.
    """

    def __init__(self, server_name):
        p = PBSQuery(str(server_name))
        info = p.get_serverinfo().items()[0]
        self.name = info[0]
        self.p = p
        for k, v in info[1].items():
            if k.startswith('resources'):
                for i, j in v.items():
                    setattr(self, k + '_' + i, j[0])
            else:
                setattr(self, k, v[0])

    def __str__(self):
        return self.name

    def _get_pk_val(self):
        # Mimic Django's model API so templates can treat this like a model.
        return self.name

    def get_absolute_url(self):
        return '/servers/%s/' % self.name

    def getnodes(self):
        """Return the raw node dictionary from PBS."""
        return self.p.getnodes()

    def getqueues(self):
        """Return the raw queue dictionary from PBS."""
        return self.p.getqueues()

    def getjobs(self):
        """Return the raw job dictionary from PBS."""
        return self.p.getjobs()

    def getjob(self, id):
        """Return the raw attribute dictionary for a single job id."""
        return self.p.getjob(id)

    def cpu_stats(self):
        """Return (used, total) CPU slot counts over online cluster nodes.

        Offline/down nodes and non-cluster node types are excluded.
        """
        node_data = self.getnodes()
        total = 0
        used = 0
        for k, n in node_data.items():
            if 'cluster' not in n['ntype']:
                continue
            if 'offline' in n['state'] or 'down' in n['state']:
                continue
            total += int(n['np'][0])
            # Idle nodes have no 'jobs' key.  The original hid that behind a
            # bare 'except: pass', which also swallowed unrelated errors.
            if 'jobs' in n:
                used += len(n['jobs'])
        return used, total

    def job_list(self, usernames=None):
        """Return Job wrappers, optionally filtered to the given owner names."""
        data_list = self.getjobs()
        job_list = []
        for d in data_list:
            id, host = d.split('.', 1)
            if usernames:
                owner, host = data_list[d]['Job_Owner'][0].split('@')
                if owner in usernames:
                    job_list.append(Job(server=self, id=id, data=data_list[d]))
            else:
                job_list.append(Job(server=self, id=id, data=data_list[d]))
        return job_list

    def node_list(self):
        """Return a Node wrapper for every node known to the server."""
        data = self.getnodes()
        nodes = []
        for k, v in data.items():
            v = dict(v)
            nodes.append(Node(self, k, v))
        return nodes

    def queue_list(self):
        """Return a Queue wrapper for every queue known to the server."""
        data = self.getqueues()
        queues = []
        for k, v in data.items():
            queues.append(Queue(self, k, dict(v)))
        return queues
class Queue:
    """Wrapper exposing a single PBS queue's attributes as plain attributes."""

    def __init__(self, server, name, data=None):
        self.server = server
        self.name = str(name)
        if not data:
            # No attribute dict supplied: ask the server's PBS connection.
            data = self.server.p.getqueue(self.name)
        for key, value in data.items():
            if key.startswith('resources'):
                # Flatten nested resource dicts into 'resources_<kind>_<name>'.
                for res_name, res_value in value.items():
                    setattr(self, '%s_%s' % (key, res_name), res_value[0])
            else:
                setattr(self, key, value[0])

    def __str__(self):
        return self.name

    def _get_pk_val(self):
        # Django-model-compatible primary key accessor.
        return self.name

    def get_absolute_url(self):
        return '/servers/%s/queues/%s/' % (self.server, self.name)
class Node:
    """Wrapper exposing a single PBS node's attributes.

    Derives a synthetic 'partial' state for nodes PBS reports as 'free'
    while they still run jobs.
    """

    def __init__(self, server, name, data=None):
        self.server = server
        self.name = str(name)
        if not data:
            data = self.server.p.getnode(self.name)
        self.type = data['ntype'][0]
        self.np = int(data['np'][0])
        # Each entry in 'jobs' is one occupied CPU slot.
        if 'jobs' in data:
            self.np_used = len(data['jobs'])
        else:
            self.np_used = 0
        # A node reported 'free' that still runs jobs is only partially used.
        if 'free' in data['state'] and 'jobs' in data:
            self.state = 'partial'
        else:
            self.state = data['state'][0]
        self.jobs = data.get('jobs', None)
        try:
            self.note = data['note'][0]
        except KeyError:
            self.note = ''
        #self.job_list = self.get_job_list()

    def __str__(self):
        return self.name

    def _get_pk_val(self):
        return self.name

    def is_free(self):
        """Return True when the node is completely idle."""
        if self.state == 'free':
            return True
        return False

    def is_online(self):
        """Return True unless the node is down or offline.

        Bug fix: the original referenced the bare name ``state`` (NameError
        on every call) instead of ``self.state``.
        """
        if self.state == 'down' or self.state == 'offline':
            return False
        return True

    def get_job_list(self):
        """Return Job wrappers for every job slot occupied on this node."""
        job_list = []
        if not self.jobs:
            return job_list
        for j in self.jobs:
            # Entries look like '<slot>/<jobid>.<server>'.
            full_id = j[j.index('/')+1:]
            id, host = full_id.split('.', 1)
            job_list.append(Job(server=self.server, id=id, full_id=full_id))
        return job_list

    def get_absolute_url(self):
        return '/servers/%s/nodes/%s/' % (self.server, self.name)
| gpl-3.0 |
Elettronik/SickRage | lib/unidecode/x0ba.py | 253 | 4765 | data = (
'mya', # 0x00
'myag', # 0x01
'myagg', # 0x02
'myags', # 0x03
'myan', # 0x04
'myanj', # 0x05
'myanh', # 0x06
'myad', # 0x07
'myal', # 0x08
'myalg', # 0x09
'myalm', # 0x0a
'myalb', # 0x0b
'myals', # 0x0c
'myalt', # 0x0d
'myalp', # 0x0e
'myalh', # 0x0f
'myam', # 0x10
'myab', # 0x11
'myabs', # 0x12
'myas', # 0x13
'myass', # 0x14
'myang', # 0x15
'myaj', # 0x16
'myac', # 0x17
'myak', # 0x18
'myat', # 0x19
'myap', # 0x1a
'myah', # 0x1b
'myae', # 0x1c
'myaeg', # 0x1d
'myaegg', # 0x1e
'myaegs', # 0x1f
'myaen', # 0x20
'myaenj', # 0x21
'myaenh', # 0x22
'myaed', # 0x23
'myael', # 0x24
'myaelg', # 0x25
'myaelm', # 0x26
'myaelb', # 0x27
'myaels', # 0x28
'myaelt', # 0x29
'myaelp', # 0x2a
'myaelh', # 0x2b
'myaem', # 0x2c
'myaeb', # 0x2d
'myaebs', # 0x2e
'myaes', # 0x2f
'myaess', # 0x30
'myaeng', # 0x31
'myaej', # 0x32
'myaec', # 0x33
'myaek', # 0x34
'myaet', # 0x35
'myaep', # 0x36
'myaeh', # 0x37
'meo', # 0x38
'meog', # 0x39
'meogg', # 0x3a
'meogs', # 0x3b
'meon', # 0x3c
'meonj', # 0x3d
'meonh', # 0x3e
'meod', # 0x3f
'meol', # 0x40
'meolg', # 0x41
'meolm', # 0x42
'meolb', # 0x43
'meols', # 0x44
'meolt', # 0x45
'meolp', # 0x46
'meolh', # 0x47
'meom', # 0x48
'meob', # 0x49
'meobs', # 0x4a
'meos', # 0x4b
'meoss', # 0x4c
'meong', # 0x4d
'meoj', # 0x4e
'meoc', # 0x4f
'meok', # 0x50
'meot', # 0x51
'meop', # 0x52
'meoh', # 0x53
'me', # 0x54
'meg', # 0x55
'megg', # 0x56
'megs', # 0x57
'men', # 0x58
'menj', # 0x59
'menh', # 0x5a
'med', # 0x5b
'mel', # 0x5c
'melg', # 0x5d
'melm', # 0x5e
'melb', # 0x5f
'mels', # 0x60
'melt', # 0x61
'melp', # 0x62
'melh', # 0x63
'mem', # 0x64
'meb', # 0x65
'mebs', # 0x66
'mes', # 0x67
'mess', # 0x68
'meng', # 0x69
'mej', # 0x6a
'mec', # 0x6b
'mek', # 0x6c
'met', # 0x6d
'mep', # 0x6e
'meh', # 0x6f
'myeo', # 0x70
'myeog', # 0x71
'myeogg', # 0x72
'myeogs', # 0x73
'myeon', # 0x74
'myeonj', # 0x75
'myeonh', # 0x76
'myeod', # 0x77
'myeol', # 0x78
'myeolg', # 0x79
'myeolm', # 0x7a
'myeolb', # 0x7b
'myeols', # 0x7c
'myeolt', # 0x7d
'myeolp', # 0x7e
'myeolh', # 0x7f
'myeom', # 0x80
'myeob', # 0x81
'myeobs', # 0x82
'myeos', # 0x83
'myeoss', # 0x84
'myeong', # 0x85
'myeoj', # 0x86
'myeoc', # 0x87
'myeok', # 0x88
'myeot', # 0x89
'myeop', # 0x8a
'myeoh', # 0x8b
'mye', # 0x8c
'myeg', # 0x8d
'myegg', # 0x8e
'myegs', # 0x8f
'myen', # 0x90
'myenj', # 0x91
'myenh', # 0x92
'myed', # 0x93
'myel', # 0x94
'myelg', # 0x95
'myelm', # 0x96
'myelb', # 0x97
'myels', # 0x98
'myelt', # 0x99
'myelp', # 0x9a
'myelh', # 0x9b
'myem', # 0x9c
'myeb', # 0x9d
'myebs', # 0x9e
'myes', # 0x9f
'myess', # 0xa0
'myeng', # 0xa1
'myej', # 0xa2
'myec', # 0xa3
'myek', # 0xa4
'myet', # 0xa5
'myep', # 0xa6
'myeh', # 0xa7
'mo', # 0xa8
'mog', # 0xa9
'mogg', # 0xaa
'mogs', # 0xab
'mon', # 0xac
'monj', # 0xad
'monh', # 0xae
'mod', # 0xaf
'mol', # 0xb0
'molg', # 0xb1
'molm', # 0xb2
'molb', # 0xb3
'mols', # 0xb4
'molt', # 0xb5
'molp', # 0xb6
'molh', # 0xb7
'mom', # 0xb8
'mob', # 0xb9
'mobs', # 0xba
'mos', # 0xbb
'moss', # 0xbc
'mong', # 0xbd
'moj', # 0xbe
'moc', # 0xbf
'mok', # 0xc0
'mot', # 0xc1
'mop', # 0xc2
'moh', # 0xc3
'mwa', # 0xc4
'mwag', # 0xc5
'mwagg', # 0xc6
'mwags', # 0xc7
'mwan', # 0xc8
'mwanj', # 0xc9
'mwanh', # 0xca
'mwad', # 0xcb
'mwal', # 0xcc
'mwalg', # 0xcd
'mwalm', # 0xce
'mwalb', # 0xcf
'mwals', # 0xd0
'mwalt', # 0xd1
'mwalp', # 0xd2
'mwalh', # 0xd3
'mwam', # 0xd4
'mwab', # 0xd5
'mwabs', # 0xd6
'mwas', # 0xd7
'mwass', # 0xd8
'mwang', # 0xd9
'mwaj', # 0xda
'mwac', # 0xdb
'mwak', # 0xdc
'mwat', # 0xdd
'mwap', # 0xde
'mwah', # 0xdf
'mwae', # 0xe0
'mwaeg', # 0xe1
'mwaegg', # 0xe2
'mwaegs', # 0xe3
'mwaen', # 0xe4
'mwaenj', # 0xe5
'mwaenh', # 0xe6
'mwaed', # 0xe7
'mwael', # 0xe8
'mwaelg', # 0xe9
'mwaelm', # 0xea
'mwaelb', # 0xeb
'mwaels', # 0xec
'mwaelt', # 0xed
'mwaelp', # 0xee
'mwaelh', # 0xef
'mwaem', # 0xf0
'mwaeb', # 0xf1
'mwaebs', # 0xf2
'mwaes', # 0xf3
'mwaess', # 0xf4
'mwaeng', # 0xf5
'mwaej', # 0xf6
'mwaec', # 0xf7
'mwaek', # 0xf8
'mwaet', # 0xf9
'mwaep', # 0xfa
'mwaeh', # 0xfb
'moe', # 0xfc
'moeg', # 0xfd
'moegg', # 0xfe
'moegs', # 0xff
)
| gpl-3.0 |
chirilo/kuma | vendor/packages/pygments/lexers/_postgres_builtins.py | 43 | 11210 | # -*- coding: utf-8 -*-
"""
pygments.lexers._postgres_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Self-updating data files for PostgreSQL lexer.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Autogenerated: please edit them if you like wasting your time.
KEYWORDS = (
'ABORT',
'ABSOLUTE',
'ACCESS',
'ACTION',
'ADD',
'ADMIN',
'AFTER',
'AGGREGATE',
'ALL',
'ALSO',
'ALTER',
'ALWAYS',
'ANALYSE',
'ANALYZE',
'AND',
'ANY',
'ARRAY',
'AS',
'ASC',
'ASSERTION',
'ASSIGNMENT',
'ASYMMETRIC',
'AT',
'ATTRIBUTE',
'AUTHORIZATION',
'BACKWARD',
'BEFORE',
'BEGIN',
'BETWEEN',
'BIGINT',
'BINARY',
'BIT',
'BOOLEAN',
'BOTH',
'BY',
'CACHE',
'CALLED',
'CASCADE',
'CASCADED',
'CASE',
'CAST',
'CATALOG',
'CHAIN',
'CHAR',
'CHARACTER',
'CHARACTERISTICS',
'CHECK',
'CHECKPOINT',
'CLASS',
'CLOSE',
'CLUSTER',
'COALESCE',
'COLLATE',
'COLLATION',
'COLUMN',
'COMMENT',
'COMMENTS',
'COMMIT',
'COMMITTED',
'CONCURRENTLY',
'CONFIGURATION',
'CONNECTION',
'CONSTRAINT',
'CONSTRAINTS',
'CONTENT',
'CONTINUE',
'CONVERSION',
'COPY',
'COST',
'CREATE',
'CROSS',
'CSV',
'CURRENT',
'CURRENT_CATALOG',
'CURRENT_DATE',
'CURRENT_ROLE',
'CURRENT_SCHEMA',
'CURRENT_TIME',
'CURRENT_TIMESTAMP',
'CURRENT_USER',
'CURSOR',
'CYCLE',
'DATA',
'DATABASE',
'DAY',
'DEALLOCATE',
'DEC',
'DECIMAL',
'DECLARE',
'DEFAULT',
'DEFAULTS',
'DEFERRABLE',
'DEFERRED',
'DEFINER',
'DELETE',
'DELIMITER',
'DELIMITERS',
'DESC',
'DICTIONARY',
'DISABLE',
'DISCARD',
'DISTINCT',
'DO',
'DOCUMENT',
'DOMAIN',
'DOUBLE',
'DROP',
'EACH',
'ELSE',
'ENABLE',
'ENCODING',
'ENCRYPTED',
'END',
'ENUM',
'ESCAPE',
'EVENT',
'EXCEPT',
'EXCLUDE',
'EXCLUDING',
'EXCLUSIVE',
'EXECUTE',
'EXISTS',
'EXPLAIN',
'EXTENSION',
'EXTERNAL',
'EXTRACT',
'FALSE',
'FAMILY',
'FETCH',
'FILTER',
'FIRST',
'FLOAT',
'FOLLOWING',
'FOR',
'FORCE',
'FOREIGN',
'FORWARD',
'FREEZE',
'FROM',
'FULL',
'FUNCTION',
'FUNCTIONS',
'GLOBAL',
'GRANT',
'GRANTED',
'GREATEST',
'GROUP',
'HANDLER',
'HAVING',
'HEADER',
'HOLD',
'HOUR',
'IDENTITY',
'IF',
'ILIKE',
'IMMEDIATE',
'IMMUTABLE',
'IMPLICIT',
'IN',
'INCLUDING',
'INCREMENT',
'INDEX',
'INDEXES',
'INHERIT',
'INHERITS',
'INITIALLY',
'INLINE',
'INNER',
'INOUT',
'INPUT',
'INSENSITIVE',
'INSERT',
'INSTEAD',
'INT',
'INTEGER',
'INTERSECT',
'INTERVAL',
'INTO',
'INVOKER',
'IS',
'ISNULL',
'ISOLATION',
'JOIN',
'KEY',
'LABEL',
'LANGUAGE',
'LARGE',
'LAST',
'LATERAL',
'LC_COLLATE',
'LC_CTYPE',
'LEADING',
'LEAKPROOF',
'LEAST',
'LEFT',
'LEVEL',
'LIKE',
'LIMIT',
'LISTEN',
'LOAD',
'LOCAL',
'LOCALTIME',
'LOCALTIMESTAMP',
'LOCATION',
'LOCK',
'MAPPING',
'MATCH',
'MATERIALIZED',
'MAXVALUE',
'MINUTE',
'MINVALUE',
'MODE',
'MONTH',
'MOVE',
'NAME',
'NAMES',
'NATIONAL',
'NATURAL',
'NCHAR',
'NEXT',
'NO',
'NONE',
'NOT',
'NOTHING',
'NOTIFY',
'NOTNULL',
'NOWAIT',
'NULL',
'NULLIF',
'NULLS',
'NUMERIC',
'OBJECT',
'OF',
'OFF',
'OFFSET',
'OIDS',
'ON',
'ONLY',
'OPERATOR',
'OPTION',
'OPTIONS',
'OR',
'ORDER',
'ORDINALITY',
'OUT',
'OUTER',
'OVER',
'OVERLAPS',
'OVERLAY',
'OWNED',
'OWNER',
'PARSER',
'PARTIAL',
'PARTITION',
'PASSING',
'PASSWORD',
'PLACING',
'PLANS',
'POLICY',
'POSITION',
'PRECEDING',
'PRECISION',
'PREPARE',
'PREPARED',
'PRESERVE',
'PRIMARY',
'PRIOR',
'PRIVILEGES',
'PROCEDURAL',
'PROCEDURE',
'PROGRAM',
'QUOTE',
'RANGE',
'READ',
'REAL',
'REASSIGN',
'RECHECK',
'RECURSIVE',
'REF',
'REFERENCES',
'REFRESH',
'REINDEX',
'RELATIVE',
'RELEASE',
'RENAME',
'REPEATABLE',
'REPLACE',
'REPLICA',
'RESET',
'RESTART',
'RESTRICT',
'RETURNING',
'RETURNS',
'REVOKE',
'RIGHT',
'ROLE',
'ROLLBACK',
'ROW',
'ROWS',
'RULE',
'SAVEPOINT',
'SCHEMA',
'SCROLL',
'SEARCH',
'SECOND',
'SECURITY',
'SELECT',
'SEQUENCE',
'SEQUENCES',
'SERIALIZABLE',
'SERVER',
'SESSION',
'SESSION_USER',
'SET',
'SETOF',
'SHARE',
'SHOW',
'SIMILAR',
'SIMPLE',
'SMALLINT',
'SNAPSHOT',
'SOME',
'STABLE',
'STANDALONE',
'START',
'STATEMENT',
'STATISTICS',
'STDIN',
'STDOUT',
'STORAGE',
'STRICT',
'STRIP',
'SUBSTRING',
'SYMMETRIC',
'SYSID',
'SYSTEM',
'TABLE',
'TABLES',
'TABLESPACE',
'TEMP',
'TEMPLATE',
'TEMPORARY',
'TEXT',
'THEN',
'TIME',
'TIMESTAMP',
'TO',
'TRAILING',
'TRANSACTION',
'TREAT',
'TRIGGER',
'TRIM',
'TRUE',
'TRUNCATE',
'TRUSTED',
'TYPE',
'TYPES',
'UNBOUNDED',
'UNCOMMITTED',
'UNENCRYPTED',
'UNION',
'UNIQUE',
'UNKNOWN',
'UNLISTEN',
'UNLOGGED',
'UNTIL',
'UPDATE',
'USER',
'USING',
'VACUUM',
'VALID',
'VALIDATE',
'VALIDATOR',
'VALUE',
'VALUES',
'VARCHAR',
'VARIADIC',
'VARYING',
'VERBOSE',
'VERSION',
'VIEW',
'VIEWS',
'VOLATILE',
'WHEN',
'WHERE',
'WHITESPACE',
'WINDOW',
'WITH',
'WITHIN',
'WITHOUT',
'WORK',
'WRAPPER',
'WRITE',
'XML',
'XMLATTRIBUTES',
'XMLCONCAT',
'XMLELEMENT',
'XMLEXISTS',
'XMLFOREST',
'XMLPARSE',
'XMLPI',
'XMLROOT',
'XMLSERIALIZE',
'YEAR',
'YES',
'ZONE',
)
DATATYPES = (
'bigint',
'bigserial',
'bit',
'bit varying',
'bool',
'boolean',
'box',
'bytea',
'char',
'character',
'character varying',
'cidr',
'circle',
'date',
'decimal',
'double precision',
'float4',
'float8',
'inet',
'int',
'int2',
'int4',
'int8',
'integer',
'interval',
'json',
'jsonb',
'line',
'lseg',
'macaddr',
'money',
'numeric',
'path',
'pg_lsn',
'point',
'polygon',
'real',
'serial',
'serial2',
'serial4',
'serial8',
'smallint',
'smallserial',
'text',
'time',
'timestamp',
'timestamptz',
'timetz',
'tsquery',
'tsvector',
'txid_snapshot',
'uuid',
'varbit',
'varchar',
'with time zone',
'without time zone',
'xml',
)
PSEUDO_TYPES = (
    'any',
    'anyelement',
    'anyarray',
    'anynonarray',
    'anyenum',
    'anyrange',
    'cstring',
    'internal',
    'language_handler',
    'fdw_handler',
    'record',
    'trigger',
    'void',
    'opaque',
)

# Drop pseudo types that collide with a (lower-cased) keyword above, so the
# lexer classifies them as keywords instead -- this removes 'any' as well as
# 'trigger', not just 'trigger' as the old comment claimed.
PSEUDO_TYPES = tuple(sorted(set(PSEUDO_TYPES) - set(map(str.lower, KEYWORDS))))
# Keywords recognised only inside PL/pgSQL function bodies (in addition to
# the plain SQL KEYWORDS above).
PLPGSQL_KEYWORDS = (
    'ALIAS', 'CONSTANT', 'DIAGNOSTICS', 'ELSIF', 'EXCEPTION', 'EXIT',
    'FOREACH', 'GET', 'LOOP', 'NOTICE', 'OPEN', 'PERFORM', 'QUERY', 'RAISE',
    'RETURN', 'REVERSE', 'SQLSTATE', 'WHILE',
)
if __name__ == '__main__':  # pragma: no cover
    import re
    try:
        from urllib import urlopen
    except ImportError:
        from urllib.request import urlopen
    from pygments.util import format_lines

    # One man's constant is another man's variable.
    SOURCE_URL = 'https://github.com/postgres/postgres/raw/master'
    KEYWORDS_URL = SOURCE_URL + '/doc/src/sgml/keywords.sgml'
    DATATYPES_URL = SOURCE_URL + '/doc/src/sgml/datatype.sgml'

    def update_myself():
        # Re-scrape keywords and datatypes from the PostgreSQL SGML docs
        # and rewrite the constant tuples in this very file.
        data_file = list(urlopen(DATATYPES_URL))
        datatypes = parse_datatypes(data_file)
        pseudos = parse_pseudos(data_file)

        keywords = parse_keywords(urlopen(KEYWORDS_URL))
        update_consts(__file__, 'DATATYPES', datatypes)
        update_consts(__file__, 'PSEUDO_TYPES', pseudos)
        update_consts(__file__, 'KEYWORDS', keywords)

    def parse_keywords(f):
        # Extract <token> entries from the keyword table in keywords.sgml.
        kw = []
        for m in re.finditer(
                r'\s*<entry><token>([^<]+)</token></entry>\s*'
                r'<entry>([^<]+)</entry>', f.read()):
            kw.append(m.group(1))

        if not kw:
            raise ValueError('no keyword found')

        kw.sort()
        return kw

    def parse_datatypes(f):
        # Scan datatype.sgml up to the first <sect1>, collecting type names.
        dt = set()
        for line in f:
            if '<sect1' in line:
                break
            if '<entry><type>' not in line:
                continue

            # Parse a string such as
            # time [ (<replaceable>p</replaceable>) ] [ without time zone ]
            # into types "time" and "without time zone"

            # remove all the tags
            line = re.sub("<replaceable>[^<]+</replaceable>", "", line)
            line = re.sub("<[^>]+>", "", line)

            # Drop the parts containing braces
            # NOTE(review): 'tmp' is reused as both the inner comprehension
            # variable and the outer loop target -- confusing but functional.
            for tmp in [t for tmp in line.split('[')
                        for t in tmp.split(']') if "(" not in t]:
                for t in tmp.split(','):
                    t = t.strip()
                    if not t: continue
                    dt.add(" ".join(t.split()))

        dt = list(dt)
        dt.sort()
        return dt

    def parse_pseudos(f):
        # Pull entries out of the pseudo-types table in datatype.sgml.
        dt = []
        re_start = re.compile(r'\s*<table id="datatype-pseudotypes-table">')
        re_entry = re.compile(r'\s*<entry><type>([^<]+)</></entry>')
        re_end = re.compile(r'\s*</table>')

        f = iter(f)
        # Skip ahead to the start of the pseudo-types table.
        for line in f:
            if re_start.match(line) is not None:
                break
        else:
            raise ValueError('pseudo datatypes table not found')

        # Collect entries until the table closes.
        for line in f:
            m = re_entry.match(line)
            if m is not None:
                dt.append(m.group(1))

            if re_end.match(line) is not None:
                break
        else:
            raise ValueError('end of pseudo datatypes table not found')

        if not dt:
            raise ValueError('pseudo datatypes not found')
        return dt

    def update_consts(filename, constname, content):
        # Splice a freshly formatted 'CONSTNAME = (...)' block over the
        # existing definition in this source file.
        with open(filename) as f:
            data = f.read()

        # Line to start/end inserting
        re_match = re.compile(r'^%s\s*=\s*\($.*?^\s*\)$' % constname, re.M | re.S)
        m = re_match.search(data)
        if not m:
            raise ValueError('Could not find existing definition for %s' %
                             (constname,))

        new_block = format_lines(constname, content)
        data = data[:m.start()] + new_block + data[m.end():]

        with open(filename, 'w') as f:
            f.write(data)

    update_myself()
| mpl-2.0 |
n-pigeon/godot | platform/x11/detect.py | 4 | 9137 | import os
import platform
import sys
def is_active():
    # SCons platform hook: the X11 platform detector is always considered.
    return True
def get_name():
    # Human-readable platform name shown by the build system.
    return "X11"
def can_build():
    """Return True when the X11 platform can be built on this host.

    Requires a POSIX host other than macOS, a working pkg-config, and the
    minimal X11 development libraries.  Prints a diagnostic and returns
    False on the first missing dependency.
    """
    if (os.name != "posix" or sys.platform == "darwin"):
        return False

    # pkg-config itself must exist before we can probe for any library.
    x11_error = os.system("pkg-config --version > /dev/null")
    if (x11_error):
        print("pkg-config not found.. x11 disabled.")
        return False

    # Probe each minimal dependency; messages match the original
    # copy-pasted checks byte for byte.
    for pkg, label in (("x11", "X11"), ("xcursor", "xcursor"),
                       ("xinerama", "xinerama"), ("xrandr", "xrandr")):
        x11_error = os.system("pkg-config %s --modversion > /dev/null " % pkg)
        if (x11_error):
            print("%s not found.. x11 disabled." % label)
            return False

    return True
def get_opts():
    """Return the SCons build variables specific to the X11 platform."""
    # Imported lazily: SCons is only available when run from the build system.
    from SCons.Variables import BoolVariable, EnumVariable

    return [
        BoolVariable('use_llvm', 'Use the LLVM compiler', False),
        BoolVariable('use_static_cpp', 'Link stdc++ statically', False),
        BoolVariable('use_sanitizer', 'Use LLVM compiler address sanitizer', False),
        BoolVariable('use_leak_sanitizer', 'Use LLVM compiler memory leaks sanitizer (implies use_sanitizer)', False),
        BoolVariable('pulseaudio', 'Detect & use pulseaudio', True),
        BoolVariable('udev', 'Use udev for gamepad connection callbacks', False),
        EnumVariable('debug_symbols', 'Add debug symbols to release version', 'yes', ('yes', 'no', 'full')),
    ]
def get_flags():
    """Return builtin-library flags forced off (system libraries preferred)."""
    system_provided = ('builtin_freetype', 'builtin_libpng',
                       'builtin_openssl', 'builtin_zlib')
    return [(flag, False) for flag in system_provided]
def configure(env):
    """Mutate the SCons *env* in place with all X11 build settings.

    Sections, in order: optimization/debug flags per target, architecture
    bits, compiler selection (GCC/Clang, sanitizers, LTO), system library
    dependencies via pkg-config, audio/input drivers, platform defines and
    link libraries, and cross-compilation flags.  Statement order matters:
    several pkg-config calls depend on builtin_* flags set just above them.
    """
    ## Build type
    if (env["target"] == "release"):
        # -O3 -ffast-math is identical to -Ofast. We need to split it out so we can selectively disable
        # -ffast-math in code for which it generates wrong results.
        env.Prepend(CCFLAGS=['-O3', '-ffast-math'])
        if (env["debug_symbols"] == "yes"):
            env.Prepend(CCFLAGS=['-g1'])
        if (env["debug_symbols"] == "full"):
            env.Prepend(CCFLAGS=['-g2'])
    elif (env["target"] == "release_debug"):
        env.Prepend(CCFLAGS=['-O2', '-ffast-math', '-DDEBUG_ENABLED'])
        if (env["debug_symbols"] == "yes"):
            env.Prepend(CCFLAGS=['-g1'])
        if (env["debug_symbols"] == "full"):
            env.Prepend(CCFLAGS=['-g2'])
    elif (env["target"] == "debug"):
        env.Prepend(CCFLAGS=['-g3', '-DDEBUG_ENABLED', '-DDEBUG_MEMORY_ENABLED'])
        env.Append(LINKFLAGS=['-rdynamic'])

    ## Architecture
    is64 = sys.maxsize > 2**32
    if (env["bits"] == "default"):
        env["bits"] = "64" if is64 else "32"

    ## Compiler configuration
    if 'CXX' in env and 'clang' in env['CXX']:
        # Convenience check to enforce the use_llvm overrides when CXX is clang(++)
        env['use_llvm'] = True

    if env['use_llvm']:
        if ('clang++' not in env['CXX']):
            env["CC"] = "clang"
            env["CXX"] = "clang++"
            env["LD"] = "clang++"
        env.Append(CPPFLAGS=['-DTYPED_METHOD_BIND'])
        env.extra_suffix = ".llvm" + env.extra_suffix

    # leak sanitizer requires (address) sanitizer
    if env['use_sanitizer'] or env['use_leak_sanitizer']:
        env.Append(CCFLAGS=['-fsanitize=address', '-fno-omit-frame-pointer'])
        env.Append(LINKFLAGS=['-fsanitize=address'])
        env.extra_suffix += "s"
        if env['use_leak_sanitizer']:
            env.Append(CCFLAGS=['-fsanitize=leak'])
            env.Append(LINKFLAGS=['-fsanitize=leak'])

    if env['use_lto']:
        env.Append(CCFLAGS=['-flto'])
        # GCC parallelizes LTO link when asked; Clang handles this itself.
        if not env['use_llvm'] and env.GetOption("num_jobs") > 1:
            env.Append(LINKFLAGS=['-flto=' + str(env.GetOption("num_jobs"))])
        else:
            env.Append(LINKFLAGS=['-flto'])
        if not env['use_llvm']:
            env['RANLIB'] = 'gcc-ranlib'
            env['AR'] = 'gcc-ar'

    env.Append(CCFLAGS=['-pipe'])
    env.Append(LINKFLAGS=['-pipe'])

    ## Dependencies
    env.ParseConfig('pkg-config x11 --cflags --libs')
    env.ParseConfig('pkg-config xcursor --cflags --libs')
    env.ParseConfig('pkg-config xinerama --cflags --libs')
    env.ParseConfig('pkg-config xrandr --cflags --libs')

    # FIXME: Check for existence of the libs before parsing their flags with pkg-config
    if not env['builtin_openssl']:
        env.ParseConfig('pkg-config openssl --cflags --libs')
    if not env['builtin_libwebp']:
        env.ParseConfig('pkg-config libwebp --cflags --libs')

    # freetype depends on libpng and zlib, so bundling one of them while keeping others
    # as shared libraries leads to weird issues
    if env['builtin_freetype'] or env['builtin_libpng'] or env['builtin_zlib']:
        env['builtin_freetype'] = True
        env['builtin_libpng'] = True
        env['builtin_zlib'] = True

    if not env['builtin_freetype']:
        env.ParseConfig('pkg-config freetype2 --cflags --libs')
    if not env['builtin_libpng']:
        env.ParseConfig('pkg-config libpng --cflags --libs')
    if not env['builtin_enet']:
        env.ParseConfig('pkg-config libenet --cflags --libs')
    if not env['builtin_squish'] and env['tools']:
        env.ParseConfig('pkg-config libsquish --cflags --libs')
    if not env['builtin_zstd']:
        env.ParseConfig('pkg-config libzstd --cflags --libs')

    # Sound and video libraries
    # Keep the order as it triggers chained dependencies (ogg needed by others, etc.)
    if not env['builtin_libtheora']:
        env['builtin_libogg'] = False  # Needed to link against system libtheora
        env['builtin_libvorbis'] = False  # Needed to link against system libtheora
        env.ParseConfig('pkg-config theora theoradec --cflags --libs')
    if not env['builtin_libvpx']:
        env.ParseConfig('pkg-config vpx --cflags --libs')
    if not env['builtin_libvorbis']:
        env['builtin_libogg'] = False  # Needed to link against system libvorbis
        env.ParseConfig('pkg-config vorbis vorbisfile --cflags --libs')
    if not env['builtin_opus']:
        env['builtin_libogg'] = False  # Needed to link against system opus
        env.ParseConfig('pkg-config opus opusfile --cflags --libs')
    if not env['builtin_libogg']:
        env.ParseConfig('pkg-config ogg --cflags --libs')

    if env['builtin_libtheora']:
        list_of_x86 = ['x86_64', 'x86', 'i386', 'i586']
        if any(platform.machine() in s for s in list_of_x86):
            env["x86_libtheora_opt_gcc"] = True

    # On Linux wchar_t should be 32-bits
    # 16-bit library shouldn't be required due to compiler optimisations
    if not env['builtin_pcre2']:
        env.ParseConfig('pkg-config libpcre2-32 --cflags --libs')

    ## Flags
    if (os.system("pkg-config --exists alsa") == 0):  # 0 means found
        print("Enabling ALSA")
        env.Append(CPPFLAGS=["-DALSA_ENABLED"])
        env.ParseConfig('pkg-config alsa --cflags --libs')
    else:
        print("ALSA libraries not found, disabling driver")

    if env['pulseaudio']:
        if (os.system("pkg-config --exists libpulse-simple") == 0):  # 0 means found
            print("Enabling PulseAudio")
            env.Append(CPPFLAGS=["-DPULSEAUDIO_ENABLED"])
            env.ParseConfig('pkg-config --cflags --libs libpulse-simple')
        else:
            print("PulseAudio development libraries not found, disabling driver")

    if (platform.system() == "Linux"):
        env.Append(CPPFLAGS=["-DJOYDEV_ENABLED"])
        if env['udev']:
            if (os.system("pkg-config --exists libudev") == 0):  # 0 means found
                print("Enabling udev support")
                env.Append(CPPFLAGS=["-DUDEV_ENABLED"])
                env.ParseConfig('pkg-config libudev --cflags --libs')
            else:
                print("libudev development libraries not found, disabling udev support")

    # Linkflags below this line should typically stay the last ones
    if not env['builtin_zlib']:
        env.ParseConfig('pkg-config zlib --cflags --libs')

    env.Append(CPPPATH=['#platform/x11'])
    env.Append(CPPFLAGS=['-DX11_ENABLED', '-DUNIX_ENABLED', '-DOPENGL_ENABLED', '-DGLES2_ENABLED', '-DGLES_OVER_GL'])
    env.Append(LIBS=['GL', 'pthread'])

    if (platform.system() == "Linux"):
        env.Append(LIBS=['dl'])

    if (platform.system().find("BSD") >= 0):
        env.Append(LIBS=['execinfo'])

    ## Cross-compilation
    if (is64 and env["bits"] == "32"):
        env.Append(CPPFLAGS=['-m32'])
        env.Append(LINKFLAGS=['-m32', '-L/usr/lib/i386-linux-gnu'])
    elif (not is64 and env["bits"] == "64"):
        env.Append(CPPFLAGS=['-m64'])
        env.Append(LINKFLAGS=['-m64', '-L/usr/lib/i686-linux-gnu'])

    if env['use_static_cpp']:
        env.Append(LINKFLAGS=['-static-libstdc++'])
| mit |
frank10704/DF_GCS_W | MissionPlanner-master/packages/IronPython.StdLib.2.7.5-beta1/content/Lib/encodings/mac_romanian.py | 93 | 14224 | """ Python Character Mapping Codec mac_romanian generated from 'MAPPINGS/VENDORS/APPLE/ROMANIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec: both directions delegate to the charmap helpers using
    # the encoding/decoding tables defined at the bottom of this module.
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding is context-free, so 'final' needs no special handling;
        # [0] drops the consumed-length part of the (output, length) tuple.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Charmap decoding is context-free, so 'final' needs no special handling;
        # [0] drops the consumed-length part of the (output, length) tuple.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer: inherits the charmap encode() from Codec."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader: inherits the charmap decode() from Codec."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry the encodings package registers."""
    # Codec is stateless, so one shared instance serves both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='mac-romanian',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u0102' # 0xAE -> LATIN CAPITAL LETTER A WITH BREVE
u'\u0218' # 0xAF -> LATIN CAPITAL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\u0103' # 0xBE -> LATIN SMALL LETTER A WITH BREVE
u'\u0219' # 0xBF -> LATIN SMALL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u021a' # 0xDE -> LATIN CAPITAL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later
u'\u021b' # 0xDF -> LATIN SMALL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later
u'\u2021' # 0xE0 -> DOUBLE DAGGER
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| gpl-3.0 |
B-MOOC/edx-platform | common/test/acceptance/tests/studio/test_import_export.py | 22 | 12624 | """
Acceptance tests for the Import and Export pages
"""
from abc import abstractmethod
from bok_choy.promise import EmptyPromise
from datetime import datetime
from .base_studio_test import StudioLibraryTest, StudioCourseTest
from ...fixtures.course import XBlockFixtureDesc
from ...pages.studio.import_export import ExportLibraryPage, ExportCoursePage, ImportLibraryPage, ImportCoursePage
from ...pages.studio.library import LibraryEditPage
from ...pages.studio.container import ContainerPage
from ...pages.studio.overview import CourseOutlinePage
# pylint: disable=no-member
class ExportTestMixin(object):
    """
    Tests to run both for course and library export pages.
    """
    # Subclasses must set self.export_page in setUp().
    def test_export(self):
        """
        Scenario: I am able to export a course or library
        Given that I have a course or library
        And I click the download button
        The download will succeed
        And the file will be of the right MIME type.
        """
        # download_tarball() returns (status_ok, is_tarball_mimetype).
        good_status, is_tarball_mimetype = self.export_page.download_tarball()
        self.assertTrue(good_status)
        self.assertTrue(is_tarball_mimetype)
class TestCourseExport(ExportTestMixin, StudioCourseTest):
    """
    Export tests for courses.
    """
    def setUp(self):  # pylint: disable=arguments-differ
        super(TestCourseExport, self).setUp()
        # Export page for the fixture course created by StudioCourseTest.
        self.export_page = ExportCoursePage(
            self.browser,
            self.course_info['org'], self.course_info['number'], self.course_info['run'],
        )
        self.export_page.visit()
    def test_header(self):
        """
        Scenario: I should see the correct text when exporting a course.
        Given that I have a course to export from
        When I visit the export page
        The correct header should be shown
        """
        self.assertEqual(self.export_page.header_text, 'Course Export')
class TestLibraryExport(ExportTestMixin, StudioLibraryTest):
    """
    Export tests for libraries.
    """
    def setUp(self):  # pylint: disable=arguments-differ
        """
        Ensure a library exists and navigate to the library edit page.
        """
        super(TestLibraryExport, self).setUp()
        # StudioLibraryTest provides self.library_key for the fixture library.
        self.export_page = ExportLibraryPage(self.browser, self.library_key)
        self.export_page.visit()
    def test_header(self):
        """
        Scenario: I should see the correct text when exporting a library.
        Given that I have a library to export from
        When I visit the export page
        The correct header should be shown
        """
        self.assertEqual(self.export_page.header_text, 'Library Export')
# pylint: disable=no-member
class BadExportMixin(object):
    """
    Test mixin for bad exports.

    Subclasses must set self.export_page and self.edit_page in setUp().
    """
    def test_bad_export(self):
        """
        Scenario: I should receive an error when attempting to export a broken course or library.
        Given that I have a course or library
        No error modal should be showing
        When I click the export button
        An error modal should be shown
        When I click the modal's action button
        I should arrive at the edit page for the broken component
        """
        # No error should be there to start.
        self.assertFalse(self.export_page.is_error_modal_showing())
        self.export_page.click_export()
        self.export_page.wait_for_error_modal()
        self.export_page.click_modal_button()
        # Fix: the promise must be evaluated. Previously the EmptyPromise was
        # constructed but never fulfill()ed, and the lambda returned the bound
        # method object (always truthy) instead of calling it, so this "wait"
        # asserted nothing.
        EmptyPromise(
            lambda: self.edit_page.is_browser_on_page(),
            'Arrived at component edit page',
            timeout=30
        ).fulfill()
class TestLibraryBadExport(BadExportMixin, StudioLibraryTest):
    """
    Verify exporting a bad library causes an error.
    """
    def setUp(self):
        """
        Set up the pages and start the tests.
        """
        super(TestLibraryBadExport, self).setUp()
        self.export_page = ExportLibraryPage(self.browser, self.library_key)
        self.edit_page = LibraryEditPage(self.browser, self.library_key)
        self.export_page.visit()
    def populate_library_fixture(self, library_fixture):
        """
        Create a library with a bad component.
        """
        library_fixture.add_children(
            # data='<' is deliberately malformed markup, so export must fail.
            XBlockFixtureDesc("problem", "Bad Problem", data='<'),
        )
class TestCourseBadExport(BadExportMixin, StudioCourseTest):
    """
    Verify exporting a bad course causes an error.
    """
    # Wait for this page method before considering the edit page loaded.
    ready_method = 'wait_for_component_menu'
    def setUp(self):  # pylint: disable=arguments-differ
        super(TestCourseBadExport, self).setUp()
        self.export_page = ExportCoursePage(
            self.browser,
            self.course_info['org'], self.course_info['number'], self.course_info['run'],
        )
        # self.unit is created in populate_course_fixture (called by setUp).
        self.edit_page = ContainerPage(self.browser, self.unit.locator)
        self.export_page.visit()
    def populate_course_fixture(self, course_fixture):
        """
        Populate the course with a unit that has a bad problem.
        """
        self.unit = XBlockFixtureDesc('vertical', 'Unit')
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Main Section').add_children(
                XBlockFixtureDesc('sequential', 'Subsection').add_children(
                    self.unit.add_children(
                        # data='<' is deliberately malformed markup.
                        XBlockFixtureDesc("problem", "Bad Problem", data='<')
                    )
                )
            )
        )
# pylint: disable=no-member
class ImportTestMixin(object):
    """
    Tests to run for both course and library import pages.

    Subclasses must define ``import_page_class``, ``landing_page_class``,
    ``tarball_name``, ``bad_tarball_name`` and implement ``page_args()``.
    """
    def setUp(self):
        super(ImportTestMixin, self).setUp()
        self.import_page = self.import_page_class(*self.page_args())
        self.landing_page = self.landing_page_class(*self.page_args())
        self.import_page.visit()

    @abstractmethod
    def page_args(self):
        """
        Generates the args for initializing a page object.
        """
        return []

    def test_upload(self):
        """
        Scenario: I want to upload a course or library for import.
        Given that I have a library or course to import into
        And I have a valid .tar.gz file containing data to replace it with
        I can select the file and upload it
        And the page will give me confirmation that it uploaded successfully
        """
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_upload()

    def test_import_timestamp(self):
        """
        Scenario: I perform a course / library import
        On import success, the page displays a UTC timestamp previously not visible
        And if I refresh the page, the timestamp is still displayed
        """
        self.assertFalse(self.import_page.is_timestamp_visible())
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_upload()
        utc_now = datetime.utcnow()
        # Fix: wait for the timestamp to be rendered *before* reading it.
        # Reading first raced the DOM update and could grab a stale value.
        self.import_page.wait_for_timestamp_visible()
        import_date, import_time = self.import_page.timestamp
        # NOTE(review): minute-granularity comparison can flake if the import
        # finishes right at a minute rollover.
        self.assertEqual(utc_now.strftime('%m/%d/%Y'), import_date)
        self.assertEqual(utc_now.strftime('%H:%M'), import_time)
        self.import_page.visit()
        self.import_page.wait_for_tasks(completed=True)
        self.import_page.wait_for_timestamp_visible()

    def test_landing_url(self):
        """
        Scenario: When uploading a library or course, a link appears for me to view the changes.
        Given that I upload a library or course
        A button will appear that contains the URL to the library or course's main page
        """
        self.import_page.upload_tarball(self.tarball_name)
        self.assertEqual(self.import_page.finished_target_url(), self.landing_page.url)

    def test_bad_filename_error(self):
        """
        Scenario: I should be reprimanded for trying to upload something that isn't a .tar.gz file.
        Given that I select a file that is an .mp4 for upload
        An error message will appear
        """
        self.import_page.upload_tarball('funny_cat_video.mp4')
        self.import_page.wait_for_filename_error()

    def test_task_list(self):
        """
        Scenario: I should see feedback checkpoints when uploading a course or library
        Given that I am on an import page
        No task checkpoint list should be showing
        When I upload a valid tarball
        Each task in the checklist should be marked confirmed
        And the task list should be visible
        """
        # The task list shouldn't be visible to start.
        self.assertFalse(self.import_page.is_task_list_showing(), "Task list shown too early.")
        self.import_page.wait_for_tasks()
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_tasks(completed=True)
        self.assertTrue(self.import_page.is_task_list_showing(), "Task list did not display.")

    def test_bad_import(self):
        """
        Scenario: I should see a failed checklist when uploading an invalid course or library
        Given that I am on an import page
        And I upload a tarball with a broken XML file
        The tasks should be confirmed up until the 'Updating' task
        And the 'Updating' task should be marked failed
        And the remaining tasks should not be marked as started
        """
        self.import_page.upload_tarball(self.bad_tarball_name)
        self.import_page.wait_for_tasks(fail_on='Updating')
class TestCourseImport(ImportTestMixin, StudioCourseTest):
    """
    Tests the Course import page
    """
    # Fixture archives used by ImportTestMixin's tests.
    tarball_name = '2015.lzdwNM.tar.gz'
    bad_tarball_name = 'bad_course.tar.gz'
    import_page_class = ImportCoursePage
    landing_page_class = CourseOutlinePage
    def page_args(self):
        return [self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']]
    def test_course_updated(self):
        """
        Given that I visit an empty course before import
        I should not see a section named 'Section'
        When I visit the import page
        And I upload a course that has a section named 'Section'
        And I visit the course outline page again
        The section named 'Section' should now be available
        """
        self.landing_page.visit()
        # Should not exist yet.
        self.assertRaises(IndexError, self.landing_page.section, "Section")
        self.import_page.visit()
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_upload()
        self.landing_page.visit()
        # There's a section named 'Section' in the tarball.
        self.landing_page.section("Section")
    def test_header(self):
        """
        Scenario: I should see the correct text when importing a course.
        Given that I have a course to import to
        When I visit the import page
        The correct header should be shown
        """
        self.assertEqual(self.import_page.header_text, 'Course Import')
class TestLibraryImport(ImportTestMixin, StudioLibraryTest):
    """
    Tests the Library import page
    """
    # Fixture archives used by ImportTestMixin's tests.
    tarball_name = 'library.HhJfPD.tar.gz'
    bad_tarball_name = 'bad_library.tar.gz'
    import_page_class = ImportLibraryPage
    landing_page_class = LibraryEditPage
    def page_args(self):
        return [self.browser, self.library_key]
    def test_library_updated(self):
        """
        Given that I visit an empty library
        No XBlocks should be shown
        When I visit the import page
        And I upload a library that contains three XBlocks
        And I visit the library page
        Three XBlocks should be shown
        """
        self.landing_page.visit()
        self.landing_page.wait_until_ready()
        # No items should be in the library to start.
        self.assertEqual(len(self.landing_page.xblocks), 0)
        self.import_page.visit()
        self.import_page.upload_tarball(self.tarball_name)
        self.import_page.wait_for_upload()
        self.landing_page.visit()
        self.landing_page.wait_until_ready()
        # There are three blocks in the tarball.
        self.assertEqual(len(self.landing_page.xblocks), 3)
    def test_header(self):
        """
        Scenario: I should see the correct text when importing a library.
        Given that I have a library to import to
        When I visit the import page
        The correct header should be shown
        """
        self.assertEqual(self.import_page.header_text, 'Library Import')
| agpl-3.0 |
kkellerlbl/data_api | lib/doekbase/handle/Client.py | 5 | 8708 | ############################################################
#
# Autogenerated by the KBase type compiler -
# any changes made here will be overwritten
#
############################################################
try:
import json as _json
except ImportError:
import sys
sys.path.append('simplejson-2.3.3')
import simplejson as _json
import requests as _requests
import urlparse as _urlparse
import random as _random
import base64 as _base64
from ConfigParser import ConfigParser as _ConfigParser
import os as _os
_CT = 'content-type'
_AJ = 'application/json'
_URL_SCHEME = frozenset(['http', 'https'])
def _get_token(user_id, password,
               auth_svc='https://nexus.api.globusonline.org/goauth/token?' +
               'grant_type=client_credentials'):
    """Fetch a Globus Online auth token for user_id/password via HTTP Basic.
    Returns the 'access_token' string; raises Exception on auth failure
    (HTTP 403) or any other non-2xx response.
    """
    # This is bandaid helper function until we get a full
    # KBase python auth client released
    auth = _base64.encodestring(user_id + ':' + password)
    headers = {'Authorization': 'Basic ' + auth}
    ret = _requests.get(auth_svc, headers=headers, allow_redirects=True)
    status = ret.status_code
    if status >= 200 and status <= 299:
        tok = _json.loads(ret.text)
    elif status == 403:
        raise Exception('Authentication failed: Bad user_id/password ' +
                        'combination for user %s' % (user_id))
    else:
        raise Exception(ret.text)
    return tok['access_token']
def _read_rcfile(file=_os.environ['HOME'] + '/.authrc'):  # @ReservedAssignment
    """Read auth settings from a JSON ~/.authrc file.
    Returns a dict containing only the recognized keys (missing ones map to
    None), or None if the file doesn't exist or can't be parsed.
    NOTE(review): the default path is computed at import time and assumes
    HOME is set in the environment.
    """
    # Another bandaid to read in the ~/.authrc file if one is present
    authdata = None
    if _os.path.exists(file):
        try:
            with open(file) as authrc:
                rawdata = _json.load(authrc)
                # strip down whatever we read to only what is legit
                authdata = {x: rawdata.get(x) for x in (
                    'user_id', 'token', 'client_secret', 'keyfile',
                    'keyfile_passphrase', 'password')}
        except Exception, e:
            print "Error while reading authrc file %s: %s" % (file, e)
    return authdata
def _read_inifile(file=_os.environ.get(  # @ReservedAssignment
        'KB_DEPLOYMENT_CONFIG', _os.environ['HOME'] +
        '/.kbase_config')):
    """Read auth settings from the [authentication] section of an INI file.
    Defaults to $KB_DEPLOYMENT_CONFIG, falling back to ~/.kbase_config.
    Returns a dict of recognized keys (absent options map to None), or None
    on missing file or parse error.
    """
    # Another bandaid to read in the ~/.kbase_config file if one is present
    authdata = None
    if _os.path.exists(file):
        try:
            config = _ConfigParser()
            config.read(file)
            # strip down whatever we read to only what is legit
            authdata = {x: config.get('authentication', x)
                        if config.has_option('authentication', x)
                        else None for x in ('user_id', 'token',
                                            'client_secret', 'keyfile',
                                            'keyfile_passphrase', 'password')}
        except Exception, e:
            print "Error while reading INI file %s: %s" % (file, e)
    return authdata
class ServerError(Exception):
    """Error reported by the handle service over JSON-RPC.
    'data' carries JSON-RPC 2.0 error detail; 'error' is the 1.1 equivalent;
    whichever is present is stored on self.data.
    """
    def __init__(self, name, code, message, data=None, error=None):
        self.name = name
        self.code = code
        self.message = '' if message is None else message
        self.data = data or error or ''
        # data = JSON RPC 2.0, error = 1.1
    def __str__(self):
        return self.name + ': ' + str(self.code) + '. ' + self.message + \
            '\n' + self.data
class _JSONObjectEncoder(_json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
return _json.JSONEncoder.default(self, obj)
class AbstractHandle(object):
    """JSON-RPC 1.1 client for the KBase AbstractHandle service.

    Each public method issues one RPC via ``_call`` and returns the first
    element of the service's result list (void calls return nothing).
    """
    def __init__(self, url=None, timeout=30 * 60, user_id=None,
                 password=None, token=None, ignore_authrc=False,
                 trust_all_ssl_certificates=False):
        if url is None:
            url = 'http://localhost:7109'
        scheme, _, _, _, _, _ = _urlparse.urlparse(url)
        if scheme not in _URL_SCHEME:
            raise ValueError(url + " isn't a valid http url")
        self.url = url
        self.timeout = int(timeout)
        self._headers = dict()
        self.trust_all_ssl_certificates = trust_all_ssl_certificates
        # Credential resolution order: explicit token > user_id/password >
        # KB_AUTH_TOKEN env var > ~/.kbase_config INI > ~/.authrc JSON.
        if token is not None:
            self._headers['AUTHORIZATION'] = token
        elif user_id is not None and password is not None:
            self._headers['AUTHORIZATION'] = _get_token(user_id, password)
        elif 'KB_AUTH_TOKEN' in _os.environ:
            self._headers['AUTHORIZATION'] = _os.environ.get('KB_AUTH_TOKEN')
        elif not ignore_authrc:
            authdata = _read_inifile()
            if authdata is None:
                authdata = _read_rcfile()
            if authdata is not None:
                if authdata.get('token') is not None:
                    self._headers['AUTHORIZATION'] = authdata['token']
                elif(authdata.get('user_id') is not None
                     and authdata.get('password') is not None):
                    self._headers['AUTHORIZATION'] = _get_token(
                        authdata['user_id'], authdata['password'])
        if self.timeout < 1:
            raise ValueError('Timeout value must be at least 1 second')

    def _call(self, method, params):
        """POST one JSON-RPC request; return the 'result' list.

        Raises ServerError for service-reported failures; other non-OK HTTP
        statuses propagate via requests' raise_for_status().
        """
        arg_hash = {'method': method,
                    'params': params,
                    'version': '1.1',
                    'id': str(_random.random())[2:]
                    }
        body = _json.dumps(arg_hash, cls=_JSONObjectEncoder)
        ret = _requests.post(self.url, data=body, headers=self._headers,
                             timeout=self.timeout,
                             verify=not self.trust_all_ssl_certificates)
        if ret.status_code == _requests.codes.server_error:
            # A 500 with a JSON body carries a structured service error.
            if _CT in ret.headers and ret.headers[_CT] == _AJ:
                err = _json.loads(ret.text)
                if 'error' in err:
                    raise ServerError(**err['error'])
                else:
                    raise ServerError('Unknown', 0, ret.text)
            else:
                raise ServerError('Unknown', 0, ret.text)
        if ret.status_code != _requests.codes.OK:
            ret.raise_for_status()
        resp = _json.loads(ret.text)
        if 'result' not in resp:
            raise ServerError('Unknown', 0, 'An unknown server error occurred')
        return resp['result']

    def new_handle(self):
        resp = self._call('AbstractHandle.new_handle', [])
        return resp[0]

    def localize_handle(self, h1, service_name):
        resp = self._call('AbstractHandle.localize_handle', [h1, service_name])
        return resp[0]

    def initialize_handle(self, h1):
        resp = self._call('AbstractHandle.initialize_handle', [h1])
        return resp[0]

    def persist_handle(self, h):
        resp = self._call('AbstractHandle.persist_handle', [h])
        return resp[0]

    def upload(self, infile):
        resp = self._call('AbstractHandle.upload', [infile])
        return resp[0]

    def download(self, h, outfile):
        self._call('AbstractHandle.download', [h, outfile])

    def upload_metadata(self, h, infile):
        self._call('AbstractHandle.upload_metadata', [h, infile])

    def download_metadata(self, h, outfile):
        self._call('AbstractHandle.download_metadata', [h, outfile])

    def ids_to_handles(self, ids):
        # Fix: this method was defined twice with identical bodies in the
        # generated code; the redundant later definition has been removed.
        resp = self._call('AbstractHandle.ids_to_handles', [ids])
        return resp[0]

    def hids_to_handles(self, hids):
        resp = self._call('AbstractHandle.hids_to_handles', [hids])
        return resp[0]

    def are_readable(self, arg_1):
        resp = self._call('AbstractHandle.are_readable', [arg_1])
        return resp[0]

    def is_readable(self, id):
        resp = self._call('AbstractHandle.is_readable', [id])
        return resp[0]

    def list_handles(self):
        resp = self._call('AbstractHandle.list_handles', [])
        return resp[0]

    def delete_handles(self, l):
        self._call('AbstractHandle.delete_handles', [l])

    def give(self, user, perm, h):
        self._call('AbstractHandle.give', [user, perm, h])
| mit |
asdofindia/kitsune | kitsune/forums/models.py | 15 | 13844 | import datetime
import time
from django.db import models
from django.db.models import Q
from django.db.models.signals import pre_save
from django.contrib.contenttypes import generic
from django.contrib.auth.models import User
from tidings.models import NotificationsMixin
from kitsune import forums
from kitsune.access import has_perm, perm_is_defined_on
from kitsune.flagit.models import FlaggedObject
from kitsune.sumo.helpers import urlparams, wiki_to_html
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.models import ModelBase
from kitsune.search.models import (
SearchMappingType, SearchMixin, register_for_indexing,
register_mapping_type)
def _last_post_from(posts, exclude_post=None):
"""Return the most recent post in the given set, excluding the given post.
If there are none, return None.
"""
if exclude_post:
posts = posts.exclude(id=exclude_post.id)
posts = posts.order_by('-created')
try:
return posts[0]
except IndexError:
return None
class ThreadLockedError(Exception):
    """Trying to create a post in a locked thread."""
    # Raised by Thread.new_post() when thread.is_locked is set.
class Forum(NotificationsMixin, ModelBase):
    """A discussion forum: a named, ordered container of threads whose
    viewing/posting may be restricted via django-authority permissions."""
    name = models.CharField(max_length=50, unique=True)
    slug = models.SlugField(unique=True)
    description = models.TextField(null=True)
    # Denormalized pointer to the newest post anywhere in this forum;
    # maintained by update_last_post() and Thread/Post save/delete hooks.
    last_post = models.ForeignKey('Post', related_name='last_post_in_forum',
                                  null=True, on_delete=models.SET_NULL)
    # Dictates the order in which forums are displayed in the forum list.
    display_order = models.IntegerField(default=1, db_index=True)
    # Whether or not this forum is visible in the forum list.
    is_listed = models.BooleanField(default=True, db_index=True)
    class Meta(object):
        ordering = ['display_order', 'id']
        permissions = (
            ('view_in_forum', 'Can view restricted forums'),
            ('post_in_forum', 'Can post in restricted forums'))
    def __unicode__(self):
        return self.name
    def get_absolute_url(self):
        # Thread-list page for this forum.
        return reverse('forums.threads', kwargs={'forum_slug': self.slug})
    def allows_viewing_by(self, user):
        """Return whether a user can view me, my threads, and their posts."""
        return (self._allows_public_viewing() or
                has_perm(user, 'forums_forum.view_in_forum', self))
    def _allows_public_viewing(self):
        """Return whether I am a world-readable forum.
        If a django-authority permission relates to me, I am considered non-
        public. (We assume that you attached a permission to me in order to
        assign it to some users or groups.) Considered adding a Public flag to
        this model, but we didn't want it to show up on form and thus be
        accidentally flippable by readers of the Admin forum, who are all
        privileged enough to do so.
        """
        return not perm_is_defined_on('forums_forum.view_in_forum', self)
    def allows_posting_by(self, user):
        """Return whether a user can make threads and posts in me."""
        return (self._allows_public_posting() or
                has_perm(user, 'forums_forum.post_in_forum', self))
    def _allows_public_posting(self):
        """Return whether I am a world-writable forum."""
        return not perm_is_defined_on('forums_forum.post_in_forum', self)
    def update_last_post(self, exclude_thread=None, exclude_post=None):
        """Set my last post to the newest, excluding given thread and post."""
        # Note: only mutates self.last_post; the caller must save().
        posts = Post.objects.filter(thread__forum=self)
        if exclude_thread:
            posts = posts.exclude(thread=exclude_thread)
        self.last_post = _last_post_from(posts, exclude_post=exclude_post)
    @classmethod
    def authorized_forums_for_user(cls, user):
        """Returns the forums this user is authorized to view"""
        return [f for f in Forum.objects.all() if f.allows_viewing_by(user)]
class Thread(NotificationsMixin, ModelBase, SearchMixin):
    """A discussion thread inside a Forum.

    Denormalizes ``last_post`` and ``replies`` for cheap listing; those
    fields are maintained by Post.save()/Post.delete() and by the
    save()/delete() overrides below when a thread moves between forums.
    """
    title = models.CharField(max_length=255)
    forum = models.ForeignKey('Forum')
    created = models.DateTimeField(default=datetime.datetime.now,
                                   db_index=True)
    creator = models.ForeignKey(User)
    # SET_NULL so deleting the newest post does not cascade to the thread.
    last_post = models.ForeignKey('Post', related_name='last_post_in',
                                  null=True, on_delete=models.SET_NULL)
    # Number of posts beyond the initial one (see Post.save()).
    replies = models.IntegerField(default=0)
    is_locked = models.BooleanField(default=False)
    is_sticky = models.BooleanField(default=False, db_index=True)
    class Meta:
        ordering = ['-is_sticky', '-last_post__created']
    def __setattr__(self, attr, val):
        """Notice when the forum field changes.
        A property won't do here, because it usurps the "forum" name and
        prevents us from using lookups like Thread.objects.filter(forum=f).
        """
        # When http://code.djangoproject.com/ticket/3148 adds nice getter and
        # setter hooks, use those instead.
        # Remember the first forum seen so save() can tell whether the
        # thread was moved and fix up both forums' last_post caches.
        if attr == 'forum' and not hasattr(self, '_old_forum'):
            try:
                old = self.forum
            except AttributeError:  # When making a new Thread(forum=3), the
                pass                # forum attr doesn't exist yet.
            else:
                self._old_forum = old
        super(Thread, self).__setattr__(attr, val)
    @property
    def last_page(self):
        """Returns the page number for the last post."""
        # NOTE(review): relies on Python 2 integer floor division; under
        # Python 3 this would need // — confirm before porting.
        return self.replies / forums.POSTS_PER_PAGE + 1
    def __unicode__(self):
        return self.title
    def delete(self, *args, **kwargs):
        """Override delete method to update parent forum info."""
        # Re-fetch the forum so we update the persisted row, not a stale copy.
        forum = Forum.objects.get(pk=self.forum.id)
        if forum.last_post and forum.last_post.thread_id == self.id:
            forum.update_last_post(exclude_thread=self)
            forum.save()
        super(Thread, self).delete(*args, **kwargs)
    def new_post(self, author, content):
        """Create a new post, if the thread is unlocked."""
        if self.is_locked:
            raise ThreadLockedError
        return self.post_set.create(author=author, content=content)
    def get_absolute_url(self):
        return reverse('forums.posts', args=[self.forum.slug, self.id])
    def get_last_post_url(self):
        # 'last' plus an anchor lets the view scroll to the newest post.
        query = {'last': self.last_post_id}
        page = self.last_page
        if page > 1:
            query['page'] = page
        url = reverse('forums.posts', args=[self.forum.slug, self.id])
        return urlparams(url, hash='post-%s' % self.last_post_id, **query)
    def save(self, *args, **kwargs):
        super(Thread, self).save(*args, **kwargs)
        # If the thread was moved (see __setattr__), refresh the cached
        # last_post on both the old and the new forum.
        old_forum = getattr(self, '_old_forum', None)
        new_forum = self.forum
        if old_forum and old_forum != new_forum:
            old_forum.update_last_post(exclude_thread=self)
            old_forum.save()
            new_forum.update_last_post()
            new_forum.save()
            del self._old_forum
    def update_last_post(self, exclude_post=None):
        """Set my last post to the newest, excluding the given post."""
        last = _last_post_from(self.post_set, exclude_post=exclude_post)
        self.last_post = last
        # If self.last_post is None, and this was called from Post.delete,
        # then Post.delete will erase the thread, as well.
    @classmethod
    def get_mapping_type(cls):
        return ThreadMappingType
@register_mapping_type
class ThreadMappingType(SearchMappingType):
    """Search mapping for Thread: one document per thread, with the text of
    all its posts flattened into ``post_content``."""
    @classmethod
    def search(cls):
        return super(ThreadMappingType, cls).search().order_by('created')
    @classmethod
    def get_model(cls):
        return Thread
    @classmethod
    def get_query_fields(cls):
        # Fields queried for full-text search.
        return ['post_title', 'post_content']
    @classmethod
    def get_mapping(cls):
        return {
            'properties': {
                'id': {'type': 'long'},
                'model': {'type': 'string', 'index': 'not_analyzed'},
                'url': {'type': 'string', 'index': 'not_analyzed'},
                'indexed_on': {'type': 'integer'},
                'created': {'type': 'integer'},
                'updated': {'type': 'integer'},
                'post_forum_id': {'type': 'integer'},
                'post_title': {'type': 'string', 'analyzer': 'snowball'},
                'post_is_sticky': {'type': 'boolean'},
                'post_is_locked': {'type': 'boolean'},
                'post_author_id': {'type': 'integer'},
                'post_author_ord': {'type': 'string', 'index': 'not_analyzed'},
                'post_content': {'type': 'string', 'analyzer': 'snowball',
                                 'store': 'yes',
                                 'term_vector': 'with_positions_offsets'},
                'post_replies': {'type': 'integer'}
            }
        }
    @classmethod
    def extract_document(cls, obj_id, obj=None):
        """Extracts interesting thing from a Thread and its Posts"""
        if obj is None:
            model = cls.get_model()
            obj = model.objects.select_related('last_post').get(pk=obj_id)
        d = {}
        d['id'] = obj.id
        d['model'] = cls.get_mapping_type_name()
        d['url'] = obj.get_absolute_url()
        d['indexed_on'] = int(time.time())
        # TODO: Sphinx stores created and updated as seconds since the
        # epoch, so we convert them to that format here so that the
        # search view works correctly. When we ditch Sphinx, we should
        # see if it's faster to filter on ints or whether we should
        # switch them to dates.
        d['created'] = int(time.mktime(obj.created.timetuple()))
        if obj.last_post is not None:
            d['updated'] = int(time.mktime(obj.last_post.created.timetuple()))
        else:
            d['updated'] = None
        d['post_forum_id'] = obj.forum.id
        d['post_title'] = obj.title
        d['post_is_sticky'] = obj.is_sticky
        d['post_is_locked'] = obj.is_locked
        d['post_replies'] = obj.replies
        # Aggregate author ids/usernames and all post bodies into the doc.
        author_ids = set()
        author_ords = set()
        content = []
        posts = Post.objects.filter(
            thread_id=obj.id).select_related('author')
        for post in posts:
            author_ids.add(post.author.id)
            author_ords.add(post.author.username)
            content.append(post.content)
        d['post_author_id'] = list(author_ids)
        d['post_author_ord'] = list(author_ords)
        d['post_content'] = content
        return d
# Reindex a thread whenever it is saved or deleted.
register_for_indexing('forums', Thread)
class Post(ModelBase):
    """A single post inside a Thread.

    save()/delete() keep the denormalized ``replies`` and ``last_post``
    fields on the parent Thread and Forum in sync.
    """
    thread = models.ForeignKey('Thread')
    content = models.TextField()
    author = models.ForeignKey(User)
    created = models.DateTimeField(default=datetime.datetime.now,
                                   db_index=True)
    updated = models.DateTimeField(default=datetime.datetime.now,
                                   db_index=True)
    updated_by = models.ForeignKey(User,
                                   related_name='post_last_updated_by',
                                   null=True)
    flags = generic.GenericRelation(FlaggedObject)
    class Meta:
        ordering = ['created']
    def __unicode__(self):
        return self.content[:50]
    def save(self, *args, **kwargs):
        """
        Override save method to update parent thread info and take care of
        created and updated.
        """
        new = self.id is None
        if not new:
            self.updated = datetime.datetime.now()
        super(Post, self).save(*args, **kwargs)
        if new:
            # replies counts posts beyond the opening one, hence the -1.
            self.thread.replies = self.thread.post_set.count() - 1
            self.thread.last_post = self
            self.thread.save()
            self.thread.forum.last_post = self
            self.thread.forum.save()
    def delete(self, *args, **kwargs):
        """Override delete method to update parent thread info."""
        # Re-fetch so we operate on the persisted thread/forum rows.
        thread = Thread.objects.get(pk=self.thread.id)
        if thread.last_post_id and thread.last_post_id == self.id:
            thread.update_last_post(exclude_post=self)
        # -2: exclude both the opening post and this soon-to-be-deleted one.
        thread.replies = thread.post_set.count() - 2
        thread.save()
        forum = Forum.objects.get(pk=thread.forum.id)
        if forum.last_post_id and forum.last_post_id == self.id:
            forum.update_last_post(exclude_post=self)
            forum.save()
        super(Post, self).delete(*args, **kwargs)
        # If I was the last post in the thread, delete the thread.
        if thread.last_post is None:
            thread.delete()
    @property
    def page(self):
        """Get the page of the thread on which this post is found."""
        t = self.thread
        earlier = t.post_set.filter(created__lte=self.created).count() - 1
        if earlier < 1:
            return 1
        # NOTE(review): Python 2 integer floor division; needs // on Python 3.
        return earlier / forums.POSTS_PER_PAGE + 1
    def get_absolute_url(self):
        query = {}
        if self.page > 1:
            query = {'page': self.page}
        url_ = self.thread.get_absolute_url()
        return urlparams(url_, hash='post-%s' % self.id, **query)
    @property
    def content_parsed(self):
        # Rendered-to-HTML version of the wiki-syntax content.
        return wiki_to_html(self.content)
# Posts have no mapping type of their own; reindex the parent thread instead.
register_for_indexing('forums', Post, instance_to_indexee=lambda p: p.thread)
def user_pre_save(sender, instance, **kw):
    """Reindex the threads a user touched when their username changes.

    Thread search documents include author usernames, so a rename must
    reindex every thread the user created or posted in.
    """
    if not instance.id:
        return  # Brand-new user: nothing indexed yet.
    stored = User.objects.get(id=instance.id)
    if stored.username == instance.username:
        return  # Username unchanged; index still accurate.
    touched = (Thread.objects
               .filter(Q(creator=instance) | Q(post__author=instance))
               .only('id')
               .distinct())
    for thread in touched:
        thread.index_later()
# Hook the username-rename detector into Django's pre-save signal for User.
pre_save.connect(
    user_pre_save, sender=User, dispatch_uid='forums_user_pre_save')
| bsd-3-clause |
ruba9/HotelChatbot | my_env/lib/python3.6/site-packages/pip/_vendor/colorama/winterm.py | 578 | 6290 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
from . import win32
# from wincon.h
class WinColor(object):
    """Windows console color indices (values from wincon.h)."""
    BLACK = 0
    BLUE = 1
    GREEN = 2
    CYAN = 3
    RED = 4
    MAGENTA = 5
    YELLOW = 6
    GREY = 7
# from wincon.h
class WinStyle(object):
    """Windows console intensity attribute bits (values from wincon.h)."""
    NORMAL = 0x00 # dim text, dim background
    BRIGHT = 0x08 # bright text, dim background
    BRIGHT_BACKGROUND = 0x80 # dim text, bright background
class WinTerm(object):
    """Drives the Windows console through win32 API calls, translating the
    ANSI-style fore/back/style state kept here into console attributes.

    Console attribute layout: low 3 bits = foreground color, bits 4-6 =
    background color, plus the WinStyle intensity bits (see set_attrs).
    """
    def __init__(self):
        # Snapshot the console's attributes at startup so reset_all() can
        # restore them later.
        self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
        self.set_attrs(self._default)
        self._default_fore = self._fore
        self._default_back = self._back
        self._default_style = self._style
        # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style.
        # So that LIGHT_EX colors and BRIGHT style do not clobber each other,
        # we track them separately, since LIGHT_EX is overwritten by Fore/Back
        # and BRIGHT is overwritten by Style codes.
        self._light = 0
    def get_attrs(self):
        """Pack fore/back/style/light state into one console attribute word."""
        return self._fore + self._back * 16 + (self._style | self._light)
    def set_attrs(self, value):
        """Unpack a console attribute word into fore/back/style state."""
        self._fore = value & 7
        self._back = (value >> 4) & 7
        self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)
    def reset_all(self, on_stderr=None):
        """Restore the attributes captured at startup."""
        self.set_attrs(self._default)
        self.set_console(attrs=self._default)
    def fore(self, fore=None, light=False, on_stderr=False):
        """Set the foreground color (None restores the default)."""
        if fore is None:
            fore = self._default_fore
        self._fore = fore
        # Emulate LIGHT_EX with BRIGHT Style
        if light:
            self._light |= WinStyle.BRIGHT
        else:
            self._light &= ~WinStyle.BRIGHT
        self.set_console(on_stderr=on_stderr)
    def back(self, back=None, light=False, on_stderr=False):
        """Set the background color (None restores the default)."""
        if back is None:
            back = self._default_back
        self._back = back
        # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style
        if light:
            self._light |= WinStyle.BRIGHT_BACKGROUND
        else:
            self._light &= ~WinStyle.BRIGHT_BACKGROUND
        self.set_console(on_stderr=on_stderr)
    def style(self, style=None, on_stderr=False):
        """Set the intensity style bits (None restores the default)."""
        if style is None:
            style = self._default_style
        self._style = style
        self.set_console(on_stderr=on_stderr)
    def set_console(self, attrs=None, on_stderr=False):
        """Push the current (or given) attributes to the console handle."""
        if attrs is None:
            attrs = self.get_attrs()
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        win32.SetConsoleTextAttribute(handle, attrs)
    def get_position(self, handle):
        """Return the cursor position, converted to 1-based coordinates."""
        position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
        # Because Windows coordinates are 0-based,
        # and win32.SetConsoleCursorPosition expects 1-based.
        position.X += 1
        position.Y += 1
        return position
    def set_cursor_position(self, position=None, on_stderr=False):
        """Move the cursor to an absolute (1-based) position."""
        if position is None:
            # I'm not currently tracking the position, so there is no default.
            # position = self.get_position()
            return
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        win32.SetConsoleCursorPosition(handle, position)
    def cursor_adjust(self, x, y, on_stderr=False):
        """Move the cursor by a relative (x, y) offset."""
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        position = self.get_position(handle)
        adjusted_position = (position.Y + y, position.X + x)
        win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)
    def erase_screen(self, mode=0, on_stderr=False):
        # 0 should clear from the cursor to the end of the screen.
        # 1 should clear from the cursor to the beginning of the screen.
        # 2 should clear the entire screen, and move cursor to (1,1)
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        csbi = win32.GetConsoleScreenBufferInfo(handle)
        # get the number of character cells in the current buffer
        cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
        # get number of character cells before current cursor position
        cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
        if mode == 0:
            from_coord = csbi.dwCursorPosition
            cells_to_erase = cells_in_screen - cells_before_cursor
        if mode == 1:
            from_coord = win32.COORD(0, 0)
            cells_to_erase = cells_before_cursor
        elif mode == 2:
            from_coord = win32.COORD(0, 0)
            cells_to_erase = cells_in_screen
        # fill the entire screen with blanks
        win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
        # now set the buffer's attributes accordingly
        win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
        if mode == 2:
            # put the cursor where needed
            win32.SetConsoleCursorPosition(handle, (1, 1))
    def erase_line(self, mode=0, on_stderr=False):
        # 0 should clear from the cursor to the end of the line.
        # 1 should clear from the cursor to the beginning of the line.
        # 2 should clear the entire line.
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        csbi = win32.GetConsoleScreenBufferInfo(handle)
        if mode == 0:
            from_coord = csbi.dwCursorPosition
            cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
        if mode == 1:
            from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
            cells_to_erase = csbi.dwCursorPosition.X
        elif mode == 2:
            from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
            cells_to_erase = csbi.dwSize.X
        # fill the entire screen with blanks
        win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
        # now set the buffer's attributes accordingly
        win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
    def set_title(self, title):
        """Set the console window title."""
        win32.SetConsoleTitle(title)
| mit |
FrozenCow/FIRE-ICE | tools/perf/scripts/python/event_analyzing_sample.py | 4719 | 7393 | # event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# providing end user/developer a flexible way to analyze the events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for a x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data has a big number of samples, then the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to RAM based FS to speedup
# the handling, which will cut the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
    """perf hook: called once before any samples are processed.

    Creates the sqlite tables: pebs_ll for PEBS load-latency samples and
    gen_events for generic events (see module comment re: /dev/shm).
    """
    print "In trace_begin:\n"
    #
    # Will create several tables at the start, pebs_ll is for PEBS data with
    # load latency info, while gen_events is for general event.
    #
    con.execute("""
        create table if not exists gen_events (
                name text,
                symbol text,
                comm text,
                dso text
        );""")
    con.execute("""
        create table if not exists pebs_ll (
                name text,
                symbol text,
                comm text,
                dso text,
                flags integer,
                ip integer,
                status integer,
                dse integer,
                dla integer,
                lat integer
        );""")
#
# Create and insert event object to a database so that user could
# do more analysis with simple database commands.
#
def process_event(param_dict):
    """perf hook: called once per sample; stores the sample in the database."""
    event_attr = param_dict["attr"]
    sample = param_dict["sample"]
    raw_buf = param_dict["raw_buf"]
    comm = param_dict["comm"]
    name = param_dict["ev_name"]
    # Symbol and dso info are not always resolved
    if (param_dict.has_key("dso")):
        dso = param_dict["dso"]
    else:
        dso = "Unknown_dso"
    if (param_dict.has_key("symbol")):
        symbol = param_dict["symbol"]
    else:
        symbol = "Unknown_symbol"
    # Create the event object and insert it to the right table in database
    event = create_event(name, comm, dso, symbol, raw_buf)
    insert_db(event)
def insert_db(event):
    """Insert an event object into the table matching its type."""
    if event.ev_type == EVTYPE_GENERIC:
        con.execute("insert into gen_events values(?, ?, ?, ?)",
                (event.name, event.symbol, event.comm, event.dso))
    elif event.ev_type == EVTYPE_PEBS_LL:
        # Mask to 63 bits so the values fit sqlite's signed 64-bit integers.
        event.ip &= 0x7fffffffffffffff
        event.dla &= 0x7fffffffffffffff
        con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                (event.name, event.symbol, event.comm, event.dso, event.flags,
                    event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
    """perf hook: called after the last sample; prints summaries and closes."""
    print "In trace_end:\n"
    # We show the basic info for the 2 type of event classes
    show_general_events()
    show_pebs_ll()
    con.close()
#
# As the event number may be very big, so we can't use linear way
# to show the histogram in real number, but use a log2 algorithm.
#
def num2sym(num):
    """Render a sample count as a log2-scaled bar of '#' characters."""
    # A count of 1 still gets one '#'; each doubling adds one more.
    width = int(math.log(num, 2) + 1)
    return '#' * width
def show_general_events():
    """Print histograms of generic events grouped by thread, symbol and dso."""
    # Check the total record number in the table
    count = con.execute("select count(*) from gen_events")
    for t in count:
        print "There is %d records in gen_events table" % t[0]
        if t[0] == 0:
            return
    print "Statistics about the general events grouped by thread/symbol/dso: \n"
    # Group by thread
    commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by dso
    print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
    dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
    for row in dsoq:
        print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
    """Print histograms of PEBS load-latency events grouped by thread,
    symbol, data-source encoding and latency."""
    count = con.execute("select count(*) from pebs_ll")
    for t in count:
        print "There is %d records in pebs_ll table" % t[0]
        if t[0] == 0:
            return
    print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
    # Group by thread
    commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by dse
    dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
    print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
    for row in dseq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by latency
    latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
    print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
    for row in latq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
    """perf hook: dump fields of any event type this script does not handle."""
    print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
| gpl-2.0 |
chbpku/rpi.sessdsa | 代码/11 多功能篮球计分器/nightlight/entities/digital_display_tm1637.py | 12 | 3369 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 NXEZ.COM.
# http://www.nxez.com
#
# Licensed under the GNU General Public License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.gnu.org/licenses/gpl-2.0.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import RPi.GPIO as GPIO
import re
from .ic_tm1637 import IC_TM1637 as IC_TM1637
class DigitalDisplayTM1637(object):
    '''
    Digital display class

    Drives a 4-digit 7-segment display through a TM1637 controller
    (IC_TM1637 does the low-level bit-banging over GPIO).
    '''
    # Low-level controller driver instance (set in __init__).
    __ic_tm1637 = None
    # Tokens currently shown, e.g. ['1', '2.', '-', '#'].
    __numbers = []
    # 7-segment patterns: digits 0-9, then 10 = blank ('#'), 11 = minus ('-').
    __number_code = [0x3f, 0x06, 0x5b, 0x4f, 0x66, 0x6d, 0x7d, 0x07, 0x7f, 0x6f, 0x00, 0x40]
    # Display register addresses for the four digit positions, left to right.
    __address_code = [0xc0, 0xc1, 0xc2, 0xc3]
    __is_on = False
    def __init__(self, pins, real_true = GPIO.HIGH):
        '''
        Init the digital display
        :param pins: pin numbers in array (clock, data)
        :param real_true: GPIO.HIGH or GPIO.LOW
        :return: void
        '''
        self.__ic_tm1637 = IC_TM1637(pins, real_true)
    # Status.
    @property
    def is_on(self):
        '''
        Get the current status of the digital display
        '''
        return self.__is_on
    @property
    def numbers(self):
        '''
        Get the current numbers array showing
        :return: numbers array
        '''
        return self.__numbers
    #@numbers.setter
    def set_numbers(self, value):
        '''
        Set the numbers array to show
        :return: void
        '''
        # Tokenize: a digit, '-' or '#', each optionally followed by '.'.
        # NOTE(review): '|' inside the character class is matched literally,
        # so a stray '|' in the input would also be accepted — confirm intent.
        pattern = re.compile(r'[-|#|\d]\.?')
        matches = pattern.findall(value)
        #del self.__numbers
        self.__numbers = []
        for i in range(len(matches)):
            self.__numbers.append(matches[i])
        #print(self.__numbers)
    #@numbers.deleter
    #def numbers(self):
    #    del self.__numbers
    @property
    def ic(self):
        '''
        Return the instance of ic
        :return: ic
        '''
        return self.__ic_tm1637
    # Verbs.
    def on(self):
        '''
        Set display on
        :return: void
        '''
        # 0x8f = display-control command: display on, maximum brightness.
        self.__ic_tm1637.set_command(0x8f)
        self.__is_on = True
    def off(self):
        '''
        Set display off
        :return: void
        '''
        self.__ic_tm1637.clear()
        self.__is_on = False
    def show(self, str):
        '''
        Set the numbers array to show and enable the display
        :return: void
        '''
        self.set_numbers(str)
        #print(self.__numbers)
        # 0x44 = data command: fixed-address write mode.
        self.__ic_tm1637.set_command(0x44)
        for i in range(min(4, len(self.__numbers))):
            # A trailing '.' on the token lights this digit's decimal point.
            dp = True if self.__numbers[i].count('.') > 0 else False
            num = self.__numbers[i].replace('.','')
            if num == '#':
                num = 10
            elif num == '-':
                num = 11
            else:
                num = int(num)
            if dp:
                self.__ic_tm1637.set_data(self.__address_code[i], self.__number_code[num]|0x80)
            else:
                self.__ic_tm1637.set_data(self.__address_code[i], self.__number_code[num])
        self.on()
| gpl-3.0 |
hamish2014/FreeCAD_assembly2 | assembly2/utils/muxAssembly.py | 1 | 6431 | from assembly2.core import *
import Part
import os, numpy
class Proxy_muxAssemblyObj:
    """FeaturePython proxy for a muxed assembly object."""
    def execute(self, shape):
        # The shape is assembled explicitly via muxObjects(); nothing to
        # recompute on document updates.
        pass
def muxObjects(doc, mode=0):
    """Combine all imported part shapes into one shell.

    mode 0 walks every object in the document; mode 1 walks the current
    selection (``doc`` is then the selection object).
    """
    candidates = doc.getSelection() if mode == 1 else doc.Objects
    faces = []
    for candidate in candidates:
        if 'importPart' in candidate.Content:
            debugPrint(3, ' - parsing "%s"' % (candidate.Name))
            if hasattr(candidate, 'Shape'):
                faces.extend(candidate.Shape.Faces)
    return Part.makeShell(faces)
def muxMapColors(doc, muxedObj, mode=0):
    """Copy per-face diffuse colors from the source parts onto the mux.

    Call after ``muxedObj.Shape = muxObjects(doc)`` with the same ``mode``
    (0 = whole document, 1 = current selection) so the face sets match.
    Faces are matched by their vertex coordinates (see faceMapKey).
    """
    diffuseColors = []
    faceMap = {}
    if mode == 1:
        objects = doc.getSelection()
    else:
        objects = doc.Objects
    for obj in objects:
        if 'importPart' in obj.Content and hasattr(obj, 'Shape'):
            for i, face in enumerate(obj.Shape.Faces):
                # Parts may define fewer diffuse colors than faces; fall
                # back to the first (overall) color in that case.
                if i < len(obj.ViewObject.DiffuseColor):
                    clr = obj.ViewObject.DiffuseColor[i]
                else:
                    clr = obj.ViewObject.DiffuseColor[0]
                faceMap[faceMapKey(face)] = clr
    for f in muxedObj.Shape.Faces:
        try:
            key = faceMapKey(f)
            clr = faceMap[key]
            # Consume the entry so duplicate geometry maps one-to-one.
            del faceMap[key]
        except KeyError:
            # No source face with identical vertices; use the muxed
            # object's overall shape color. (Typo "waring" fixed.)
            debugPrint(3, 'muxMapColors: warning no faceMap entry for %s - key %s' % (f, faceMapKey(f)))
            clr = muxedObj.ViewObject.ShapeColor
        diffuseColors.append(clr)
    muxedObj.ViewObject.DiffuseColor = diffuseColors
def faceMapKey(face):
    """Return a hashable key for a face: its vertex coordinates, flattened
    in order (x, y, z per vertex)."""
    coords = []
    for vertex in face.Vertexes:
        coords.extend((vertex.Point.x, vertex.Point.y, vertex.Point.z))
    return tuple(coords)
def createMuxedAssembly(name=None):
    """Create a muxed-assembly FeaturePython object in the active document.

    Muxes the selected importPart objects if any are selected, otherwise
    every importPart object in the document. ``name`` overrides the default
    object name 'muxedAssembly'.
    """
    partName='muxedAssembly'
    if name!=None:
        partName = name
    debugPrint(2, 'creating assembly mux "%s"' % (partName))
    muxedObj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",partName)
    muxedObj.Proxy = Proxy_muxAssemblyObj()
    muxedObj.ViewObject.Proxy = 0
    muxedObj.addProperty("App::PropertyString","type")
    muxedObj.type = 'muxedAssembly'
    # ReadOnly guards against accidental refresh (see MuxAssemblyRefresh).
    muxedObj.addProperty("App::PropertyBool","ReadOnly")
    muxedObj.ReadOnly = False
    FreeCADGui.ActiveDocument.getObject(muxedObj.Name).Visibility = False
    # Remember which objects were muxed so a later refresh can re-select them.
    muxedObj.addProperty("App::PropertyStringList","muxedObjectList")
    tmplist=[]
    for objlst in FreeCADGui.Selection.getSelection():
        if 'importPart' in objlst.Content:
            tmplist.append(objlst.Name)
    muxedObj.muxedObjectList=tmplist
    if len(tmplist)>0:
        #there are objects selected, mux them
        muxedObj.Shape = muxObjects(FreeCADGui.Selection, 1)
        muxMapColors(FreeCADGui.Selection, muxedObj, 1)
    else:
        #mux all objects (original behavior)
        for objlst in FreeCAD.ActiveDocument.Objects:
            if 'importPart' in objlst.Content:
                tmplist.append(objlst.Name)
        muxedObj.muxedObjectList=tmplist
        if len(tmplist)>0:
            muxedObj.Shape = muxObjects(FreeCAD.ActiveDocument, 0)
            muxMapColors(FreeCAD.ActiveDocument, muxedObj, 0)
        else:
            debugPrint(2, 'Nothing to Mux')
class MuxAssemblyCommand:
    """FreeCAD GUI command: mux all (or all selected) parts into one object."""
    def Activated(self):
        #we have to handle the mux name here
        createMuxedAssembly()
        FreeCAD.ActiveDocument.recompute()
    def GetResources(self):
        msg = 'Combine all parts into a single object \n\
or combine all selected parts into a single object\n(for example to create a drawing of the whole or part of the assembly)'
        return {
            'Pixmap' : ':/assembly2/icons/muxAssembly.svg',
            'MenuText': msg,
            'ToolTip': msg
            }
class MuxAssemblyRefreshCommand:
    """FreeCAD GUI command: rebuild existing muxed assemblies.

    Refreshes the selected muxes if any are selected, otherwise every
    non-ReadOnly mux in the active document. Each refresh re-selects the
    mux's recorded source objects, deletes the mux and recreates it.
    """
    def Activated(self):
        #first list all muxes in active document
        allMuxesList=[]
        for objlst in FreeCAD.ActiveDocument.Objects:
            if hasattr(objlst,'type'):
                if 'muxedAssembly' in objlst.type:
                    if objlst.ReadOnly==False:
                        allMuxesList.append(objlst.Name)
        #Second, create a list of selected objects and check if there is a mux
        allSelMuxesList=[]
        for objlst in FreeCADGui.Selection.getSelection():
            tmpobj = FreeCAD.ActiveDocument.getObject(objlst.Name)
            if 'muxedAssembly' in tmpobj.type:
                if tmpobj.ReadOnly==False:
                    allSelMuxesList.append(objlst.Name)
        refreshMuxesList=[]
        if len(allSelMuxesList) > 0 :
            refreshMuxesList=allSelMuxesList
            debugPrint(2, 'there are %d muxes in selected objects' % len(allSelMuxesList))
        else:
            if len(allMuxesList) > 0 :
                debugPrint(2, 'there are %d muxes in Active Document' % len(allMuxesList))
                refreshMuxesList=allMuxesList
        #ok there are at least 1 mux to refresh, we have to retrieve the object list for each mux
        if len(refreshMuxesList)>0:
            FreeCADGui.Selection.clearSelection()
            for muxesobj in refreshMuxesList:
                # Re-select the mux's recorded source objects, then rebuild
                # it under its original label.
                for newselobjs in FreeCAD.ActiveDocument.getObject(muxesobj).muxedObjectList:
                    FreeCADGui.Selection.addSelection(FreeCAD.ActiveDocument.getObject(newselobjs))
                tmpstr=FreeCAD.ActiveDocument.getObject(muxesobj).Label
                FreeCAD.ActiveDocument.removeObject(muxesobj)
                debugPrint(2, 'Refreshing Assembly Mux '+muxesobj)
                createMuxedAssembly(tmpstr)
        else:
            debugPrint(2, 'there are no muxes in Active Document' )
        FreeCADGui.Selection.clearSelection()
        FreeCAD.ActiveDocument.recompute()
    def GetResources(self):
        msg = 'Refresh all muxedAssembly\n\
or refresh all selected muxedAssembly\n\
use the ReadOnly property to avoid accidental refresh'
        return {
            'Pixmap' : ':/assembly2/icons/muxAssemblyRefresh.svg',
            'MenuText': msg,
            'ToolTip': msg
            }
# Register both commands with the FreeCAD GUI under the assembly2 workbench.
FreeCADGui.addCommand('assembly2_muxAssembly', MuxAssemblyCommand())
FreeCADGui.addCommand('assembly2_muxAssemblyRefresh', MuxAssemblyRefreshCommand())
| lgpl-2.1 |
Scapogo/zipline | zipline/finance/performance/position.py | 2 | 7335 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Position Tracking
=================
+-----------------+----------------------------------------------------+
| key | value |
+=================+====================================================+
| asset | the asset held in this position |
+-----------------+----------------------------------------------------+
| amount | whole number of shares in the position |
+-----------------+----------------------------------------------------+
| last_sale_price | price at last sale of the asset on the exchange |
+-----------------+----------------------------------------------------+
| cost_basis | the volume weighted average price paid per share |
+-----------------+----------------------------------------------------+
"""
from __future__ import division
from math import copysign
from collections import OrderedDict
import numpy as np
import logbook
from zipline.assets import Future, Asset
from zipline.utils.input_validation import expect_types
log = logbook.Logger('Performance')
class Position(object):
    """A holding of a single asset (see module docstring for field table).

    ``cost_basis`` is a single volume-weighted average for the whole
    position, even across multiple fills/commissions.
    """
    @expect_types(asset=Asset)
    def __init__(self, asset, amount=0, cost_basis=0.0,
                 last_sale_price=0.0, last_sale_date=None):
        self.asset = asset
        self.amount = amount
        self.cost_basis = cost_basis # per share
        self.last_sale_price = last_sale_price
        self.last_sale_date = last_sale_date
    def earn_dividend(self, dividend):
        """
        Register the number of shares we held at this dividend's ex date so
        that we can pay out the correct amount on the dividend's pay date.
        """
        # Cash owed = shares held * per-share dividend amount.
        return {
            'amount': self.amount * dividend.amount
        }
    def earn_stock_dividend(self, stock_dividend):
        """
        Register the number of shares we held at this dividend's ex date so
        that we can pay out the correct amount on the dividend's pay date.
        """
        # Fractional shares are dropped (floor) — no cash-in-lieu here.
        return {
            'payment_asset': stock_dividend.payment_asset,
            'share_count': np.floor(
                self.amount * float(stock_dividend.ratio)
            )
        }
    @expect_types(asset=Asset)
    def handle_split(self, asset, ratio):
        """
        Update the position by the split ratio, and return the resulting
        fractional share that will be converted into cash.
        Returns the unused cash.
        """
        if self.asset != asset:
            raise Exception("updating split with the wrong asset!")
        # adjust the # of shares by the ratio
        # (if we had 100 shares, and the ratio is 3,
        #  we now have 33 shares)
        # (old_share_count / ratio = new_share_count)
        # (old_price * ratio = new_price)
        # e.g., 33.333
        raw_share_count = self.amount / float(ratio)
        # e.g., 33
        full_share_count = np.floor(raw_share_count)
        # e.g., 0.333
        fractional_share_count = raw_share_count - full_share_count
        # adjust the cost basis to the nearest cent, e.g., 60.0
        new_cost_basis = round(self.cost_basis * ratio, 2)
        self.cost_basis = new_cost_basis
        self.amount = full_share_count
        return_cash = round(float(fractional_share_count * new_cost_basis), 2)
        log.info("after split: " + str(self))
        log.info("returning cash: " + str(return_cash))
        # return the leftover cash, which will be converted into cash
        # (rounded to the nearest cent)
        return return_cash
    def update(self, txn):
        """Fold a transaction into the position, maintaining the
        volume-weighted cost basis and the last-sale price/date."""
        if self.asset != txn.asset:
            raise Exception('updating position with txn for a '
                            'different asset')
        total_shares = self.amount + txn.amount
        if total_shares == 0:
            # Position fully closed; basis resets.
            self.cost_basis = 0.0
        else:
            prev_direction = copysign(1, self.amount)
            txn_direction = copysign(1, txn.amount)
            if prev_direction != txn_direction:
                # we're covering a short or closing a position
                if abs(txn.amount) > abs(self.amount):
                    # we've closed the position and gone short
                    # or covered the short position and gone long
                    self.cost_basis = txn.price
            else:
                # Same direction: blend the bases, volume-weighted.
                prev_cost = self.cost_basis * self.amount
                txn_cost = txn.amount * txn.price
                total_cost = prev_cost + txn_cost
                self.cost_basis = total_cost / total_shares
            # Update the last sale price if txn is
            # best data we have so far
            if self.last_sale_date is None or txn.dt > self.last_sale_date:
                self.last_sale_price = txn.price
                self.last_sale_date = txn.dt
        self.amount = total_shares
    @expect_types(asset=Asset)
    def adjust_commission_cost_basis(self, asset, cost):
        """
        A note about cost-basis in zipline: all positions are considered
        to share a cost basis, even if they were executed in different
        transactions with different commission costs, different prices, etc.
        Due to limitations about how zipline handles positions, zipline will
        currently spread an externally-delivered commission charge across
        all shares in a position.
        """
        if asset != self.asset:
            raise Exception('Updating a commission for a different asset?')
        if cost == 0.0:
            return
        # If we no longer hold this position, there is no cost basis to
        # adjust.
        if self.amount == 0:
            return
        prev_cost = self.cost_basis * self.amount
        if isinstance(asset, Future):
            # Futures commissions are quoted per contract; scale to the
            # per-share bookkeeping via the contract multiplier.
            cost_to_use = cost / asset.multiplier
        else:
            cost_to_use = cost
        new_cost = prev_cost + cost_to_use
        self.cost_basis = new_cost / self.amount
    def __repr__(self):
        template = "asset: {asset}, amount: {amount}, cost_basis: {cost_basis}, \
last_sale_price: {last_sale_price}"
        return template.format(
            asset=self.asset,
            amount=self.amount,
            cost_basis=self.cost_basis,
            last_sale_price=self.last_sale_price
        )
    def to_dict(self):
        """
        Creates a dictionary representing the state of this position.
        Returns a dict object of the form:
        """
        return {
            'sid': self.asset,
            'amount': self.amount,
            'cost_basis': self.cost_basis,
            'last_sale_price': self.last_sale_price
        }
class positiondict(OrderedDict):
    """Ordered mapping that yields None for missing keys.

    Unlike defaultdict, the missing key is NOT inserted into the dict.
    """
    def __missing__(self, key):
        # dict.__getitem__ invokes this hook when the key is absent.
        return None
| apache-2.0 |
EricSchles/pattern | pattern/db/__init__.py | 1 | 97989 | #### PATTERN | DB ##################################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
import os
import sys
import warnings
import re
import htmlentitydefs
import urllib
import csv as csvlib
from cStringIO import StringIO
from codecs import BOM_UTF8
from datetime import datetime, timedelta
from time import mktime, strftime
from math import sqrt
from types import GeneratorType
try:
from email.utils import parsedate_tz, mktime_tz
except:
from email.Utils import parsedate_tz, mktime_tz
try:
    # Directory containing this module; used to locate bundled resources.
    MODULE = os.path.dirname(os.path.abspath(__file__))
except:
    # __file__ is undefined in some contexts (e.g., interactive or frozen).
    MODULE = ""

# Database engine identifiers, passed as Database(type=...).
MYSQL = "mysql"
SQLITE = "sqlite"
def _import_db(engine=SQLITE):
    """ Lazy import called from Database() or Database.new().
        Depending on the type of database we either import MySQLdb or SQLite.
        Note: 64-bit Python needs 64-bit MySQL, 32-bit the 32-bit version.
    """
    # The imported module is bound at module level so later code can use it.
    global MySQLdb
    global sqlite
    if engine == MYSQL:
        import MySQLdb
        warnings.simplefilter("ignore", MySQLdb.Warning)
    if engine == SQLITE:
        try:
            # Python 2.5+
            import sqlite3.dbapi2 as sqlite
        except ImportError:
            # Python 2.4 with pysqlite2.
            # Fix: catch ImportError only; the bare except also swallowed
            # KeyboardInterrupt/SystemExit and masked unrelated failures.
            import pysqlite2.dbapi2 as sqlite
def find(match=lambda item: False, list=[]):
    """ Returns the first item in the list for which match(item) is True.
    """
    # Note: match() must return the boolean True itself; merely truthy
    # return values (e.g., 1, "yes") do not count as a match.
    for element in list:
        if match(element) is True:
            return element
    return None
_sum = sum # pattern.db.sum() is also a column aggregate function.

#### DATE FUNCTIONS ################################################################################

# NOW marks "the current date/time"; YEAR is the year at import time.
NOW, YEAR = "now", datetime.now().year

# Date formats can be found in the Python documentation:
# http://docs.python.org/library/time.html#time.strftime
DEFAULT_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
# Known input formats tried (in order) by date() when none is given explicitly.
date_formats = [
    DEFAULT_DATE_FORMAT, # 2010-09-21 09:27:01 => SQLite + MySQL
    "%Y-%m-%dT%H:%M:%SZ", # 2010-09-20T09:27:01Z => Bing
    "%Y-%m-%dT%H:%M:%S+0000", # 2010-09-20T09:27:01+0000 => Facebook
    "%Y-%m-%d %H:%M", # 2010-09-21 09:27
    "%Y-%m-%d", # 2010-09-21
    "%d/%m/%Y", # 21/09/2010
    "%d %B %Y", # 21 September 2010
    "%B %d %Y", # September 21 2010
    "%B %d, %Y", # September 21, 2010
]
class DateError(Exception):
    """ Raised by date() when a date string can not be parsed.
    """
class Date(datetime):
    """ A convenience wrapper for datetime.datetime with a default string format.
    """

    # Output format used by __str__(); instances may override it.
    format = DEFAULT_DATE_FORMAT

    # Inherited from datetime:
    # Date.year
    # Date.month
    # Date.day
    # Date.minute
    # Date.second

    @property
    def minutes(self):
        # Plural alias for datetime.minute.
        return self.minute
    @property
    def seconds(self):
        return self.second
    @property
    def microseconds(self):
        return self.microsecond

    @property
    def week(self):
        # ISO-8601 week number, taken from isocalendar().
        return self.isocalendar()[1]
    @property
    def weekday(self):
        # ISO-8601 weekday (1 = Monday .. 7 = Sunday).
        # Note: this shadows datetime.weekday() (a method) with a property.
        return self.isocalendar()[2]

    @property
    def timestamp(self):
        return int(mktime(self.timetuple())) # Seconds elapsed since 1/1/1970.

    def strftime(self, format):
        if self.year < 1900:
            # Python's strftime() doesn't handle year < 1900:
            # format year 1900 instead and patch the real year back in.
            return strftime(format, (1900,) + self.timetuple()[1:]).replace("1900", str(self.year), 1)
        return datetime.strftime(self, format)

    def copy(self):
        # Round-trips through the integer timestamp (sub-second precision is lost).
        return date(self.timestamp)

    def __str__(self):
        return self.strftime(self.format)

    def __repr__(self):
        return "Date(%s)" % repr(self.__str__())

    def __iadd__(self, time):
        return self.__add__(time)

    def __isub__(self, time):
        return self.__sub__(time)

    def __add__(self, time):
        # Returns a new Date that keeps this date's output format.
        d = datetime.__add__(self, time)
        return date(d.year, d.month, d.day, d.hour, d.minute, d.second, d.microsecond, self.format)

    def __sub__(self, time):
        d = datetime.__sub__(self, time)
        if isinstance(d, timedelta):
            # Subtracting two dates returns a time().
            return d
        return date(d.year, d.month, d.day, d.hour, d.minute, d.second, d.microsecond, self.format)
def date(*args, **kwargs):
    """ Returns a Date from the given parameters:
        - date(format=Date.format) => now
        - date(int)
        - date(string)
        - date(string, format=Date.format)
        - date(string, inputformat, format=Date.format)
        - date(year, month, day, format=Date.format)
        - date(year, month, day, hours, minutes, seconds, format=Date.format)
        If a string is given without an explicit input format, all known formats will be tried.
        Raises DateError if the string can not be parsed.
    """
    d = None
    f = None  # Output format taken from a positional "format" (>= 3 args case).
    if len(args) == 0 or args[0] == NOW:
        # No parameters or one parameter NOW.
        d = Date.now()
    elif len(args) == 1 \
     and (isinstance(args[0], int) \
      or isinstance(args[0], basestring) and args[0].isdigit()):
        # One parameter, an int or string timestamp.
        d = Date.fromtimestamp(int(args[0]))
    elif len(args) == 1 and isinstance(args[0], basestring):
        # One parameter, a date string for which we guess the input format (RFC2822 or known formats).
        try: d = Date.fromtimestamp(mktime_tz(parsedate_tz(args[0])))
        except:
            # Try the explicit format kwarg first, then each known format.
            for format in ("format" in kwargs and [kwargs["format"]] or []) + date_formats:
                try: d = Date.strptime(args[0], format); break
                except:
                    pass
        if d is None:
            raise DateError, "unknown date format for %s" % repr(args[0])
    elif len(args) == 2 and isinstance(args[0], basestring):
        # Two parameters, a date string and an explicit input format.
        d = Date.strptime(args[0], args[1])
    elif len(args) >= 3:
        # 3-6 parameters: year, month, day, hours, minutes, seconds.
        f = kwargs.pop("format", None)
        d = Date(*args[:7], **kwargs)
    else:
        raise DateError, "unknown date format"
    # Output format precedence: format kwarg > 8th positional arg > f > class default.
    d.format = kwargs.get("format") or len(args)>7 and args[7] or f or Date.format
    return d
def time(days=0, seconds=0, minutes=0, hours=0, **kwargs):
    """ Returns a value that can be added to a Date object.
    """
    # Extra keyword parameters: microseconds, milliseconds, weeks.
    # There is no months parameter since months vary in length (28-31 days).
    # To increase the month of a Date, construct a new one instead:
    # Date(date.year, date.month+1, date.day, format=date.format)
    delta = timedelta(hours=hours, minutes=minutes, seconds=seconds, days=days, **kwargs)
    return delta
#### STRING FUNCTIONS ##############################################################################
# Latin-1 (ISO-8859-1) encoding is identical to Windows-1252 except for the code points 128-159:
# Latin-1 assigns control codes in this range, Windows-1252 has characters, punctuation, symbols
# assigned to these code points.
def decode_string(v, encoding="utf-8"):
    """ Returns the given value as a Unicode string (if possible).
        The encoding parameter is either a codec name or a sequence of
        (codec, errors) tuples tried in order.
    """
    if isinstance(encoding, basestring):
        # Fall back to windows-1252, then utf-8 with undecodable bytes ignored.
        encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore"))
    if isinstance(v, str):
        for e in encoding:
            try: return v.decode(*e)
            except:
                pass
        # None of the codecs worked: return the byte string unchanged.
        return v
    # Non-string values are simply cast to unicode.
    return unicode(v)
def encode_string(v, encoding="utf-8"):
    """ Returns the given value as a Python byte string (if possible).
        The encoding parameter is either a codec name or a sequence of
        (codec, errors) tuples tried in order.
    """
    if isinstance(encoding, basestring):
        # Fall back to windows-1252, then utf-8 with unencodable characters ignored.
        encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore"))
    if isinstance(v, unicode):
        for e in encoding:
            try: return v.encode(*e)
            except:
                pass
        # None of the codecs worked: return the unicode string unchanged.
        return v
    # Non-unicode values are simply cast to str.
    return str(v)

# Shorthand aliases used throughout pattern.db:
decode_utf8 = decode_string
encode_utf8 = encode_string
def string(value, default=""):
    """ Returns the value cast to unicode, or default if it is None/empty.
    """
    # Useful for HTML interfaces. Note: 0 is a valid value and does NOT
    # fall back to the default; only None and "" do.
    if value is not None and value != "":
        return decode_utf8(value)
    return default
# Fix: the pattern was written as "\&(?!\#)" in a non-raw string; "\&" and
# "\#" are invalid escape sequences (DeprecationWarning today, a SyntaxError
# in future Python versions). A raw string with the plain characters is the
# identical regex.
RE_AMPERSAND = re.compile(r"&(?!#)") # "&" not followed by "#" (i.e., not an entity like "&#38;")
# Matches HTML entities: named ("&eacute;"), decimal ("&#233;") and hex ("&#xE9;").
RE_UNICODE = re.compile(r'&(#?)(x|X?)(\w+);')
def encode_entities(string):
    """ Encodes HTML entities in the given string ("<" => "&lt;").
        For example, to display "<em>hello</em>" in a browser,
        we need to pass "&lt;em&gt;hello&lt;/em&gt;" (otherwise "hello" in italic is displayed).
    """
    if isinstance(string, (str, unicode)):
        # Fix: the replacement literals below had themselves been entity-decoded
        # ("&lt;" had degraded to "<", making every replace() a no-op, and the
        # '"' replacement had degraded to an unterminated triple quote).
        # Restored the proper HTML entity strings.
        # Ampersands already part of an entity (RE_AMPERSAND lookahead) are kept.
        string = RE_AMPERSAND.sub("&amp;", string)
        string = string.replace("<", "&lt;")
        string = string.replace(">", "&gt;")
        string = string.replace('"', "&quot;")
        string = string.replace("'", "&#39;")
    return string
def decode_entities(string):
    """ Decodes HTML entities in the given string ("&lt;" => "<").
        Handles named ("&amp;"), decimal ("&#38;") and hexadecimal ("&#x26;")
        entities; unknown named entities are left untouched.
    """
    # http://snippets.dzone.com/posts/show/4569
    def replace_entity(match):
        # RE_UNICODE groups: (1) optional "#", (2) optional "x"/"X", (3) the name/number.
        hash, hex, name = match.group(1), match.group(2), match.group(3)
        if hash == "#" or name.isdigit():
            if hex == '' :
                return unichr(int(name)) # Decimal entity, e.g. "&#38;" => "&".
            if hex in ("x","X"):
                return unichr(int('0x'+name, 16)) # Hex entity, e.g. "&#x26;" => "&".
        else:
            cp = htmlentitydefs.name2codepoint.get(name) # Named entity, e.g. "&amp;" => "&".
            return cp and unichr(cp) or match.group() # Unknown name: keep "&foo;" as-is.
    if isinstance(string, (str, unicode)):
        return RE_UNICODE.subn(replace_entity, string)[0]
    return string
class _Binary:
    """ A wrapper for BLOB data with engine-specific encoding.
        See also: Database.binary().
    """
    def __init__(self, data, type=SQLITE):
        # data can be a byte string or a file-like object (read() is called).
        self.data, self.type = str(hasattr(data, "read") and data.read() or data), type

    def escape(self):
        # Returns the data escaped for inclusion in a quoted SQL literal.
        if self.type == SQLITE:
            # Python 2 "string-escape" codec handles null bytes; quotes are doubled.
            return str(self.data.encode("string-escape")).replace("'","''")
        if self.type == MYSQL:
            return MySQLdb.escape_string(self.data)
def _escape(value, quote=lambda string: "'%s'" % string.replace("'", "\\'")):
    """ Returns the quoted, escaped string (e.g., "'a bird\'s feathers'") for database entry.
        Anything that is not a string (e.g., an integer) is converted to string.
        Booleans are converted to "0" and "1", None is converted to "null".
        The quote parameter is a function that wraps and escapes a string value.
        See also: Database.escape()
    """
    # Note: use Database.escape() for MySQL/SQLITE-specific escape.
    if isinstance(value, str):
        # Strings are encoded as UTF-8.
        try: value = value.encode("utf-8")
        except:
            pass
    if value in ("current_timestamp",):
        # Don't quote constants such as current_timestamp.
        return value
    if isinstance(value, basestring):
        # Strings are quoted, single quotes are escaped according to the database engine.
        return quote(value)
    if isinstance(value, bool):
        # Booleans are converted to "0" or "1".
        # Must be checked before int: bool is a subclass of int.
        return str(int(value))
    if isinstance(value, (int, long, float)):
        # Numbers are converted to string.
        return str(value)
    if isinstance(value, datetime):
        # Dates are formatted as string.
        return quote(value.strftime(DEFAULT_DATE_FORMAT))
    if isinstance(value, type(None)):
        # None is converted to NULL.
        return "null"
    if isinstance(value, Query):
        # A Query is converted to "("+Query.SQL()+")" (=subquery).
        return "(%s)" % value.SQL().rstrip(";")
    if isinstance(value, _Binary):
        # Binary data is escaped with attention to null bytes.
        return "'%s'" % value.escape()
    # Unknown types are passed through unchanged.
    return value
#### LIST FUNCTIONS ################################################################################
def order(list, cmp=None, key=None, reverse=False):
    """ Returns a list of indices in the order as when the given list is sorted.
        For example: ["c","a","b"] => [1, 2, 0]
        This means that in the sorted list, "a" (index 1) comes first and "c" (index 0) last.
    """
    if cmp and key:
        f = lambda i, j: cmp(key(list[i]), key(list[j]))
    elif cmp:
        f = lambda i, j: cmp(list[i], list[j])
    elif key:
        # Emulate a comparison function from the key function: returns -1 or +1.
        f = lambda i, j: int(key(list[i]) >= key(list[j])) * 2 - 1
    else:
        f = lambda i, j: int(list[i] >= list[j]) * 2 - 1
    # NOTE(review): sorted(cmp=...) is Python 2 only; Python 3 removed the
    # cmp parameter (functools.cmp_to_key would be needed there).
    return sorted(range(len(list)), cmp=f, reverse=reverse)

_order = order
def avg(list):
    """ Returns the arithmetic mean of the given list of values.
        For example: mean([1,2,3,4]) = 10/4 = 2.5.
    """
    # _sum is the saved builtin sum(); pattern.db also defines a sum()
    # column aggregate that shadows it at module level.
    divisor = len(list) or 1  # An empty list yields 0.0 instead of an error.
    return float(_sum(list)) / divisor

def variance(list):
    """ Returns the (sample) variance of the given list of values.
        The variance is the average of squared deviations from the mean.
    """
    mean = avg(list)
    squared_deviations = [(x - mean) ** 2 for x in list]
    return _sum(squared_deviations) / (len(list) - 1 or 1)

def stdev(list):
    """ Returns the standard deviation of the given list of values.
        Low standard deviation => values are close to the mean.
        High standard deviation => values are spread out over a large range.
    """
    return sqrt(variance(list))
#### SQLITE FUNCTIONS ##############################################################################
# Convenient MySQL functions not in in pysqlite2. These are created at each Database.connect().
class sqlite_first(list):
    """ SQLite aggregate: first(column) -- the first value in the group.
    """
    def step(self, value):
        # Called once per row; values accumulate in insertion order.
        self.append(value)
    def finalize(self):
        return self[0]
class sqlite_last(list):
    """ SQLite aggregate: last(column) -- the last value in the group.
    """
    def step(self, value):
        # Called once per row; values accumulate in insertion order.
        self.append(value)
    def finalize(self):
        return self[-1]
class sqlite_group_concat(list):
    """ SQLite aggregate: group_concat(column) -- a comma-joined string of
        all non-None values in the group.
    """
    def step(self, value):
        self.append(value)
    def finalize(self):
        non_null = (string(v) for v in self if v is not None)
        return ",".join(non_null)
# SQLite (and MySQL) store dates as strings: "yyyy-mm-dd hh:mm:ss".
def _sqlite_datetime_part(datestring, half, i):
    """ Returns integer component i of the date half (0, "-"-separated)
        or the time half (1, ":"-separated) of the date string.
    """
    separator = "-" if half == 0 else ":"
    return int(datestring.split(" ")[half].split(separator)[i])

def sqlite_year(datestring):
    return _sqlite_datetime_part(datestring, 0, 0)

def sqlite_month(datestring):
    return _sqlite_datetime_part(datestring, 0, 1)

def sqlite_day(datestring):
    return _sqlite_datetime_part(datestring, 0, 2)

def sqlite_hour(datestring):
    return _sqlite_datetime_part(datestring, 1, 0)

def sqlite_minute(datestring):
    return _sqlite_datetime_part(datestring, 1, 1)

def sqlite_second(datestring):
    return _sqlite_datetime_part(datestring, 1, 2)
#### DATABASE ######################################################################################
class DatabaseConnectionError(Exception):
    """ Raised when a connection to the database can not be established
        (e.g., wrong host, user name or password).
    """
class Database(object):
    """ A collection of tables in a MySQL or SQLite database, with lazy Table
        construction, engine-specific escaping and persistent table relations.
    """

    class Tables(dict):
        # Table objects are lazily constructed when retrieved.
        # This saves time because each table executes a metadata query when constructed.
        def __init__(self, db, *args, **kwargs):
            dict.__init__(self, *args, **kwargs); self.db=db
        def __getitem__(self, k):
            if dict.__getitem__(self, k) is None:
                # First access: replace the None placeholder with a real Table.
                dict.__setitem__(self, k, Table(name=k, database=self.db))
            return dict.__getitem__(self, k)

    def __init__(self, name, host="localhost", port=3306, username="root", password="", type=SQLITE, unicode=True, **kwargs):
        """ A collection of tables stored in an SQLite or MySQL database.
            If the database does not exist, creates it.
            If the host, user or password is wrong, raises DatabaseConnectionError.
        """
        _import_db(type)
        self.type = type
        self.name = name
        self.host = host
        self.port = port
        self.username = kwargs.get("user", username) # "user" is accepted as an alias.
        self.password = password
        self._connection = None
        self.connect(unicode)
        # Table names are available in the Database.tables dictionary,
        # table objects as attributes (e.g. Database.table_name).
        q = self.type==SQLITE and "select name from sqlite_master where type='table';" or "show tables;"
        self.tables = Database.Tables(self)
        for name, in self.execute(q):
            if not name.startswith(("sqlite_",)): # Skip SQLite's internal tables.
                self.tables[name] = None
        # The SQL syntax of the last query is kept in cache.
        self._query = None
        # Persistent relations between tables, stored as
        # (table1, field1, table2, field2, join)-tuples (see Database.link()).
        self.relations = []

    def connect(self, unicode=True):
        # Connections for threaded applications work differently,
        # see http://tools.cherrypy.org/wiki/Databases
        # (have one Database object for each thread).
        if self._connection is not None:
            return
        # MySQL
        if self.type == MYSQL:
            try:
                self._connection = MySQLdb.connect(self.host, self.username, self.password, self.name, port=self.port, use_unicode=unicode)
                self._connection.autocommit(False)
            except Exception, e:
                # Create the database if it doesn't exist yet.
                if "unknown database" not in str(e).lower():
                    raise DatabaseConnectionError, e[1] # Wrong host, username and/or password.
                connection = MySQLdb.connect(self.host, self.username, self.password)
                cursor = connection.cursor()
                cursor.execute("create database if not exists `%s`;" % self.name)
                cursor.close()
                connection.close()
                self._connection = MySQLdb.connect(self.host, self.username, self.password, self.name, port=self.port, use_unicode=unicode)
                self._connection.autocommit(False)
            if unicode:
                self._connection.set_character_set("utf8")
        # SQLite
        if self.type == SQLITE:
            self._connection = sqlite.connect(self.name, detect_types=sqlite.PARSE_DECLTYPES)
            # Create functions that are not natively supported by the engine.
            # Aggregate functions (for grouping rows) + date functions.
            self._connection.create_aggregate("first", 1, sqlite_first)
            self._connection.create_aggregate("last", 1, sqlite_last)
            self._connection.create_aggregate("group_concat", 1, sqlite_group_concat)
            self._connection.create_function("year", 1, sqlite_year)
            self._connection.create_function("month", 1, sqlite_month)
            self._connection.create_function("day", 1, sqlite_day)
            self._connection.create_function("hour", 1, sqlite_hour)
            self._connection.create_function("minute", 1, sqlite_minute)
            self._connection.create_function("second", 1, sqlite_second)
        # Map field type INTEGER to int (not long(), e.g., 1L).
        # Map field type BOOLEAN to bool.
        # Map field type DATE to str, yyyy-mm-dd hh:mm:ss.
        if self.type == MYSQL:
            type = MySQLdb.constants.FIELD_TYPE
            self._connection.converter[type.LONG] = int
            self._connection.converter[type.LONGLONG] = int
            self._connection.converter[type.DECIMAL] = float
            self._connection.converter[type.NEWDECIMAL] = float
            self._connection.converter[type.TINY] = bool
            self._connection.converter[type.TIMESTAMP] = date
        if self.type == SQLITE:
            sqlite.converters["TINYINT(1)"] = bool # See Binary() why this is necessary:
            sqlite.converters["BLOB"] = lambda data: str(data).decode("string-escape")
            sqlite.converters["TIMESTAMP"] = date

    def disconnect(self):
        # Commits pending changes before closing the connection.
        if self._connection is not None:
            self._connection.commit()
            self._connection.close()
            self._connection = None

    @property
    def connection(self):
        # The underlying MySQLdb / sqlite3 connection object (or None).
        return self._connection

    @property
    def connected(self):
        return self._connection is not None

    def __getattr__(self, k):
        """ Tables are available as attributes by name, e.g., Database.persons.
        """
        if k in self.__dict__["tables"]:
            return self.__dict__["tables"][k]
        if k in self.__dict__:
            return self.__dict__[k]
        raise AttributeError, "'Database' object has no attribute '%s'" % k

    def __len__(self):
        return len(self.tables)
    def __iter__(self):
        return iter(self.tables.keys())
    def __getitem__(self, table):
        return self.tables[table]
    def __setitem__(self, table, fields):
        self.create(table, fields)
    def __delitem__(self, table):
        self.drop(table)
    def __nonzero__(self):
        # A Database is always truthy, even when it contains no tables
        # (otherwise __len__() == 0 would make it falsy).
        return True

    # Backwards compatibility.
    def _get_user(self):
        return self.username
    def _set_user(self, v):
        self.username = v
    user = property(_get_user, _set_user)

    @property
    def query(self):
        """ Yields the last executed SQL query as a string.
        """
        return self._query

    def execute(self, SQL, commit=False):
        """ Executes the given SQL query and return an iterator over the rows.
            With commit=True, automatically commits insert/update/delete changes.
        """
        class rowiterator:
            # Wraps the cursor so that it is closed after the rows are consumed.
            def __init__(self, cursor):
                self._cursor = cursor
            def next(self):
                return self.__iter__().next()
            def __iter__(self):
                for row in (hasattr(self._cursor, "__iter__") and self._cursor or self._cursor.fetchall()):
                    yield row
                self._cursor.close()
            def __del__(self):
                self._cursor.close()
        self._query = SQL
        if not SQL:
            return # MySQL doesn't like empty queries.
        #print SQL
        cursor = self._connection.cursor()
        cursor.execute(SQL)
        if commit is not False:
            self._connection.commit()
        return rowiterator(cursor)

    def commit(self):
        """ Commit all pending insert/update/delete changes.
        """
        self._connection.commit()

    def rollback(self):
        """ Discard changes since the last commit.
        """
        self._connection.rollback()

    def escape(self, value):
        """ Returns the quoted, escaped string (e.g., "'a bird\'s feathers'") for database entry.
            Anything that is not a string (e.g., an integer) is converted to string.
            Booleans are converted to "0" and "1", None is converted to "null".
        """
        def quote(string):
            # How to escape strings differs between database engines.
            if self.type == MYSQL:
                #return "'%s'" % self._connection.escape_string(string) # Doesn't like Unicode.
                return "'%s'" % string.replace("'", "\\'")
            if self.type == SQLITE:
                return "'%s'" % string.replace("'", "''")
        return _escape(value, quote)

    def binary(self, data):
        """ Returns the string of binary data as a value that can be inserted in a BLOB field.
        """
        return _Binary(data, self.type)

    blob = binary

    def _field_SQL(self, table, field):
        # Returns a (field, index)-tuple with SQL strings for the given field().
        # The field string can be used in a CREATE TABLE or ALTER TABLE statement.
        # The index string is an optional CREATE INDEX statement (or None).
        auto = " auto%sincrement" % (self.type == MYSQL and "_" or "")
        # A bare string is promoted to a STRING(255) column definition;
        # shorter tuples are padded with the field() defaults.
        field = isinstance(field, basestring) and [field, STRING(255)] or field
        field = list(field) + [STRING, None, False, True][len(field)-1:]
        field = list(_field(field[0], field[1], default=field[2], index=field[3], optional=field[4]))
        if field[1] == "timestamp" and field[2] == "now":
            field[2] = "current_timestamp"
        a = b = None
        a = "`%s` %s%s%s%s" % (
            # '`id` integer not null primary key auto_increment'
            field[0],
            field[1] == STRING and field[1]() or field[1],
            field[4] is False and " not null" or " null",
            field[2] is not None and " default %s" % self.escape(field[2]) or "",
            field[3] == PRIMARY and " primary key%s" % ("", auto)[field[1]==INTEGER] or "")
        if field[3] in (UNIQUE, True):
            b = "create %sindex `%s_%s` on `%s` (`%s`);" % (
                field[3] == UNIQUE and "unique " or "", table, field[0], table, field[0])
        return a, b

    def create(self, table, fields=[], encoding="utf-8", **kwargs):
        """ Creates a new table with the given fields.
            The given list of fields must contain values returned from the field() function.
        """
        if table in self.tables:
            raise TableError, "table '%s' already exists" % (self.name + "." + table)
        if table.startswith(XML_HEADER):
            # From an XML-string generated with Table.xml.
            # NOTE(review): XML_HEADER and parse_xml() are presumably defined
            # further down in this module -- verify.
            return parse_xml(self, table,
                    table = kwargs.get("name"),
                    field = kwargs.get("field", lambda s: s.replace(".", "_")))
        encoding = self.type == MYSQL and " default charset=" + encoding.replace("utf-8", "utf8") or ""
        fields, indices = zip(*[self._field_SQL(table, f) for f in fields])
        self.execute("create table `%s` (%s)%s;" % (table, ", ".join(fields), encoding))
        for index in indices:
            if index is not None:
                self.execute(index, commit=True)
        self.tables[table] = None # lazy loading
        return self.tables[table]

    def drop(self, table):
        """ Removes the table with the given name.
        """
        if isinstance(table, Table) and table.db == self:
            table = table.name
        if table in self.tables:
            self.tables[table].database = None
            self.tables.pop(table)
            self.execute("drop table `%s`;" % table, commit=True)
            # The SQLite version in Python 2.5 has a drop/recreate table bug.
            # Reconnect. This means that any reference to Database.connection
            # is no longer valid after Database.drop().
            if self.type == SQLITE and sys.version < "2.6":
                self.disconnect()
                self.connect()

    remove = drop

    def link(self, table1, field1, table2, field2, join="left"):
        """ Defines a relation between two tables in the database.
            When executing a table query, fields from the linked table will also be available
            (to disambiguate between field names, use table.field_name).
        """
        if isinstance(table1, Table):
            table1 = table1.name
        if isinstance(table2, Table):
            table2 = table2.name
        self.relations.append((table1, field1, table2, field2, join))

    def __repr__(self):
        return "Database(name=%s, host=%s, tables=%s)" % (
            repr(self.name),
            repr(self.host),
            repr(self.tables.keys()))

    def _delete(self):
        # No warning is issued, seems a bad idea to document the method.
        # Anyone wanting to delete an entire database should use an editor.
        if self.type == MYSQL:
            self.execute("drop database `%s`" % self.name, commit=True)
            self.disconnect()
        if self.type == SQLITE:
            self.disconnect()
            os.unlink(self.name)

    def __delete__(self):
        try:
            self.disconnect()
        except:
            pass
#### FIELD #########################################################################################
class _String(str):
# The STRING constant can be called with a length when passed to field(),
# for example field("language", type=STRING(2), default="en", index=True).
def __new__(self):
return str.__new__(self, "string")
def __call__(self, length=100):
return "varchar(%s)" % (length>255 and 255 or (length<1 and 1 or length))
# Field type.
# Note: SQLite string fields do not impose a string limit.
# Unicode strings have more characters than actually displayed (e.g. "♥").
# Boolean fields are stored as tinyint(1), int 0 or 1.
STRING, INTEGER, FLOAT, TEXT, BLOB, BOOLEAN, DATE = \
_String(), "integer", "float", "text", "blob", "boolean", "date"
STR, INT, BOOL = STRING, INTEGER, BOOLEAN
# Field index.
PRIMARY = "primary"
UNIQUE = "unique"
# DATE default.
NOW = "now"
#--- FIELD- ----------------------------------------------------------------------------------------
#def field(name, type=STRING, default=None, index=False, optional=True)
def field(name, type=STRING, **kwargs):
""" Returns a table field definition that can be passed to Database.create().
The column can be indexed by setting index to True, PRIMARY or UNIQUE.
Primary key number columns are always auto-incremented.
"""
default, index, optional = (
kwargs.get("default", type == DATE and NOW or None),
kwargs.get("index", False),
kwargs.get("optional", True)
)
if type == STRING:
type = STRING()
if type == FLOAT:
type = "real"
if type == BOOLEAN:
type = "tinyint(1)"
if type == DATE:
type = "timestamp"
if str(index) in "01":
index = bool(int(index))
if str(optional) in "01":
optional = bool(int(optional))
return (name, type, default, index, optional)
_field = field
def primary_key(name="id"):
""" Returns an auto-incremented integer primary key field named "id".
"""
return field(name, INTEGER, index=PRIMARY, optional=False)
pk = primary_key
#--- FIELD SCHEMA ----------------------------------------------------------------------------------
class Schema(object):

    def __init__(self, name, type, default=None, index=False, optional=True, extra=None):
        """ Field info returned from a "show columns from table"-query.
            Each table object has a Table.schema{} dictionary describing the fields' structure.
            The raw engine strings (type, index, default) are normalized to the
            module's field constants.
        """
        # Determine field type (NUMBER, STRING, TEXT, BLOB or DATE).
        type, length = type.lower(), None
        if type.startswith(("varchar", "char")):
            # Extract the declared length, e.g. "varchar(100)" => 100.
            length = type.split("(")[-1].strip(")")
            length = int(length)
            type = STRING
        if type.startswith("int"):
            type = INTEGER
        if type.startswith(("real", "double")):
            type = FLOAT
        if type.startswith("time"):
            type = DATE
        if type.startswith("text"):
            type = TEXT
        if type.startswith("blob"):
            type = BLOB
        if type.startswith("tinyint(1)"):
            type = BOOLEAN
        # Determine index type (PRIMARY, UNIQUE, True or False).
        if isinstance(index, basestring):
            if index.lower().startswith("pri"):
                index = PRIMARY
            if index.lower().startswith("uni"):
                index = UNIQUE
            if index.lower() in ("0", "1", "", "yes", "mul"):
                index = index.lower() in ("1", "yes", "mul")
        # SQLite dumps the date string with quotes around it:
        if isinstance(default, basestring) and type == DATE:
            default = default.strip("'")
            default = default.replace("current_timestamp", NOW)
            default = default.replace("CURRENT_TIMESTAMP", NOW)
        if default is not None and type == INTEGER:
            default = int(default)
        if default is not None and type == FLOAT:
            default = float(default)
        if not default and default != 0:
            # Empty-string / None defaults collapse to None (but 0 is kept).
            default = None
        self.name = name # Field name.
        self.type = type # Field type: INTEGER | FLOAT | STRING | TEXT | BLOB | DATE.
        self.length = length # Field length for STRING.
        self.default = default # Default value.
        self.index = index # PRIMARY | UNIQUE | True | False.
        # NOTE(review): optional is normalized from engine strings; "0"/"True"/"YES"
        # are the values treated as optional -- verify against MySQL/SQLite output.
        self.optional = str(optional) in ("0", "True", "YES")
        self.extra = extra or None

    def __repr__(self):
        return "Schema(name=%s, type=%s, default=%s, index=%s, optional=%s)" % (
            repr(self.name),
            repr(self.type),
            repr(self.default),
            repr(self.index),
            repr(self.optional))
#### TABLE #########################################################################################
# Wildcard for "all fields" in table queries.
ALL = "*"

class TableError(Exception):
    """ Raised for invalid table operations, e.g., creating a table that
        already exists or referring to one that does not.
    """
class Table(object):
    class Fields(list):
        # Table.fields.append() alters the table.
        # New field() with optional=False must have a default value (can not be NOW).
        # New field() can have index=True, but not PRIMARY or UNIQUE.
        def __init__(self, table, *args, **kwargs):
            # Keep a back-reference to the owning Table so that mutations
            # can be translated into ALTER TABLE statements.
            list.__init__(self, *args, **kwargs); self.table=table
        def append(self, field):
            # Add the column, create its (optional) index, then refresh the schema.
            name, (field, index) = field[0], self.table.db._field_SQL(self.table.name, field)
            self.table.db.execute("alter table `%s` add column %s;" % (self.table.name, field))
            self.table.db.execute(index, commit=True)
            self.table._update()
        def extend(self, fields):
            [self.append(f) for f in fields]
        def __setitem__(self, *args, **kwargs):
            # Columns can not be modified or removed, only appended.
            raise NotImplementedError, "Table.fields only supports append()"
        insert = remove = pop = __setitem__
    def __init__(self, name, database):
        """ A collection of rows consisting of one or more fields (i.e., table columns)
            of a certain type (i.e., strings, numbers).
            The table must already exist in the given Database; its schema is
            read immediately via _update().
        """
        self.database = database
        self._name = name
        self.fields = [] # List of field names (i.e., column names).
        self.schema = {} # Dictionary of (field, Schema)-items.
        self.default = {} # Default values for Table.insert().
        self.primary_key = None
        self._update()
    def _update(self):
        # Retrieve table column names.
        # Table column names are available in the Table.fields list.
        # Table column names should not contain unicode because they can also be function parameters.
        # Table column names should avoid " ", ".", "(" and ")".
        # The primary key column is stored in Table.primary_key.
        self.fields = Table.Fields(self)
        if self.name not in self.database.tables:
            raise TableError, "table '%s' does not exist" % (self.database.name + "." + self.name)
        if self.db.type == MYSQL:
            q = "show columns from `%s`;" % self.name
        if self.db.type == SQLITE:
            q = "pragma table_info(`%s`);" % self.name
            i = self.db.execute("pragma index_list(`%s`)" % self.name) # look up indices
            # Map column name => unique flag, stripping the "<table>_" prefix
            # that _field_SQL() uses when naming indices.
            i = dict(((v[1].replace(self.name+"_", "", 1), v[2]) for v in i))
        for f in self.db.execute(q):
            # Normalize each engine's row to [name, type, default, index, optional, extra].
            if self.db.type == MYSQL:
                f = [f[0], f[1], f[4], f[3], f[2], f[5]]
            if self.db.type == SQLITE:
                f = [f[1], f[2], f[4], f[5], f[3], ""]
                f[3] = f[3] == 1 and "pri" or (f[0] in i and ("1","uni")[int(i[f[0]])] or "")
            # list.append() bypasses Table.Fields.append(), which would
            # otherwise issue an ALTER TABLE statement.
            list.append(self.fields, f[0])
            self.schema[f[0]] = Schema(*f)
            if self.schema[f[0]].index == PRIMARY:
                self.primary_key = f[0]
def _get_name(self):
return self._name
def _set_name(self, name):
# Rename the table in the database and in any Database.relations.
# SQLite and MySQL will automatically copy indices on the new table.
self.db.execute("alter table `%s` rename to `%s`;" % (self._name, name))
self.db.tables.pop(self._name)
self.db.tables[name] = self
for i, r in enumerate(self.db.relations):
if r[0] == self._name:
self.db.relations = (name, r[1], r[2], r[3])
if r[2] == self.name:
self.db.relations = (r[0], r[1], name, r[3])
self._name = name
name = property(_get_name, _set_name)
    @property
    def db(self):
        # Shorthand alias for Table.database.
        return self.database

    @property
    def pk(self):
        # Shorthand alias for Table.primary_key (the primary key field name).
        return self.primary_key
    def count(self):
        """ Yields the number of rows in the table.
        """
        return int(list(self.db.execute("select count(*) from `%s`;" % self.name))[0][0])

    def __len__(self):
        return self.count()
    def __iter__(self):
        return self.iterrows()
    def __getitem__(self, id):
        # Table[id] => rows whose "id" field equals the given id.
        return self.filter(ALL, id=id)
    def __setitem__(self, id, row):
        # Table[id] = row => delete the old row, insert the new one,
        # then restore the original id on the inserted row.
        self.delete(id)
        self.update(self.insert(row), {"id": id})
    def __delitem__(self, id):
        self.delete(id)
    def abs(self, field):
        """ Returns the absolute field name (e.g., "name" => "persons.name").
        """
        # NOTE(review): this calls a module-level abs() helper that shadows the
        # builtin and is presumably defined elsewhere in pattern.db -- verify.
        return abs(self.name, field)
    def iterrows(self):
        """ Returns an iterator over the rows in the table.
        """
        return self.db.execute("select * from `%s`;" % self.name)

    def rows(self):
        """ Returns a list of all the rows in the table.
        """
        return list(self.iterrows())

    def record(self, row):
        """ Returns the given row as a dictionary of (field or alias, value)-items.
        """
        # Pairs each value with the column name at the same position.
        return dict(zip(self.fields, row))
def filter(self, *args, **kwargs):
""" Returns the rows that match the given constraints (using equals + AND):
"""
# Table.filter(("name","age"), id=1)
# Table.filter(ALL, type=("cat","dog")) => "cat" OR "dog"
# Table.filter(ALL, type="cat", name="Taxi") => "cat" AND "Taxi"
# Table.filter({"type":"cat", "name":"Taxi"})
class rowlist(list):
def __init__(self, table, data):
list.__init__(self, data); self.table=table
def record(self, row):
return self.table.record(row)
if len(args) == 0:
# No parameters: default to ALL fields.
fields = ALL
elif len(args) == 1 and not isinstance(args[0], dict):
# One parameter: field / list of fields + optional keyword filters.
fields = args[0]
elif len(args) == 1:
# One parameter: dict of filters
fields, kwargs = ALL, args[0]
elif len(args) >= 2:
# Two parameters: field(s) and dict of filters.
fields, kwargs = args[0], args[1]
fields = isinstance(fields, (list, tuple)) and ", ".join(fields) or fields or ALL
q = " and ".join(cmp(k, v, "=", self.db.escape) for k, v in kwargs.items())
q = q and " where %s" % q or ""
q = "select %s from `%s`%s;" % (fields, self.name, q)
return rowlist(self, self.db.execute(q))
	def find(self, *args, **kwargs):
		# Alias for Table.filter().
		return self.filter(*args, **kwargs)
	def search(self, *args, **kwargs):
		""" Returns a Query object that can be used to construct complex table queries.
		"""
		# Unlike filter(), the query is lazy: nothing runs until it is iterated.
		return Query(self, *args, **kwargs)
	query = search
	def _insert_id(self):
		# Retrieves the primary key value of the last inserted row.
		# Falls through to None for unknown database backends.
		if self.db.type == MYSQL:
			return list(self.db.execute("select last_insert_id();"))[0][0] or None
		if self.db.type == SQLITE:
			return list(self.db.execute("select last_insert_rowid();"))[0][0] or None
	def insert(self, *args, **kwargs):
		""" Inserts a new row from the given field parameters, returns id.
		"""
		# Table.insert(name="Taxi", age=2, type="cat")
		# Table.insert({"name":"Fricassée", "age":2, "type":"cat"})
		commit = kwargs.pop("commit", True) # As fieldname, use abs(Table.name, "commit").
		# Normalize the three accepted call styles to a single kwargs dict:
		if len(args) == 0 and len(kwargs) == 1 and isinstance(kwargs.get("values"), dict):
			kwargs = kwargs["values"]
		elif len(args) == 1 and isinstance(args[0], dict):
			kwargs = dict(args[0], **kwargs)
		elif len(args) == 1 and isinstance(args[0], (list, tuple)):
			# Positional values are matched to the non-primary-key fields, in order.
			kwargs = dict(zip((f for f in self.fields if f != self.pk), args[0]))
		# Table-level defaults override caller-supplied values.
		if len(self.default) > 0:
			kwargs.update(self.default)
		k = ", ".join("`%s`" % k for k in kwargs.keys())
		v = ", ".join(self.db.escape(v) for v in kwargs.values())
		q = "insert into `%s` (%s) values (%s);" % (self.name, k, v)
		self.db.execute(q, commit)
		return self._insert_id()
	def update(self, id, *args, **kwargs):
		""" Updates the row with the given id.
		"""
		# Table.update(1, age=3)
		# Table.update(1, {"age":3})
		# Table.update(all(filter(field="name", value="Taxi")), age=3)
		commit = kwargs.pop("commit", True) # As fieldname, use abs(Table.name, "commit").
		if len(args) == 0 and len(kwargs) == 1 and isinstance(kwargs.get("values"), dict):
			kwargs = kwargs["values"]
		if len(args) == 1 and isinstance(args[0], dict):
			a=args[0]; a.update(kwargs); kwargs=a
		kv = ", ".join("`%s`=%s" % (k, self.db.escape(v)) for k, v in kwargs.items())
		# id can be a plain primary-key value, or a Group of filters (see all()/any()).
		q = "update `%s` set %s where %s;" % (self.name, kv,
			not isinstance(id, Group) and cmp(self.primary_key, id, "=", self.db.escape) \
			 or id.SQL(escape=self.db.escape))
		self.db.execute(q, commit)
	def delete(self, id, commit=True):
		""" Removes the row whose primary key equals the given id.
		"""
		# Table.delete(1)
		# Table.delete(ALL)
		# Table.delete(all(("type","cat"), ("age",15,">")))
		# As in update(), id may also be a Group of filters.
		q = "delete from `%s` where %s" % (self.name,
			not isinstance(id, Group) and cmp(self.primary_key, id, "=", self.db.escape) \
			 or id.SQL(escape=self.db.escape))
		self.db.execute(q, commit)
	# Convenience aliases.
	append, edit, remove = insert, update, delete
	@property
	def xml(self):
		# XML dump of the table's schema and rows (see the module-level xml()).
		return xml(self)
	def datasheet(self):
		# Snapshot of all rows as an in-memory Datasheet, with (name, type) headers.
		return Datasheet(rows=self.rows(), fields=[(f, self.schema[f].type) for f in self.fields])
	def __repr__(self):
		# NOTE: executes a COUNT query on every repr() — can be slow on large tables.
		return "Table(name=%s, count=%s, database=%s)" % (
			repr(self.name),
			repr(self.count()),
			repr(self.db.name))
#### QUERY #########################################################################################
#--- QUERY SYNTAX ----------------------------------------------------------------------------------
# SQL comparison keywords given special treatment in cmp() below.
BETWEEN, LIKE, IN = \
	"between", "like", "in"
# SQL function names that may wrap a field name (e.g., "year(date)");
# abs() below uses this pattern to prefix the field inside the call.
sql_functions = \
	"first|last|count|min|max|sum|avg|stdev|group_concat|concatenate|" \
	"year|month|day|hour|minute|second|" \
	"length|lower|upper|substr|substring|replace|trim|round|random|rand|" \
	"strftime|date_format"
def abs(table, field):
	""" For a given <fieldname>, returns the absolute <tablename>.<fieldname>.
		This is useful when constructing queries with relations to other tables.
		NOTE: shadows the builtin abs() inside this module.
	"""
	def _format(s):
		if not "." in s:
			# Field could be wrapped in a function: year(date) => year(table.date).
			p = s.endswith(")") and re.match(r"^("+sql_functions+r")\(", s, re.I) or None
			i = p and len(p.group(0)) or 0
			return "%s%s.%s" % (s[:i], table, s[i:])
		# Already qualified ("table.field") => left untouched.
		return s
	if isinstance(field, (list, tuple)):
		return [_format(f) for f in field]
	return _format(field)
def cmp(field, value, comparison="=", escape=lambda v: _escape(v), table=""):
	""" Returns an SQL WHERE comparison string using =, i=, !=, >, <, >=, <= or BETWEEN.
		Strings may contain wildcards (*) at the start or at the end.
		A list or tuple of values can be given when using =, != or BETWEEN.
		NOTE: shadows the builtin cmp() inside this module.
	"""
	# Use absolute field names if table name is given:
	if table:
		field = abs(table, field)
	# cmp("name", "Mar*") => "name like 'Mar%'".
	if isinstance(value, basestring) and (value.startswith(("*","%")) or value.endswith(("*","%"))):
		if comparison in ("=", "i=", "==", LIKE):
			return "%s like %s" % (field, escape(value.replace("*","%")))
		if comparison in ("!=", "<>"):
			return "%s not like %s" % (field, escape(value.replace("*","%")))
	# cmp("name", "markov") => "name" like 'markov'" (case-insensitive).
	if isinstance(value, basestring):
		if comparison == "i=":
			return "%s like %s" % (field, escape(value))
	# cmp("type", ("cat", "dog"), "!=") => "type not in ('cat','dog')".
	# cmp("amount", (10, 100), ":") => "amount between 10 and 100".
	if isinstance(value, (list, tuple)):
		# Any wildcard in the list => fall back to an OR-group of LIKE comparisons.
		if find(lambda v: isinstance(v, basestring) and (v.startswith("*") or v.endswith("*")), value):
			return "(%s)" % any(*[(field, v) for v in value]).sql(escape=escape)
		if comparison in ("=", "==", IN):
			return "%s in (%s)" % (field, ",".join(escape(v) for v in value))
		if comparison in ("!=", "<>"):
			return "%s not in (%s)" % (field, ",".join(escape(v) for v in value))
		if comparison in (":", BETWEEN):
			return "%s between %s and %s" % (field, escape(value[0]), escape(value[1]))
	# cmp("type", None, "!=") => "type is not null".
	if isinstance(value, type(None)):
		if comparison in ("=", "=="):
			return "%s is null" % field
		if comparison in ("!=", "<>"):
			return "%s is not null" % field
	# Using a subquery:
	if isinstance(value, Query):
		if comparison in ("=", "==", IN):
			return "%s in %s" % (field, escape(value))
		if comparison in ("!=", "<>"):
			return "%s not in %s" % (field, escape(value))
	# Default: plain "<field><op><escaped value>".
	return "%s%s%s" % (field, comparison, escape(value))
# Functions for date fields: cmp(year("date"), 1999, ">").
def year(date):
	""" Returns the given date field wrapped in the SQL year() function. """
	return "year({0})".format(date)
def month(date):
	""" Returns the given date field wrapped in the SQL month() function. """
	return "month({0})".format(date)
def day(date):
	""" Returns the given date field wrapped in the SQL day() function. """
	return "day({0})".format(date)
def hour(date):
	""" Returns the given date field wrapped in the SQL hour() function. """
	return "hour({0})".format(date)
def minute(date):
	""" Returns the given date field wrapped in the SQL minute() function. """
	return "minute({0})".format(date)
def second(date):
	""" Returns the given date field wrapped in the SQL second() function. """
	return "second({0})".format(date)
# Aggregate functions.
def count(value):
	""" Returns the given field wrapped in the SQL count() aggregate. """
	return "count({0})".format(value)
def sum(value):
	""" Returns the given field wrapped in the SQL sum() aggregate.
		NOTE: shadows the builtin sum() inside this module.
	"""
	return "sum({0})".format(value)
#--- QUERY FILTER ----------------------------------------------------------------------------------
# Logical operators for combining filters in a Group.
AND, OR = "and", "or"
def filter(field, value, comparison="="):
	# A single WHERE constraint as a (field, value, comparison)-tuple; see cmp().
	# NOTE: shadows the builtin filter() inside this module.
	return (field, value, comparison)
class Group(list):
	def __init__(self, *args, **kwargs):
		""" A list of SQL WHERE filters combined with AND/OR logical operator.
		"""
		list.__init__(self, args)
		self.operator = kwargs.get("operator", AND)
	def SQL(self, **kwargs):
		""" For example, filter for small pets with tails or wings
			(which is not the same as small pets with tails or pets with wings):
			>>> Group(
			>>>     filter("type", "pet"),
			>>>     filter("weight", (4,6), ":"),
			>>>     Group(
			>>>         filter("tail", True),
			>>>         filter("wing", True), operator=OR))
			Yields:
			"type='pet' and weight between 4 and 6 and (tail=1 or wing=1)"
		"""
		# Remember to pass the right escape() function as optional parameter.
		a = []
		# NOTE: the loop variable deliberately shadows the module-level filter().
		for filter in self:
			# Traverse subgroups recursively.
			if isinstance(filter, Group):
				a.append("(%s)" % filter.SQL(**kwargs))
				continue
			# Convert filter() to string with cmp() - see above.
			if isinstance(filter, (list, tuple)):
				a.append(cmp(*filter, **kwargs))
				continue
			raise TypeError, "Group can contain other Group or filter(), not %s" % type(filter)
		return (" %s " % self.operator).join(a)
	sql = SQL
def all(*args):
	""" Returns a group of filters combined with AND.
		NOTE: shadows the builtin all() inside this module.
	"""
	# Pass the operator directly as a keyword argument
	# instead of the redundant **dict(operator=...) wrapper.
	return Group(*args, operator=AND)
def any(*args):
	""" Returns a group of filters combined with OR.
		NOTE: shadows the builtin any() inside this module.
	"""
	# Pass the operator directly as a keyword argument
	# instead of the redundant **dict(operator=...) wrapper.
	return Group(*args, operator=OR)
# From a GET-query dict:
# all(*dict.items())
# filter() value can also be a Query with comparison=IN.
#--- QUERY -----------------------------------------------------------------------------------------
# Relations:
INNER = "inner" # The rows for which there is a match in both tables (same as join=None).
LEFT  = "left"  # All rows from this table, with field values from the related table when possible.
RIGHT = "right" # All rows from the related table, with field values from this table when possible.
FULL  = "full"  # All rows from both tables.
def relation(field1, field2, table, join=LEFT):
	# A (field1, field2, table, join)-tuple linking field1 in this table
	# to field2 in the given related table; consumed by Query.SQL().
	return (field1, field2, table, join)
rel = relation
# Sorting:
# Sort directions for Query.order.
ASCENDING  = "asc"
DESCENDING = "desc"
# Aggregate functions usable as Query.function when grouping rows.
FIRST, LAST, COUNT, MAX, MIN, SUM, AVG, STDEV, CONCATENATE = \
	"first", "last", "count", "max", "min", "sum", "avg", "stdev", "group_concat"
class Query(object):
	""" A lazy SELECT statement on a Table, with filters, relations,
		sorting, grouping and a row range. Nothing runs until iterated.
	"""
	id, cache = 0, {}
	def __init__(self, table, fields=ALL, filters=[], relations=[], sort=None, order=ASCENDING, group=None, function=FIRST, range=None):
		""" A selection of rows from the given table, filtered by any() and all() constraints.
		"""
		# Table.search(ALL, filters=any(("type","cat"), ("type","dog")) => cats and dogs.
		# Table.search(("type", "name")), group="type", function=COUNT) => all types + amount per type.
		# Table.search(("name", "types.has_tail"), relations=[("types","type","id")]) => links type to types.id.
		Query.id += 1
		# Normalize filters to a Group, preserving an existing Group's operator.
		filters = Group(*filters, **dict(operator=isinstance(filters, Group) and filters.operator or AND))
		self._id       = Query.id
		self._table    = table
		self.fields    = fields    # A field name, list of field names or ALL.
		self.aliases   = {}        # A dictionary of field name aliases, used with Query.xml or Query-in-Query.
		self.filters   = filters   # A group of filter() objects.
		self.relations = relations # A list of rel() objects.
		self.sort      = sort      # A field name, list of field names or field index for sorting.
		self.order     = order     # ASCENDING or DESCENDING.
		self.group     = group     # A field name, list of field names or field index for folding.
		self.function  = function  # FIRST, LAST, COUNT, MAX, MIN, SUM, AVG, STDEV or CONCATENATE (or list).
		self.range     = range     # A (index1, index2)-tuple. The first row in the table is 0.
	@property
	def table(self):
		return self._table
	def __len__(self):
		# Executes the query; use sparingly.
		return len(list(self.rows()))
	def __iter__(self):
		return self.execute()
	def __getitem__(self, i):
		return self.rows()[i] # poor performance
	def SQL(self):
		""" Yields the SQL syntax of the query, which can be passed to Database.execute().
			The SQL string will be cached for faster reuse.
		"""
		#if self._id in Query.cache:
		#	return Query.cache[self._id]
		# Construct the SELECT clause from Query.fields.
		g = not isinstance(self.group, (list, tuple)) and [self.group] or self.group
		g = [abs(self._table.name, f) for f in g if f is not None]
		fields = not isinstance(self.fields, (list, tuple)) and [self.fields] or self.fields
		fields = [f in self.aliases and "%s as %s" % (f, self.aliases[f]) or f for f in fields]
		fields = abs(self._table.name, fields)
		# With a GROUP BY clause, fields not used for grouping are wrapped in the given function.
		# The function can also be a list of functions for each field (FIRST by default).
		if g and isinstance(self.function, basestring):
			fields = [f in g and f or "%s(%s)" % (self.function, f) for f in fields]
		if g and isinstance(self.function, (list, tuple)):
			fields = [f in g and f or "%s(%s)" % (F,f) for F,f in zip(self.function+[FIRST]*len(fields), fields)]
		q = []
		q.append("select %s" % ", ".join(fields))
		# Construct the FROM clause from Query.relations.
		# Table relations defined on the database are taken into account,
		# but overridden by relations defined on the query.
		q.append("from `%s`" % self._table.name)
		relations = {}
		for key1, key2, table, join in (rel(*r) for r in self.relations):
			table = isinstance(table, Table) and table.name or table
			relations[table] = (key1, key2, join)
		for table1, key1, table2, key2, join in self._table.db.relations:
			if table1 == self._table.name:
				relations.setdefault(table2, (key1, key2, join))
			if table2 == self._table.name:
				# Reverse the relation: a LEFT join seen from the other side is a RIGHT join.
				relations.setdefault(table1, (key1, key2, join==LEFT and RIGHT or (join==RIGHT and LEFT or join)))
		# Define relations only for tables whose fields are actually selected.
		for (table, (key1, key2, join)) in relations.items():
			for f in fields:
				if table + "." in f:
					q.append("%sjoin `%s`" % (join and join+" " or "", table))
					q.append("on %s=%s" % (abs(self._table.name, key1), abs(self._table.db[table].name, key2)))
					break
		# Construct the WHERE clause from Query.filters.SQL().
		# Use the database's escape function and absolute field names.
		if len(self.filters) > 0:
			q.append("where %s" % self.filters.SQL(escape=self._table.db.escape, table=self._table.name))
		# Construct the ORDER BY clause from Query.sort and Query.order.
		# Construct the GROUP BY clause from Query.group.
		for clause, value in (("order", self.sort), ("group", self.group)):
			if isinstance(value, basestring) and value != "":
				q.append("%s by %s" % (clause, abs(self._table.name, value)))
			elif isinstance(value, (list, tuple)) and len(value) > 0:
				q.append("%s by %s" % (clause, ", ".join(abs(self._table.name, value))))
			elif isinstance(value, int):
				q.append("%s by %s" % (clause, abs(self._table.name, self._table.fields[value])))
			if self.sort and clause == "order":
				if self.order in (ASCENDING, DESCENDING):
					q.append("%s" % self.order)
				elif isinstance(self.order, (list, tuple)):
					# Per-field sort direction: pair each ORDER BY field with its direction.
					q[-1] = ",".join(" ".join(v) for v in zip(q[-1].split(","), self.order))
		# Construct the LIMIT clause from Query.range.
		if self.range:
			q.append("limit %s, %s" % (str(self.range[0]), str(self.range[1])))
		q = " ".join(q) + ";"
		# Cache the SQL-string for faster retrieval.
		#if len(Query.cache) > 100:
		#	Query.cache.clear()
		#Query.cache[self._id] = q # XXX cache is not updated when properties change.
		return q
	sql = SQL
	def execute(self):
		""" Executes the query and returns an iterator over the matching rows in the table.
		"""
		return self._table.db.execute(self.SQL())
	def iterrows(self):
		""" Executes the query and returns an iterator over the matching rows in the table.
		"""
		return self.execute()
	def rows(self):
		""" Executes the query and returns the matching rows from the table.
		"""
		return list(self.execute())
	def record(self, row):
		""" Returns the given row as a dictionary of (field or alias, value)-items.
		"""
		return dict(zip((self.aliases.get(f,f) for f in self.fields), row))
	@property
	def xml(self):
		return xml(self)
	def __repr__(self):
		return "Query(sql=%s)" % repr(self.SQL())
def associative(query):
	""" Yields query rows as dictionaries of (field, value)-items.
	"""
	# Generator: rows are fetched lazily as the caller iterates.
	for row in query:
		yield query.record(row)
assoc = associative
#### VIEW ##########################################################################################
# A representation of data based on a table in the database.
# The render() method can be overridden to output data in a certain format (e.g., HTML for a web app).
class View(object):
	def __init__(self, database, table, schema=[]):
		""" A representation of data.
			View.render() should be overridden in a subclass.
		"""
		self.database = database
		self._table   = isinstance(table, Table) and table.name or table
		self.schema   = schema # A list of table fields - see field().
	@property
	def db(self):
		return self.database
	@property
	def table(self):
		# If it doesn't exist, create the table from View.schema.
		if not self._table in self.db:
			self.setup()
		return self.db[self._table]
	def setup(self, overwrite=False):
		""" Creates the database table from View.schema, optionally overwriting the old table.
		"""
		if overwrite:
			self.db.drop(self._table)
		if not self._table in self.db:
			self.db.create(self._table, self.schema)
	def render(self, *path, **query):
		""" This method should be overwritten to return formatted table output (XML, HTML, RSS, ...)
			For web apps, the given path should list all parts in the relative URL path,
			and query is a dictionary of all POST and GET variables sent from the client.
			For example: http://books.com/science/new
			=> ["science", "new"]
			=> render() data from db.books.filter(ALL, category="science", new=True).
		"""
		pass
	# CherryPy-specific: exposes this view as a request handler.
	def default(self, *path, **query):
		return self.render(*path, **query)
	default.exposed = True
#### XML PARSER ####################################################################################
# Standard XML declaration prepended to every document produced by xml() below.
XML_HEADER = "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
def _unpack_fields(table, fields=[]):
	""" Replaces "*" with the actual field names.
		Fields from related tables keep the "<tablename>." prefix.
	"""
	u = []
	for f in fields:
		# Split "table.field" into its parts; a bare field belongs to this table.
		a, b = "." in f and f.split(".", 1) or (table.name, f)
		if a == table.name and b == ALL:
			# <table>.*
			u.extend(f for f in table.db.tables[a].fields)
		elif a != table.name and b == ALL:
			# <related-table>.*
			u.extend("%s.%s" % (a, f) for f in table.db.tables[a].fields)
		elif a != table.name:
			# <related-table>.<field>
			u.append("%s.%s" % (a, b))
		else:
			# <field>
			u.append(b)
	return u
def xml_format(a):
	""" Returns the given attribute (string, int, float, bool, None) as a quoted unicode string.
	"""
	# NOTE(review): falls through and returns None for unhandled types.
	if isinstance(a, basestring):
		return "\"%s\"" % encode_entities(a)
	if isinstance(a, bool):
		return "\"%s\"" % ("no","yes")[int(a)]
	if isinstance(a, (int, long)):
		return "\"%s\"" % a
	if isinstance(a, float):
		return "\"%s\"" % round(a, 5)
	if isinstance(a, type(None)):
		return "\"\""
	if isinstance(a, Date):
		return "\"%s\"" % str(a)
	if isinstance(a, datetime.datetime):
		return "\"%s\"" % str(date(mktime(a.timetuple())))
def xml(rows):
	""" Returns the rows in the given Table or Query as an XML-string, for example:
		<?xml version="1.0" encoding="utf-8"?>
		<table name="pets", fields="id, name, type" count="2">
			<schema>
				<field name="id", type="integer", index="primary", optional="no" />
				<field name="name", type="string", length="50" />
				<field name="type", type="string", length="50" />
			</schema>
			<rows>
				<row id="1", name="Taxi", type="cat" />
				<row id="2", name="Hofstadter", type="dog" />
			</rows>
		</table>
	"""
	if isinstance(rows, Table):
		root, table, rows, fields, aliases = "table", rows, rows.rows(), rows.fields, {}
	if isinstance(rows, Query):
		root, table, rows, fields, aliases, = "query", rows.table, rows.rows(), rows.fields, rows.aliases
	fields = _unpack_fields(table, fields)
	# <table name="" fields="" count="">
	# <query table="" fields="" count="">
	xml = []
	xml.append(XML_HEADER)
	xml.append("<%s %s=%s fields=\"%s\" count=\"%s\">" % (
		root,
		root != "table" and "table" or "name",
		xml_format(table.name), # Use Query.aliases as field names.
		", ".join(encode_entities(aliases.get(f,f)) for f in fields),
		len(rows)))
	# <schema>
	# Field information is retrieved from the (related) table schema.
	# If the XML is imported as a Table, the related fields become part of it.
	xml.append("\t<schema>")
	for f in fields:
		if f not in table.schema:
			# Field from a related table: look it up in that table's schema.
			s = f.split(".")
			s = table.db[s[0]].schema[s[-1]]
		else:
			s = table.schema[f]
		# <field name="" type="" length="" default="" index="" optional="" extra="" />
		# Attributes at their default value are omitted.
		xml.append("\t\t<field name=%s type=%s%s%s%s%s%s />" % (
			xml_format(aliases.get(f,f)),
			xml_format(s.type),
			s.length is not None and " length=%s" % xml_format(s.length) or "",
			s.default is not None and " default=%s" % xml_format(s.default) or "",
			s.index is not False and " index=%s" % xml_format(s.index) or "",
			s.optional is not True and " optional=%s" % xml_format(s.optional) or "",
			s.extra is not None and " extra=%s" % xml_format(s.extra) or ""))
	xml.append("\t</schema>")
	xml.append("\t<rows>")
	# <rows>
	for r in rows:
		# <row field="value" />
		xml.append("\t\t<row %s />" % " ".join("%s=%s" % (aliases.get(k,k), xml_format(v)) for k, v in zip(fields, r)))
	xml.append("\t</rows>")
	xml.append("</%s>" % root)
	xml = "\n".join(xml)
	xml = encode_utf8(xml)
	return xml
def parse_xml(database, xml, table=None, field=lambda s: s.replace(".", "-")):
	""" Creates a new table in the given database from the given XML-string.
		The XML must be in the format generated by Table.xml.
		If the table already exists, raises a TableError.
		The given table parameter can be used to rename the table.
		The given field function can be used to rename field names.
	"""
	def _attr(node, attribute, default=""):
		return node.getAttribute(attribute) or default
	# parseString() will decode entities, no need for decode_entities().
	from xml.dom.minidom import parseString
	dom = parseString(encode_utf8(xml))
	a = dom.getElementsByTagName("table")
	b = dom.getElementsByTagName("query")
	if len(a) > 0:
		table = table or _attr(a[0], "name", "")
	if len(b) > 0:
		table = table or _attr(b[0], "table", "")
	# Parse field information (i.e., field name, field type, etc.)
	fields, schema, rows = [], [], []
	for f in dom.getElementsByTagName("field"):
		fields.append(_attr(f, "name"))
		schema.append(_field(
			    name = field(_attr(f, "name")),
			    type = _attr(f, "type") == STRING and STRING(int(_attr(f, "length", 255))) or _attr(f, "type"),
			 default = _attr(f, "default", None),
			   index = _attr(f, "index", False),
			optional = _attr(f, "optional", True) != "no"
		))
		# Integer primary key is always auto-increment.
		# The id's in the new table will differ from those in the XML.
		if _attr(f, "index") == PRIMARY and _attr(f, "type") == INTEGER:
			fields.pop()
	# Parse row data.
	for r in dom.getElementsByTagName("row"):
		rows.append({})
		for i, f in enumerate(fields):
			v = _attr(r, f, None)
			if schema[i][1] == BOOLEAN:
				# Booleans were serialized as "yes"/"no" by xml_format().
				rows[-1][f] = (0,1)[v!="no"]
			else:
				rows[-1][f] = v
	# Create table if not exists and insert rows.
	if database.connected is False:
		database.connect()
	if table in database:
		raise TableError, "table '%s' already exists" % table
	database.create(table, fields=schema)
	for r in rows:
		# Batch the inserts; commit once at the end for speed.
		database[table].insert(r, commit=False)
	database.commit()
	return database[table]
#### JSON PARSER ###################################################################################
# JSON is useful to store nested data in a Database or Datasheet.
# 1) Try to import Python 2.6+ json module.
# 2) Try to import pattern.web simplejson module.
# 3) Otherwise, use trivial algorithm below.
class json(object):
	""" Fallback JSON encoder/decoder, used only when neither the stdlib json
		module nor pattern.web's simplejson can be imported (see below).
	"""
	def __init__(self):
		# Floats are rendered with three decimals, trailing zeros trimmed to one.
		self.float = lambda f: re.sub(r"0+$", "0", "%.3f" % f)
		# (raw, escaped) pairs; backslash must be handled first on encode.
		# FIX: the "\r" escape used to be "\\r " (with a stray trailing space),
		# which produced the invalid JSON escape sequence "\r " on encode.
		self.escape = [
			("\\", "\\\\"),
			( '"', '\\"' ),
			("\n", "\\n" ),
			("\r", "\\r" ),
			("\t", "\\t" )
		]
	def _split(self, s, sep=",", parens=[["[","{",'"'], ["]","}",'"']]):
		""" Splits the string on the given separator (unless the separator is inside parentheses).
		"""
		(p1, p2), p, i = parens, [], 0
		for j, ch in enumerate(s):
			if ch == sep and not p:
				yield s[i:j]; i=j+1
			elif ch in p2 and p and p[-1] == p1[p2.index(ch)]:
				p.pop()
			elif ch in p1:
				p.append(ch)
		yield s[i:]
	def encode(self, s):
		# Escapes special characters and wraps the string in double quotes.
		for a, b in self.escape:
			s = s.replace(a, b)
		return '"%s"' % s
	def decode(self, s):
		# Reverses encode().
		# NOTE(review): replacements run in the same (not reversed) order as
		# encode(), so strings containing literal backslash-letter sequences
		# (e.g. "\\n") do not round-trip exactly — kept as-is for compatibility.
		for a, b in self.escape:
			s = s.replace(b, a)
		return s.strip('"')
	def loads(self, string, *args, **kwargs):
		""" Returns the data parsed from the given JSON string.
			The data can be a nested structure of dict, list, str, unicode, bool, int, float and None.
		"""
		s = string.strip()
		if s.startswith('"'):
			return self.decode(s)
		if s.isdigit():
			return int(s)
		if s.replace(".", "", 1).isdigit():
			return float(s)
		if s in ("true", "false"):
			return bool(s == "true")
		if s == "null":
			return None
		if s.startswith("{"):
			return dict(map(self.loads, self._split(kv, ":")) for kv in self._split(s.strip("{}")))
		if s.startswith("["):
			return list(self.loads(v) for v in self._split(s.strip("[]")))
		raise TypeError("can't process %s." % repr(string))
	def dumps(self, obj, *args, **kwargs):
		""" Returns a JSON string from the given data.
			The data can be a nested structure of dict, list, str, unicode, bool, int, float and None.
		"""
		if isinstance(obj, (str, unicode)):
			return self.encode(obj)
		if isinstance(obj, (int, long)): # Also validates bools, so those are handled first.
			return str(obj)
		if isinstance(obj, float):
			return str(self.float(obj))
		if isinstance(obj, bool):
			return obj and "true" or "false"
		if isinstance(obj, type(None)):
			return "null"
		if isinstance(obj, dict):
			return "{%s}" % ", ".join(['%s: %s' % tuple(map(self.dumps, kv)) for kv in sorted(obj.items())])
		if isinstance(obj, (list, tuple, GeneratorType)):
			return "[%s]" % ", ".join(self.dumps(v) for v in obj)
		raise TypeError("can't process %s." % type(obj))
try: import json # Python 2.6+
except:
try: from pattern.web import json # simplejson
except:
pass
#db = Database("test")
#db.create("persons", (pk(), field("data", TEXT)))
#db.persons.append((json.dumps({"name": u"Schrödinger", "type": "cat"}),))
#
#for id, data in db.persons:
# print id, json.loads(data)
#### DATASHEET #####################################################################################
#--- CSV -------------------------------------------------------------------------------------------
# Raise the default field size limit:
# Allow arbitrarily large CSV fields (sys.maxint is Python 2-only).
csvlib.field_size_limit(sys.maxint)
def csv_header_encode(field, type=STRING):
	""" Returns the field name with its type appended in uppercase parentheses.
	"""
	# csv_header_encode("age", INTEGER) => "age (INTEGER)".
	label = re.sub(r"^varchar\(.*?\)", "string", (type or ""))
	suffix = label and " (%s)" % label or ""
	return "%s%s" % (encode_utf8(field or ""), suffix.upper())
def csv_header_decode(s):
	# csv_header_decode("age (INTEGER)") => ("age", INTEGER).
	# NOTE(review): the trailing "|" in the pattern also matches an empty type,
	# e.g. "age ()" — the " ()" suffix case is handled separately below.
	p = r"STRING|INTEGER|FLOAT|TEXT|BLOB|BOOLEAN|DATE|"
	p = re.match(r"(.*?) \(("+p+")\)", s)
	s = s.endswith(" ()") and s[:-3] or s
	return p and (string(p.group(1), default=None), p.group(2).lower()) or (string(s) or None, None)
class CSV(list):
	def __new__(cls, rows=[], fields=None, **kwargs):
		""" A list of lists that can be imported and exported as a comma-separated text file (CSV).
		"""
		# From a CSV file path:
		if isinstance(rows, basestring) and os.path.exists(rows):
			csv = cls.load(rows, **kwargs)
		# From a list of rows:
		else:
			csv = list.__new__(cls); list.extend(csv, rows)
		# CSV.fields is a list of (name, type)-tuples,
		# with type STRING, INTEGER, FLOAT, DATE or BOOLEAN.
		csv.__dict__["fields"] = fields or kwargs.get("headers", None)
		return csv
	def __init__(self, rows=[], fields=None, **kwargs):
		# All construction happens in __new__ (so load() can return an instance).
		pass
	def _set_headers(self, v):
		self.__dict__["fields"] = v
	def _get_headers(self):
		return self.__dict__["fields"]
	# "headers" is an alias for the "fields" attribute.
	headers = property(_get_headers, _set_headers)
	def save(self, path, separator=",", encoder=lambda v: v, headers=False, **kwargs):
		""" Exports the table to a unicode text file at the given path.
			Rows in the file are separated with a newline.
			Columns in a row are separated with the given separator (by default, comma).
			For data types other than string, int, float, bool or None, a custom string encoder can be given.
		"""
		# Optional parameters include all arguments for csv.writer(), see:
		# http://docs.python.org/library/csv.html#csv.writer
		kwargs.setdefault("delimiter", separator)
		kwargs.setdefault("quoting", csvlib.QUOTE_ALL)
		# csv.writer will handle str, int, float and bool:
		s = StringIO()
		w = csvlib.writer(s, **kwargs)
		if headers and self.fields is not None:
			w.writerows([[csv_header_encode(name, type) for name, type in self.fields]])
		w.writerows([[encode_utf8(encoder(v)) for v in row] for row in self])
		s = s.getvalue()
		s = s.strip()
		# Unquote "None" so it can be round-tripped back to None on load.
		s = re.sub("([^\"]|^)\"None\"", "\\1None", s)
		# Written with a UTF-8 BOM so e.g. Excel detects the encoding.
		f = open(path, "wb")
		f.write(BOM_UTF8)
		f.write(s)
		f.close()
	@classmethod
	def load(cls, path, separator=",", decoder=lambda v: v, headers=False, preprocess=lambda s: s, **kwargs):
		""" Returns a table from the data in the given text file.
			Rows are expected to be separated by a newline.
			Columns are expected to be separated by the given separator (by default, comma).
			Strings will be converted to int, float, bool, date or None if headers are parsed.
			For other data types, a custom string decoder can be given.
		"""
		# Date objects are saved and loaded as strings, but it is easy to convert these back to dates:
		# - set a DATE field type for the column,
		# - or do Table.columns[x].map(lambda s: date(s))
		data = open(path, "rb").read().replace(BOM_UTF8, "")
		data = preprocess(data)
		data = "\n".join(line for line in data.splitlines()) # Excel \r => \n
		data = StringIO(data)
		data = [row for row in csvlib.reader(data, delimiter=separator)]
		if headers:
			fields = [csv_header_decode(field) for field in data.pop(0)]
			# Pad the header to the width of the widest row.
			fields += [(None, None)] * (max([0]+[len(row) for row in data]) - len(fields))
		else:
			fields = []
		if not fields:
			# Cast fields using the given decoder (by default, all strings + None).
			data = [[decoder(decode_utf8(v) if v != "None" else None) for v in row] for row in data]
		else:
			# Cast fields to their defined field type (STRING, INTEGER, ...)
			for i, row in enumerate(data):
				for j, v in enumerate(row):
					type = fields[j][1]
					if row[j] == "None":
						row[j] = decoder(None)
					elif type is None:
						row[j] = decoder(decode_utf8(v))
					elif type in (STRING, TEXT):
						row[j] = decode_utf8(v)
					elif type == INTEGER:
						row[j] = int(row[j])
					elif type == FLOAT:
						row[j] = float(row[j])
					elif type == BOOLEAN:
						row[j] = bool(row[j])
					elif type == DATE:
						row[j] = date(row[j])
					elif type == BLOB:
						row[j] = v
					else:
						row[j] = decoder(decode_utf8(v))
		return cls(rows=data, fields=fields)
#--- DATASHEET -------------------------------------------------------------------------------------
class Datasheet(CSV):
	def __init__(self, rows=[], fields=None, **kwargs):
		""" A matrix of rows and columns, where each row and column can be retrieved as a list.
			Values can be any kind of Python object.
		"""
		# NumPy array, convert to list of int/float/str/bool.
		# (Duck-typed on the class name so numpy is not a hard dependency.)
		if rows.__class__.__name__ == "ndarray":
			rows = rows.tolist()
		# Stored via __dict__ directly to bypass the custom __setattr__ below.
		self.__dict__["_rows"] = DatasheetRows(self)
		self.__dict__["_columns"] = DatasheetColumns(self)
		self.__dict__["_m"] = 0 # Number of columns per row, see Datasheet.insert().
		CSV.__init__(self, rows, fields, **kwargs)
	def _get_rows(self):
		# Live row-accessor view (see DatasheetRows).
		return self._rows
	def _set_rows(self, rows):
		# Datasheet.rows property can't be set, except in special case Datasheet.rows += row.
		# (In-place += returns the same DatasheetRows object, which is accepted here.)
		if isinstance(rows, DatasheetRows) and rows._datasheet == self:
			self._rows = rows; return
		raise AttributeError, "can't set attribute"
	rows = property(_get_rows, _set_rows)
	def _get_columns(self):
		# Live column-accessor view (see DatasheetColumns).
		return self._columns
	def _set_columns(self, columns):
		# Datasheet.columns property can't be set, except in special case Datasheet.columns += column.
		# (In-place += returns the same DatasheetColumns object, which is accepted here.)
		if isinstance(columns, DatasheetColumns) and columns._datasheet == self:
			self._columns = columns; return
		raise AttributeError, "can't set attribute"
	columns = cols = property(_get_columns, _set_columns)
	def __getattr__(self, k):
		""" Columns can be retrieved by field name, e.g., Datasheet.date.
		"""
		# Only called when normal attribute lookup fails.
		#print "Datasheet.__getattr__", k
		if k in self.__dict__:
			return self.__dict__[k]
		# Match k against the declared field names and return that column.
		for i, f in enumerate(f[0] for f in self.__dict__["fields"] or []):
			if f == k:
				return self.__dict__["_columns"][i]
		raise AttributeError, "'Datasheet' object has no attribute '%s'" % k
	def __setattr__(self, k, v):
		""" Columns can be set by field name, e.g., Datasheet.date = [...].
		"""
		#print "Datasheet.__setattr__", k
		if k in self.__dict__:
			self.__dict__[k] = v
			return
		# The rows/columns/headers pseudo-attributes route through their setters.
		if k == "rows":
			self._set_rows(v)
			return
		if k == "columns":
			self._set_columns(v)
			return
		if k == "headers":
			self._set_headers(v)
			return
		# Otherwise, match k against the declared field names and replace that column.
		for i, f in enumerate(f[0] for f in self.__dict__["fields"] or []):
			if f == k:
				self.__dict__["_columns"].__setitem__(i, v); return
		raise AttributeError, "'Datasheet' object has no attribute '%s'" % k
	def __setitem__(self, index, value):
		""" Sets an item or row in the matrix.
			For Datasheet[i] = v, sets the row at index i to v.
			For Datasheet[i,j] = v, sets the value in row i and column j to v.
		"""
		if isinstance(index, tuple):
			list.__getitem__(self, index[0])[index[1]] = value
		elif isinstance(index, int):
			# Replace via pop + insert so insert() can pad/normalize the new row.
			self.pop(index)
			self.insert(index, value)
		else:
			raise TypeError, "Datasheet indices must be int or tuple"
	def __getitem__(self, index):
		""" Returns an item, row or slice from the matrix.
			For Datasheet[i], returns the row at the given index.
			For Datasheet[i,j], returns the value in row i and column j.
		"""
		if isinstance(index, (int, slice)):
			# Datasheet[i] => row i.
			return list.__getitem__(self, index)
		if isinstance(index, tuple):
			i, j = index
			# Datasheet[i,j]   => item from column j in row i.
			# Datasheet[i,j1:j2] => columns j1-j2 from row i.
			if not isinstance(i, slice):
				return list.__getitem__(self, i)[j]
			# Datasheet[i1:i2,j] => column j from rows i1-i2.
			if not isinstance(j, slice):
				return [row[j] for row in list.__getitem__(self, i)]
			# Datasheet[i1:i2,j1:j2] => Datasheet with columns j1-j2 from rows i1-i2.
			return Datasheet(
				  rows = (row[j] for row in list.__getitem__(self, i)),
				fields = self.fields and self.fields[j] or self.fields)
		raise TypeError, "Datasheet indices must be int, tuple or slice"
    def __getslice__(self, i, j):
        # Python 2 slice protocol: Datasheet[i1:i2] => Datasheet with rows i1-i2.
        # (Not called on Python 3, where __getitem__ receives the slice.)
        return Datasheet(
                  rows = list.__getslice__(self, i, j),
                fields = self.fields)
    def __delitem__(self, index):
        # del Datasheet[i] removes row i (delegates to pop()).
        self.pop(index)
    # datasheet1 = datasheet2 + datasheet3
    # datasheet1 = [[...],[...]] + datasheet2
    # datasheet1 += datasheet2
    def __add__(self, datasheet):
        # Returns a new Datasheet with the rows of both operands.
        m = self.copy(); m.extend(datasheet); return m
    def __radd__(self, datasheet):
        # list + Datasheet: wrap the left operand in a Datasheet first.
        m = Datasheet(datasheet); m.extend(self); return m
    def __iadd__(self, datasheet):
        # In-place +=: extend self with the rows of the right operand.
        self.extend(datasheet); return self
    def insert(self, i, row, default=None):
        """ Inserts the given row into the matrix.
            Missing columns at the end (right) will be filled with the default value.
        """
        try:
            # Copy the row (fast + safe for generators and DatasheetColumns).
            row = [v for v in row]
        except:
            raise TypeError, "Datasheet.insert(x): x must be list"
        list.insert(self, i, row)
        # _m tracks the column count; a wider new row widens the matrix.
        m = max((len(self) > 1 and self._m or 0, len(row)))
        if len(row) < m:
            # Pad the new row on the right up to the column count.
            row.extend([default] * (m-len(row)))
        if self._m < m:
            # The given row might have more columns than the rows in the matrix.
            # Performance takes a hit when these rows have to be expanded:
            for row in self:
                if len(row) < m:
                    row.extend([default] * (m-len(row)))
        self.__dict__["_m"] = m
    def append(self, row, default=None, _m=None):
        # NOTE(review): _m is accepted for call-compatibility but unused here.
        self.insert(len(self), row, default)
    def extend(self, rows, default=None):
        # Append each row in turn; insert() handles padding and column growth.
        for row in rows:
            self.insert(len(self), row, default)
    def group(self, j, function=FIRST, key=lambda v: v):
        """ Returns a datasheet with unique values in column j by grouping rows with the given function.
            The function takes a list of column values as input and returns a single value,
            e.g. FIRST, LAST, COUNT, MAX, MIN, SUM, AVG, STDEV, CONCATENATE.
            The function can also be a list of functions (one for each column).
            TypeError will be raised when the function cannot handle the data in a column.
            The key argument can be used to map the values in column j, for example:
            key=lambda date: date.year to group Date objects by year.
        """
        # Normalize `function` to a list with one aggregator per column.
        if isinstance(function, tuple):
            function = list(function)
        if not isinstance(function, list):
            function = [function] * self._m
        if len(function) < self._m:
            function+= [FIRST] * (self._m - len(function))
        # Replace the FIRST/LAST/... markers with actual reducer callables.
        for i, f in enumerate(function):
            if i == j: # Group column j is always FIRST.
                f = FIRST
            if f == FIRST:
                function[i] = lambda a: a[+0]
            if f == LAST:
                function[i] = lambda a: a[-1]
            if f == COUNT:
                function[i] = lambda a: len(a)
            if f == MAX:
                function[i] = lambda a: max(a)
            if f == MIN:
                function[i] = lambda a: min(a)
            if f == SUM:
                function[i] = lambda a: _sum([x for x in a if x is not None])
            if f == AVG:
                function[i] = lambda a: avg([x for x in a if x is not None])
            if f == STDEV:
                function[i] = lambda a: stdev([x for x in a if x is not None])
            if f == CONCATENATE:
                function[i] = lambda a: ",".join(decode_utf8(x) for x in a if x is not None)
        J = j
        # Map unique values in column j to a list of rows that contain this value.
        g = {}; [g.setdefault(key(v), []).append(i) for i, v in enumerate(self.columns[j])]
        # Map unique values in column j to a sort index in the new, grouped list.
        o = [(g[v][0], v) for v in g]
        o = dict([(v, i) for i, (ii,v) in enumerate(sorted(o))])
        # Create a list of rows with unique values in column j,
        # applying the group function to the other columns.
        u = [None] * len(o)
        for v in g:
            # List the column values for each group row.
            u[o[v]] = [[list.__getitem__(self, i)[j] for i in g[v]] for j in range(self._m)]
            # Apply the group function to each row, except the unique value in column j.
            u[o[v]] = [function[j](column) for j, column in enumerate(u[o[v]])]
            # Restore the (key-mapped) group value in the group column.
            u[o[v]][J] = v
        return Datasheet(rows=u)
    def map(self, function=lambda item: item):
        """ Applies the given function to each item in the matrix.
        """
        for i, row in enumerate(self):
            for j, item in enumerate(row):
                row[j] = function(item)
    def slice(self, i, j, n, m):
        """ Returns a new Datasheet starting at row i and column j and spanning n rows and m columns.
        """
        # Note: the comprehension's loop variable deliberately reuses (shadows) i;
        # range(i, i+n) is evaluated with the original i first.
        return Datasheet(rows=[list.__getitem__(self, i)[j:j+m] for i in range(i, i+n)])
    def copy(self, rows=ALL, columns=ALL):
        """ Returns a new Datasheet from a selective list of row and/or column indices.
        """
        if rows == ALL and columns == ALL:
            return Datasheet(rows=self)
        if rows == ALL:
            return Datasheet(rows=zip(*(self.columns[j] for j in columns)))
        if columns == ALL:
            return Datasheet(rows=(self.rows[i] for i in rows))
        # NOTE(review): indexing z[i] relies on Python 2's zip() returning a
        # list; on Python 3 zip() yields an iterator and this would fail.
        z = zip(*(self.columns[j] for j in columns))
        return Datasheet(rows=(z[i] for i in rows))
    @property
    def json(self):
        """ Returns a JSON-string, as a list of dictionaries (if fields are defined) or as a list of lists.
            This is useful for sending a Datasheet to JavaScript, for example.
        """
        if self.fields is not None:
            # Each row becomes a {field name: value} dictionary.
            return json.dumps([dict((f[0], row[i]) for i, f in enumerate(self.fields)) for row in self])
        else:
            return json.dumps(self)
    @property
    def array(self):
        """ Returns a NumPy array.
            Arrays must have elements of the same type, and rows of equal size.
        """
        import numpy
        return numpy.array(self)
def flip(datasheet):
    """ Returns a new datasheet with rows for columns and columns for rows.
    """
    return Datasheet(rows=datasheet.columns)

def csv(*args, **kwargs):
    """ Returns a Datasheet from the given CSV file path.
    """
    # Convenience alias for Datasheet.load().
    return Datasheet.load(*args, **kwargs)
#--- DATASHEET ROWS --------------------------------------------------------------------------------
# Datasheet.rows mimics the operations on Datasheet:
class DatasheetRows(list):
    """ Datasheet.rows: a live, list-like view on the rows of a Datasheet.
        All mutating operations are delegated to the parent Datasheet so
        that row padding and the column count stay consistent.
    """
    def __init__(self, datasheet):
        self._datasheet = datasheet
    def __setitem__(self, i, row):
        # Replace row i; pop + insert re-applies the Datasheet's row padding.
        self._datasheet.pop(i)
        self._datasheet.insert(i, row)
    def __getitem__(self, i):
        return list.__getitem__(self._datasheet, i)
    def __delitem__(self, i):
        self.pop(i)
    def __len__(self):
        return len(self._datasheet)
    def __iter__(self):
        for i in xrange(len(self)): yield list.__getitem__(self._datasheet, i)
    def __repr__(self):
        return repr(self._datasheet)
    def __add__(self, row):
        raise TypeError, "unsupported operand type(s) for +: 'Datasheet.rows' and '%s'" % row.__class__.__name__
    def __iadd__(self, row):
        # += appends a single row (unlike +, which is disallowed above).
        self.append(row); return self
    def __eq__(self, rows):
        return self._datasheet.__eq__(rows)
    def __ne__(self, rows):
        return self._datasheet.__ne__(rows)
    def insert(self, i, row, default=None):
        self._datasheet.insert(i, row, default)
    def append(self, row, default=None):
        self._datasheet.append(row, default)
    def extend(self, rows, default=None):
        self._datasheet.extend(rows, default)
    def remove(self, row):
        self._datasheet.remove(row)
    def pop(self, i):
        return self._datasheet.pop(i)
    def count(self, row):
        return self._datasheet.count(row)
    def index(self, row):
        return self._datasheet.index(row)
    def sort(self, cmp=None, key=None, reverse=False):
        self._datasheet.sort(cmp, key, reverse)
    def reverse(self):
        self._datasheet.reverse()
    def swap(self, i1, i2):
        self[i1], self[i2] = self[i2], self[i1]
#--- DATASHEET COLUMNS -----------------------------------------------------------------------------
class DatasheetColumns(list):
def __init__(self, datasheet):
self._datasheet = datasheet
self._cache = {} # Keep a reference to DatasheetColumn objects generated with Datasheet.columns[j].
# This way we can unlink them when they are deleted.
def __setitem__(self, j, column):
if self._datasheet.fields is not None and j < len(self._datasheet.fields):
# Preserve the column header if it exists.
f = self._datasheet.fields[j]
else:
f = None
self.pop(j)
self.insert(j, column, field=f)
def __getitem__(self, j):
if j < 0: j = j % len(self) # DatasheetColumns[-1]
if j >= len(self):
raise IndexError, "list index out of range"
return self._cache.setdefault(j, DatasheetColumn(self._datasheet, j))
def __delitem__(self, j):
self.pop(j)
def __len__(self):
return len(self._datasheet) > 0 and len(self._datasheet[0]) or 0
def __iter__(self):
for i in xrange(len(self)): yield self.__getitem__(i)
def __repr__(self):
return repr(list(iter(self)))
def __add__(self, column):
raise TypeError, "unsupported operand type(s) for +: 'Datasheet.columns' and '%s'" % column.__class__.__name__
def __iadd__(self, column):
self.append(column); return self
def __eq__(self, columns):
return list(self) == columns
def __ne__(self, columns):
return not self.__eq__(self, columns)
def insert(self, j, column, default=None, field=None):
""" Inserts the given column into the matrix.
Missing rows at the end (bottom) will be filled with the default value.
"""
try: column = [v for v in column]
except:
raise TypeError, "Datasheet.columns.insert(x): x must be list"
column = column + [default] * (len(self._datasheet) - len(column))
if len(column) > len(self._datasheet):
self._datasheet.extend([[None]] * (len(column)-len(self._datasheet)))
for i, row in enumerate(self._datasheet):
row.insert(j, column[i])
self._datasheet.__dict__["_m"] += 1 # Increase column count.
# Add a new header.
if self._datasheet.fields is not None:
self._datasheet.fields += [(None, None)] * (len(self) - len(self._datasheet.fields) - 1)
self._datasheet.fields.insert(j, field or (None, None))
def append(self, column, default=None, field=None):
self.insert(len(self), column, default, field)
def extend(self, columns, default=None, fields=[]):
for j, column in enumerate(columns):
self.insert(len(self), column, default, j<len(fields) and fields[j] or None)
def remove(self, column):
if isinstance(column, DatasheetColumn) and column._datasheet == self._datasheet:
self.pop(column._j); return
raise ValueError, "list.remove(x): x not in list"
def pop(self, j):
column = list(self[j]) # Return a list copy.
for row in self._datasheet:
row.pop(j)
# At one point a DatasheetColumn object was created with Datasheet.columns[j].
# It might still be in use somewhere, so we unlink it from the datasheet:
self._cache[j]._datasheet = Datasheet(rows=[[v] for v in column])
self._cache[j]._j = 0
self._cache.pop(j)
for k in range(j+1, len(self)+1):
if k in self._cache:
# Shift the DatasheetColumn objects on the right to the left.
self._cache[k-1] = self._cache.pop(k)
self._cache[k-1]._j = k-1
self._datasheet.__dict__["_m"] -= 1 # Decrease column count.
# Remove the header.
if self._datasheet.fields is not None:
self._datasheet.fields.pop(j)
return column
def count(self, column):
return len([True for c in self if c == column])
def index(self, column):
if isinstance(column, DatasheetColumn) and column._datasheet == self._datasheet:
return column._j
return list(self).index(column)
def sort(self, cmp=None, key=None, reverse=False, order=None):
# This makes most sense if the order in which columns should appear is supplied.
o = order and order or _order(self, cmp, key, reverse)
for i, row in enumerate(self._datasheet):
# The main difficulty is modifying each row in-place,
# since other variables might be referring to it.
r=list(row); [row.__setitem__(i2, r[i1]) for i2, i1 in enumerate(o)]
# Reorder the datasheet headers.
if self._datasheet.fields is not None:
self._datasheet.fields = [self._datasheet.fields[i] for i in o]
def swap(self, j1, j2):
self[j1], self[j2] = self[j2], self[j1]
# Reorder the datasheet headers.
if self._datasheet.fields is not None:
self._datasheet.fields[j1], self._datasheet.fields[j2] = (
self._datasheet.fields[j2],
self._datasheet.fields[j1])
#--- DATASHEET COLUMN ------------------------------------------------------------------------------
class DatasheetColumn(list):
    def __init__(self, datasheet, j):
        """ A dynamic column in a Datasheet.
            If the actual column is deleted with Datasheet.columns.remove() or Datasheet.columms.pop(),
            the DatasheetColumn object will be orphaned (i.e., it is no longer part of the table).
        """
        self._datasheet = datasheet
        self._j = j
    def __getitem__(self, i):
        # Value in row i, column j (reads through to the parent datasheet).
        return list.__getitem__(self._datasheet, i)[self._j]
    def __setitem__(self, i, value):
        list.__getitem__(self._datasheet, i)[self._j] = value
    def __len__(self):
        return len(self._datasheet)
    def __iter__(self): # Can be put more simply but optimized for performance:
        for i in xrange(len(self)): yield list.__getitem__(self._datasheet, i)[self._j]
    def __repr__(self):
        return repr(list(iter(self)))
    def __gt__(self, column):
        return list(self) > list(column)
    def __lt__(self, column):
        return list(self) < list(column)
    def __ge__(self, column):
        return list(self) >= list(column)
    def __le__(self, column):
        return list(self) <= list(column)
    def __eq__(self, column):
        return list(self) == column
    def __ne__(self, column):
        return not self.__eq__(column)
    def __add__(self, value):
        raise TypeError, "unsupported operand type(s) for +: 'Datasheet.columns[x]' and '%s'" % value.__class__.__name__
    def __iadd__(self, value):
        # += appends a single value (creates a new padded row in the matrix).
        self.append(value); return self
    def __contains__(self, value):
        for v in self:
            if v == value: return True
        return False
    def count(self, value):
        return len([True for v in self if v == value])
    def index(self, value):
        for i, v in enumerate(self):
            if v == value:
                return i
        raise ValueError, "list.index(x): x not in list"
    def remove(self, value):
        """ Removes the matrix row that has the given value in this column.
        """
        for i, v in enumerate(self):
            if v == value:
                self._datasheet.pop(i); return
        raise ValueError, "list.remove(x): x not in list"
    def pop(self, i):
        """ Removes the entire row from the matrix and returns the value at the given index.
        """
        row = self._datasheet.pop(i); return row[self._j]
    def sort(self, cmp=None, key=None, reverse=False):
        """ Sorts the rows in the matrix according to the values in this column,
            e.g. clicking ascending / descending on a column header in a datasheet viewer.
        """
        # NOTE(review): calls order(...) while DatasheetColumns.sort() calls
        # _order(...) -- presumably both resolve to the same module helper; verify.
        o = order(list(self), cmp, key, reverse)
        # Modify the table in place, more than one variable may be referencing it:
        r=list(self._datasheet); [self._datasheet.__setitem__(i2, r[i1]) for i2, i1 in enumerate(o)]
    def insert(self, i, value, default=None):
        """ Inserts the given value in the column.
            This will create a new row in the matrix, where other columns are set to the default.
        """
        # NOTE(review): the new row is padded using len(self._datasheet) (the
        # row count); the column count (_m) looks intended here -- verify.
        self._datasheet.insert(i, [default]*self._j + [value] + [default]*(len(self._datasheet)-self._j-1))
    def append(self, value, default=None):
        self.insert(len(self), value, default)
    def extend(self, values, default=None):
        for value in values:
            self.insert(len(self), value, default)
    def map(self, function=lambda value: value):
        """ Applies the given function to each value in the column.
        """
        for j, value in enumerate(self):
            self[j] = function(value)
    def swap(self, i1, i2):
        self._datasheet.swap(i1, i2)
#---------------------------------------------------------------------------------------------------
_UID = 0

def uid():
    """ Returns an integer that is unique within the current process
        (a simple incrementing counter, starting at 1).
    """
    global _UID
    _UID += 1
    return _UID
def truncate(string, length=100):
    """ Returns a (head, tail)-tuple, where the head string length is less than the given length.
        Preferably the string is split at a space, otherwise a hyphen ("-") is injected.
    """
    # Short enough: nothing to split off.
    if len(string) <= length:
        return string, ""
    words = string.split(" ")
    n = 0  # Length of the head built so far (words + separating spaces).
    for i, w in enumerate(words):
        if n + len(w) > length:
            break
        n += len(w) + 1
    if i == 0 and len(w) > length:
        # The very first word is itself too long: hyphenate it.
        head = w[:length-1] + "-"
        tail = (w[length-1:] + " " + " ".join(words[1:])).strip()
        return head, tail
    # Split between words: head gets the words that fit, tail the rest.
    return " ".join(words[:i]), " ".join(words[i:])

_truncate = truncate
def pprint(datasheet, truncate=40, padding=" ", fill="."):
""" Prints a string where the rows in the datasheet are organized in outlined columns.
"""
# Calculate the width of each column, based on the longest field in each column.
# Long fields can be split across different lines, so we need to check each line.
w = [0 for column in datasheet.columns]
R = []
for i, row in enumerate(datasheet.rows):
fields = []
for j, v in enumerate(row):
# Cast each field in the row to a string.
# Strings that span beyond the maximum column width are wrapped.
# Thus, each "field" in the row is a list of lines.
head, tail = _truncate(decode_utf8(v), truncate)
lines = []
lines.append(head)
w[j] = max(w[j], len(head))
while len(tail) > 0:
head, tail = _truncate(tail, truncate)
lines.append(head)
w[j] = max(w[j], len(head))
fields.append(lines)
R.append(fields)
for i, fields in enumerate(R):
# Add empty lines to each field so they are of equal height.
n = max([len(lines) for lines in fields])
fields = [lines+[""] * (n-len(lines)) for lines in fields]
# Print the row line per line, justifying the fields with spaces.
for k in range(n):
for j, lines in enumerate(fields):
s = lines[k]
s += ((k==0 or len(lines[k]) > 0) and fill or " ") * (w[j] - len(lines[k]))
s += padding
print s,
print
| bsd-3-clause |
jicruz/heroku-bot | lib/pip/_vendor/distlib/_backport/sysconfig.py | 327 | 26955 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Access to Python's configuration information."""
import codecs
import os
import re
import sys
from os.path import pardir, realpath
try:
import configparser
except ImportError:
import ConfigParser as configparser
# Public API of this sysconfig backport (mirrors the stdlib sysconfig module).
__all__ = [
    'get_config_h_filename',
    'get_config_var',
    'get_config_vars',
    'get_makefile_filename',
    'get_path',
    'get_path_names',
    'get_paths',
    'get_platform',
    'get_python_version',
    'get_scheme_names',
    'parse_config_h',
]
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
# _PROJECT_BASE: directory of the running interpreter, adjusted below for
# Windows in-tree (PCbuild) layouts.
if sys.executable:
    _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
    # sys.executable can be empty if argv[0] has been changed and Python is
    # unable to retrieve the real program name
    _PROJECT_BASE = _safe_realpath(os.getcwd())
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
def is_python_build():
    # True when running from a CPython source checkout, detected by the
    # presence of Modules/Setup.dist or Modules/Setup.local under the
    # project base; False for an installed Python.
    for fn in ("Setup.dist", "Setup.local"):
        if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)):
            return True
    return False
_PYTHON_BUILD = is_python_build()
_cfg_read = False  # Guard so that sysconfig.cfg is only parsed once.
def _ensure_cfg_read():
    """Lazily load the packaged sysconfig.cfg into _SCHEMES (once per process)."""
    global _cfg_read
    if not _cfg_read:
        from ..resources import finder
        backport_package = __name__.rsplit('.', 1)[0]
        _finder = finder(backport_package)
        _cfgfile = _finder.find('sysconfig.cfg')
        assert _cfgfile, 'sysconfig.cfg exists'
        with _cfgfile.as_stream() as s:
            _SCHEMES.readfp(s)
        if _PYTHON_BUILD:
            # When running from a source checkout, headers live in the tree.
            for scheme in ('posix_prefix', 'posix_home'):
                _SCHEMES.set(scheme, 'include', '{srcdir}/Include')
                _SCHEMES.set(scheme, 'platinclude', '{projectbase}/.')
        _cfg_read = True
_SCHEMES = configparser.RawConfigParser()
# Matches {name} substitution tokens in scheme path templates.
_VAR_REPL = re.compile(r'\{([^{]*?)\}')
def _expand_globals(config):
    """Copy options from the [globals] section into every other section
    (without overwriting existing options), then expand {name} references
    in option values using options of the same section."""
    _ensure_cfg_read()
    if config.has_section('globals'):
        globals = config.items('globals')
    else:
        globals = tuple()
    sections = config.sections()
    for section in sections:
        if section == 'globals':
            continue
        for option, value in globals:
            if config.has_option(section, option):
                continue
            config.set(section, option, value)
    config.remove_section('globals')
    # now expanding local variables defined in the cfg file
    #
    for section in config.sections():
        variables = dict(config.items(section))
        def _replacer(matchobj):
            # Unknown {tokens} are left unchanged.
            name = matchobj.group(1)
            if name in variables:
                return variables[name]
            return matchobj.group(0)
        for option, value in config.items(section):
            config.set(section, option, _VAR_REPL.sub(_replacer, value))
#_expand_globals(_SCHEMES)
# FIXME don't rely on sys.version here, its format is an implementation detail
# of CPython, use sys.version_info or sys.hexversion
_PY_VERSION = sys.version.split()[0]
_PY_VERSION_SHORT = sys.version[:3]
_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2]
_PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
_CONFIG_VARS = None
_USER_BASE = None
def _subst_vars(path, local_vars):
    """In the string `path`, replace tokens like {some.thing} with the
    corresponding value from the map `local_vars`.

    If there is no corresponding value, leave the token unchanged.
    """
    def _replacer(matchobj):
        name = matchobj.group(1)
        # local_vars wins over the environment; unknown names pass through.
        for mapping in (local_vars, os.environ):
            if name in mapping:
                return mapping[name]
        return matchobj.group(0)
    return _VAR_REPL.sub(_replacer, path)
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
    """Return the scheme's paths as a dict, with {tokens} substituted from
    *vars* (augmented with get_config_vars()) and each path normalized."""
    res = {}
    if vars is None:
        vars = {}
    # Fill in config vars without overwriting caller-supplied values.
    _extend_dict(vars, get_config_vars())
    for key, value in _SCHEMES.items(scheme):
        if os.name in ('posix', 'nt'):
            value = os.path.expanduser(value)
        res[key] = os.path.normpath(_subst_vars(value, vars))
    return res
def format_value(value, vars):
    """Expand {name} tokens in *value* using the *vars* mapping;
    unknown tokens are left in place unchanged."""
    def _replacer(matchobj):
        name = matchobj.group(1)
        return vars[name] if name in vars else matchobj.group(0)
    return _VAR_REPL.sub(_replacer, value)
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
def _getuserbase():
    """Return the per-user base directory; PYTHONUSERBASE always wins."""
    env_base = os.environ.get("PYTHONUSERBASE", None)
    def joinuser(*args):
        # Join and expand "~" to the user's home directory.
        return os.path.expanduser(os.path.join(*args))
    # what about 'os2emx', 'riscos' ?
    if os.name == "nt":
        base = os.environ.get("APPDATA") or "~"
        if env_base:
            return env_base
        else:
            return joinuser(base, "Python")
    if sys.platform == "darwin":
        # Framework builds on macOS use ~/Library/<framework>/X.Y.
        framework = get_config_var("PYTHONFRAMEWORK")
        if framework:
            if env_base:
                return env_base
            else:
                return joinuser("~", "Library", framework, "%d.%d" %
                                sys.version_info[:2])
    if env_base:
        return env_base
    else:
        return joinuser("~", ".local")
def _parse_makefile(filename, vars=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
if vars is None:
vars = {}
done = {}
notdone = {}
with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f:
lines = f.readlines()
for line in lines:
if line.startswith('#') or line.strip() == '':
continue
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
variables = list(notdone.keys())
# Variables with a 'PY_' prefix in the makefile. These need to
# be made available without that prefix through sysconfig.
# Special care is needed to ensure that variable expansion works, even
# if the expansion uses the name without a prefix.
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
while len(variables) > 0:
for name in tuple(variables):
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m is not None:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
elif n in renamed_variables:
if (name.startswith('PY_') and
name[3:] in renamed_variables):
item = ""
elif 'PY_' + n in notdone:
found = False
else:
item = str(done['PY_' + n])
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try:
value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
variables.remove(name)
if (name.startswith('PY_') and
name[3:] in renamed_variables):
name = name[3:]
if name not in done:
done[name] = value
else:
# bogus variable reference (e.g. "prefix=$/opt/python");
# just drop it since we can't deal
done[name] = value
variables.remove(name)
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
vars.update(done)
return vars
def get_makefile_filename():
    """Return the path of the Makefile."""
    if _PYTHON_BUILD:
        return os.path.join(_PROJECT_BASE, "Makefile")
    if hasattr(sys, 'abiflags'):
        # Python 3.2+: the config dir carries the ABI flags (e.g. config-3.3m).
        config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags)
    else:
        config_dir_name = 'config'
    return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile')
def _init_posix(vars):
    """Initialize the module as appropriate for POSIX systems.

    Fills *vars* from the installed Makefile and pyconfig.h; raises
    IOError with a descriptive message if either cannot be opened.
    """
    # load the installed Makefile:
    makefile = get_makefile_filename()
    try:
        _parse_makefile(makefile, vars)
    except IOError as e:
        msg = "invalid Python installation: unable to open %s" % makefile
        if hasattr(e, "strerror"):
            msg = msg + " (%s)" % e.strerror
        raise IOError(msg)
    # load the installed pyconfig.h:
    config_h = get_config_h_filename()
    try:
        with open(config_h) as f:
            parse_config_h(f, vars)
    except IOError as e:
        msg = "invalid Python installation: unable to open %s" % config_h
        if hasattr(e, "strerror"):
            msg = msg + " (%s)" % e.strerror
        raise IOError(msg)
    # On AIX, there are wrong paths to the linker scripts in the Makefile
    # -- these paths are relative to the Python source, but when installed
    # the scripts are in another directory.
    if _PYTHON_BUILD:
        vars['LDSHARED'] = vars['BLDSHARED']
def _init_non_posix(vars):
    """Initialize the module as appropriate for NT"""
    # set basic install directories
    vars['LIBDEST'] = get_path('stdlib')
    vars['BINLIBDEST'] = get_path('platstdlib')
    vars['INCLUDEPY'] = get_path('include')
    # Extension-module suffix and executable suffix on Windows.
    vars['SO'] = '.pyd'
    vars['EXE'] = '.exe'
    vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
    vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
#
# public APIs
#
def parse_config_h(fp, vars=None):
    """Parse a config.h-style file.

    A dictionary containing name/value pairs is returned.  If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary.
    """
    if vars is None:
        vars = {}
    define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
    undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
    while True:
        line = fp.readline()
        if not line:
            break
        m = define_rx.match(line)
        if m:
            # "#define NAME value": store ints as int, the rest verbatim.
            n, v = m.group(1, 2)
            try:
                vars[n] = int(v)
            except ValueError:
                vars[n] = v
            continue
        m = undef_rx.match(line)
        if m:
            # "/* #undef NAME */": the feature is explicitly disabled.
            vars[m.group(1)] = 0
    return vars
def get_config_h_filename():
    """Return the path of pyconfig.h."""
    if _PYTHON_BUILD:
        if os.name == "nt":
            # In-tree Windows build: headers live under PC/.
            inc_dir = os.path.join(_PROJECT_BASE, "PC")
        else:
            inc_dir = _PROJECT_BASE
    else:
        inc_dir = get_path('platinclude')
    return os.path.join(inc_dir, 'pyconfig.h')
def get_scheme_names():
    """Return a tuple containing the schemes names."""
    return tuple(sorted(_SCHEMES.sections()))
def get_path_names():
    """Return a tuple containing the paths names."""
    # xxx see if we want a static list
    # NOTE(review): RawConfigParser.options() returns a list, not a tuple,
    # despite the docstring.
    return _SCHEMES.options('posix_prefix')
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
    """Return a mapping containing an install scheme.

    ``scheme`` is the install scheme name. If not provided, it will
    return the default scheme for the current platform.
    """
    _ensure_cfg_read()
    if expand:
        # Substitute {tokens} and normalize each path.
        return _expand_vars(scheme, vars)
    else:
        return dict(_SCHEMES.items(scheme))
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
    """Return a path corresponding to the scheme.

    ``scheme`` is the install scheme name.
    """
    return get_paths(scheme, vars, expand)[name]
def get_config_vars(*args):
    """With no arguments, return a dictionary of all configuration
    variables relevant for the current platform.

    On Unix, this means every variable defined in Python's installed Makefile;
    On Windows and Mac OS it's a much smaller set.

    With arguments, return a list of values that result from looking up
    each argument in the configuration variable dictionary.
    """
    global _CONFIG_VARS
    if _CONFIG_VARS is None:
        _CONFIG_VARS = {}
        # Normalized versions of prefix and exec_prefix are handy to have;
        # in fact, these are the standard versions used most places in the
        # distutils2 module.
        _CONFIG_VARS['prefix'] = _PREFIX
        _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
        _CONFIG_VARS['py_version'] = _PY_VERSION
        _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
        # NOTE(review): character-slicing breaks for two-digit minor versions
        # (e.g. "3.10" yields "31" instead of "310") -- verify before reuse.
        _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
        _CONFIG_VARS['base'] = _PREFIX
        _CONFIG_VARS['platbase'] = _EXEC_PREFIX
        _CONFIG_VARS['projectbase'] = _PROJECT_BASE
        try:
            _CONFIG_VARS['abiflags'] = sys.abiflags
        except AttributeError:
            # sys.abiflags may not be defined on all platforms.
            _CONFIG_VARS['abiflags'] = ''
        if os.name in ('nt', 'os2'):
            _init_non_posix(_CONFIG_VARS)
        if os.name == 'posix':
            _init_posix(_CONFIG_VARS)
        # Setting 'userbase' is done below the call to the
        # init function to enable using 'get_config_var' in
        # the init-function.
        if sys.version >= '2.6':
            _CONFIG_VARS['userbase'] = _getuserbase()
        if 'srcdir' not in _CONFIG_VARS:
            _CONFIG_VARS['srcdir'] = _PROJECT_BASE
        else:
            _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir'])
        # Convert srcdir into an absolute path if it appears necessary.
        # Normally it is relative to the build directory. However, during
        # testing, for example, we might be running a non-installed python
        # from a different directory.
        if _PYTHON_BUILD and os.name == "posix":
            base = _PROJECT_BASE
            try:
                cwd = os.getcwd()
            except OSError:
                cwd = None
            if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
                base != cwd):
                # srcdir is relative and we are not in the same directory
                # as the executable. Assume executable is in the build
                # directory and make srcdir absolute.
                srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
                _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
        if sys.platform == 'darwin':
            kernel_version = os.uname()[2]  # Kernel version (8.4.3)
            major_version = int(kernel_version.split('.')[0])
            if major_version < 8:
                # On macOS before 10.4, check if -arch and -isysroot
                # are in CFLAGS or LDFLAGS and remove them if they are.
                # This is needed when building extensions on a 10.3 system
                # using a universal build of python.
                for key in ('LDFLAGS', 'BASECFLAGS',
                        # a number of derived variables. These need to be
                        # patched up as well.
                        'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                    flags = _CONFIG_VARS[key]
                    flags = re.sub('-arch\s+\w+\s', ' ', flags)
                    flags = re.sub('-isysroot [^ \t]*', ' ', flags)
                    _CONFIG_VARS[key] = flags
            else:
                # Allow the user to override the architecture flags using
                # an environment variable.
                # NOTE: This name was introduced by Apple in OSX 10.5 and
                # is used by several scripting languages distributed with
                # that OS release.
                if 'ARCHFLAGS' in os.environ:
                    arch = os.environ['ARCHFLAGS']
                    for key in ('LDFLAGS', 'BASECFLAGS',
                        # a number of derived variables. These need to be
                        # patched up as well.
                        'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                        flags = _CONFIG_VARS[key]
                        flags = re.sub('-arch\s+\w+\s', ' ', flags)
                        flags = flags + ' ' + arch
                        _CONFIG_VARS[key] = flags
                # If we're on OSX 10.5 or later and the user tries to
                # compiles an extension using an SDK that is not present
                # on the current machine it is better to not use an SDK
                # than to fail.
                #
                # The major usecase for this is users using a Python.org
                # binary installer on OSX 10.6: that installer uses
                # the 10.4u SDK, but that SDK is not installed by default
                # when you install Xcode.
                #
                CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
                m = re.search('-isysroot\s+(\S+)', CFLAGS)
                if m is not None:
                    sdk = m.group(1)
                    if not os.path.exists(sdk):
                        for key in ('LDFLAGS', 'BASECFLAGS',
                             # a number of derived variables. These need to be
                             # patched up as well.
                            'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                            flags = _CONFIG_VARS[key]
                            flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags)
                            _CONFIG_VARS[key] = flags
    if args:
        # Positional lookup mode: one value per requested name (None if absent).
        vals = []
        for name in args:
            vals.append(_CONFIG_VARS.get(name))
        return vals
    else:
        return _CONFIG_VARS
def get_config_var(name):
    """Look up a single configuration variable; returns None if undefined.

    Equivalent to get_config_vars().get(name)
    """
    all_vars = get_config_vars()
    return all_vars.get(name)
def get_platform():
    """Return a string that identifies the current platform.

    This is used mainly to distinguish platform-specific build directories and
    platform-specific built distributions. Typically includes the OS name
    and version and the architecture (as supplied by 'os.uname()'),
    although the exact information included depends on the OS; eg. for IRIX
    the architecture isn't particularly important (IRIX only runs on SGI
    hardware), but for Linux the kernel version isn't particularly
    important.

    Examples of returned values:
       linux-i586
       linux-alpha (?)
       solaris-2.6-sun4u
       irix-5.3
       irix64-6.2

    Windows will return one of:
       win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
       win-ia64 (64bit Windows on Itanium)
       win32 (all others - specifically, sys.platform is returned)

    For other non-POSIX platforms, currently just returns 'sys.platform'.

    The general shape of the result is "osname-release-machine", except on
    the platforms above that return early with a shorter form.
    """
    if os.name == 'nt':
        # sniff sys.version for architecture: the build string contains
        # e.g. "... [MSC v.1500 64 bit (AMD64)]".
        prefix = " bit ("
        i = sys.version.find(prefix)
        if i == -1:
            return sys.platform
        j = sys.version.find(")", i)
        look = sys.version[i+len(prefix):j].lower()
        if look == 'amd64':
            return 'win-amd64'
        if look == 'itanium':
            return 'win-ia64'
        return sys.platform
    if os.name != "posix" or not hasattr(os, 'uname'):
        # XXX what about the architecture? NT is Intel or Alpha,
        # Mac OS is M68k or PPC, etc.
        return sys.platform
    # Try to distinguish various flavours of Unix
    osname, host, release, version, machine = os.uname()
    # Convert the OS name to lowercase, remove '/' characters
    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
    osname = osname.lower().replace('/', '')
    machine = machine.replace(' ', '_')
    machine = machine.replace('/', '-')
    if osname[:5] == "linux":
        # At least on Linux/Intel, 'machine' is the processor --
        # i386, etc.
        # XXX what about Alpha, SPARC, etc?
        return "%s-%s" % (osname, machine)
    elif osname[:5] == "sunos":
        if release[0] >= "5":  # SunOS 5 == Solaris 2
            osname = "solaris"
            release = "%d.%s" % (int(release[0]) - 3, release[2:])
        # fall through to standard osname-release-machine representation
    elif osname[:4] == "irix":  # could be "irix64"!
        return "%s-%s" % (osname, release)
    elif osname[:3] == "aix":
        return "%s-%s.%s" % (osname, version, release)
    elif osname[:6] == "cygwin":
        osname = "cygwin"
        # Cygwin's release looks like "1.7.x(...)"; keep only the
        # leading dotted-number part.
        rel_re = re.compile(r'[\d.]+')
        m = rel_re.match(release)
        if m:
            release = m.group()
    elif osname[:6] == "darwin":
        #
        # For our purposes, we'll assume that the system version from
        # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
        # to. This makes the compatibility story a bit more sane because the
        # machine is going to compile and link as if it were
        # MACOSX_DEPLOYMENT_TARGET.
        cfgvars = get_config_vars()
        macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
        # NOTE(review): 'if True:' looks like a vestigial guard kept only
        # for its indentation -- confirm against upstream history before
        # flattening it.
        if True:
            # Always calculate the release of the running machine,
            # needed to determine if we can build fat binaries or not.
            macrelease = macver
            # Get the system version. Reading this plist is a documented
            # way to get the system version (see the documentation for
            # the Gestalt Manager)
            try:
                f = open('/System/Library/CoreServices/SystemVersion.plist')
            except IOError:
                # We're on a plain darwin box, fall back to the default
                # behaviour.
                pass
            else:
                try:
                    m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
                                  r'<string>(.*?)</string>', f.read())
                finally:
                    f.close()
                if m is not None:
                    # Keep only "major.minor" of the reported version.
                    macrelease = '.'.join(m.group(1).split('.')[:2])
                # else: fall back to the default behaviour
        if not macver:
            macver = macrelease
        if macver:
            release = macver
            osname = "macosx"
            if ((macrelease + '.') >= '10.4.' and
                    '-arch' in get_config_vars().get('CFLAGS', '').strip()):
                # The universal build will build fat binaries, but not on
                # systems before 10.4
                #
                # Try to detect 4-way universal builds, those have machine-type
                # 'universal' instead of 'fat'.
                machine = 'fat'
                cflags = get_config_vars().get('CFLAGS')
                archs = re.findall('-arch\s+(\S+)', cflags)
                archs = tuple(sorted(set(archs)))
                if len(archs) == 1:
                    machine = archs[0]
                elif archs == ('i386', 'ppc'):
                    machine = 'fat'
                elif archs == ('i386', 'x86_64'):
                    machine = 'intel'
                elif archs == ('i386', 'ppc', 'x86_64'):
                    machine = 'fat3'
                elif archs == ('ppc64', 'x86_64'):
                    machine = 'fat64'
                elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                    machine = 'universal'
                else:
                    raise ValueError(
                        "Don't know machine value for archs=%r" % (archs,))
            elif machine == 'i386':
                # On OSX the machine type returned by uname is always the
                # 32-bit variant, even if the executable architecture is
                # the 64-bit variant
                if sys.maxsize >= 2**32:
                    machine = 'x86_64'
            elif machine in ('PowerPC', 'Power_Macintosh'):
                # Pick a sane name for the PPC architecture.
                # See 'i386' case
                if sys.maxsize >= 2**32:
                    machine = 'ppc64'
                else:
                    machine = 'ppc'
    return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
    """Return the short Python version string (module-level
    _PY_VERSION_SHORT constant), e.g. '2.7'."""
    return _PY_VERSION_SHORT
def _print_dict(title, data):
for index, (key, value) in enumerate(sorted(data.items())):
if index == 0:
print('%s: ' % (title))
print('\t%s = "%s"' % (key, value))
def _main():
    """Display all information sysconfig detects: platform, Python
    version, default install scheme, paths, and config variables."""
    print('Platform: "%s"' % get_platform())
    print('Python version: "%s"' % get_python_version())
    print('Current installation scheme: "%s"' % _get_default_scheme())
    print()
    _print_dict('Paths', get_paths())
    print()
    _print_dict('Variables', get_config_vars())
# Invoked as a script: dump the detected configuration to stdout.
if __name__ == '__main__':
    _main()
| gpl-3.0 |
naparuba/opsbro | data/global-configuration/packs/mongodb/collectors/pymongo/pool.py | 17 | 34410 | # Copyright 2011-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import contextlib
import os
import platform
import socket
import sys
import threading
try:
import ssl
from ssl import SSLError
_HAVE_SNI = getattr(ssl, 'HAS_SNI', False)
except ImportError:
_HAVE_SNI = False
class SSLError(socket.error):
pass
from bson import DEFAULT_CODEC_OPTIONS
from bson.py3compat import imap, itervalues, _unicode
from bson.son import SON
from pymongo import auth, helpers, thread_util, __version__
from pymongo.common import MAX_MESSAGE_SIZE
from pymongo.errors import (AutoReconnect,
ConnectionFailure,
ConfigurationError,
DocumentTooLarge,
NetworkTimeout,
NotMasterError,
OperationFailure)
from pymongo.ismaster import IsMaster
from pymongo.monotonic import time as _time
from pymongo.network import (command,
receive_message,
SocketChecker)
from pymongo.read_concern import DEFAULT_READ_CONCERN
from pymongo.read_preferences import ReadPreference
from pymongo.server_type import SERVER_TYPE
# Always use our backport so we always have support for IP address matching
from pymongo.ssl_match_hostname import match_hostname, CertificateError
# For SNI support. According to RFC6066, section 3, IPv4 and IPv6 literals are
# not permitted for SNI hostname.
# Define is_ip_address() with the best mechanism available on this
# platform: the ipaddress module, then inet_pton/inet_aton, then a
# string-based heuristic as a last resort.
try:
    from ipaddress import ip_address

    def is_ip_address(address):
        """Return True if `address` is an IPv4 or IPv6 literal."""
        try:
            ip_address(_unicode(address))
            return True
        except (ValueError, UnicodeError):
            return False
except ImportError:
    if hasattr(socket, 'inet_pton') and socket.has_ipv6:
        # Most *nix, recent Windows
        def is_ip_address(address):
            """Return True if `address` is an IPv4 or IPv6 literal."""
            try:
                # inet_pton rejects IPv4 literals with leading zeros
                # (e.g. 192.168.0.01), inet_aton does not, and we
                # can connect to them without issue. Use inet_aton.
                socket.inet_aton(address)
                return True
            except socket.error:
                try:
                    socket.inet_pton(socket.AF_INET6, address)
                    return True
                except socket.error:
                    return False
    else:
        # No inet_pton
        def is_ip_address(address):
            """Return True if `address` looks like an IP literal (heuristic)."""
            try:
                socket.inet_aton(address)
                return True
            except socket.error:
                if ':' in address:
                    # ':' is not a valid character for a hostname. If we get
                    # here a few things have to be true:
                    # - We're on a recent version of python 2.7 (2.7.9+).
                    #   2.6 and older 2.7 versions don't support SNI.
                    # - We're on Windows XP or some unusual Unix that doesn't
                    #   have inet_pton.
                    # - The application is using IPv6 literals with TLS, which
                    #   is pretty unusual.
                    return True
                return False
try:
    from fcntl import fcntl, F_GETFD, F_SETFD, FD_CLOEXEC

    def _set_non_inheritable_non_atomic(fd):
        """Set the close-on-exec flag on the given file descriptor.

        'non_atomic' because it is a two-step get/set applied after the
        fd already exists, so a fork between creation and this call can
        still leak the descriptor.
        """
        flags = fcntl(fd, F_GETFD)
        fcntl(fd, F_SETFD, flags | FD_CLOEXEC)
except ImportError:
    # Windows, various platforms we don't claim to support
    # (Jython, IronPython, ...), systems that don't provide
    # everything we need from fcntl, etc.
    def _set_non_inheritable_non_atomic(dummy):
        """Dummy function for platforms that don't provide fcntl."""
        pass
# Client metadata document sent to the server with the initial ismaster
# handshake (see PoolOptions.metadata). Built once at import time from
# the driver version and the host platform.
_METADATA = SON([
    ('driver', SON([('name', 'PyMongo'), ('version', __version__)])),
])

# Fill in the 'os' subdocument per platform.
if sys.platform.startswith('linux'):
    _METADATA['os'] = SON([
        ('type', platform.system()),
        # Distro name and version (e.g. Ubuntu 16.04 xenial)
        ('name', ' '.join([part for part in
                           platform.linux_distribution() if part])),
        ('architecture', platform.machine()),
        # Kernel version (e.g. 4.4.0-17-generic).
        ('version', platform.release())
    ])
elif sys.platform == 'darwin':
    _METADATA['os'] = SON([
        ('type', platform.system()),
        ('name', platform.system()),
        ('architecture', platform.machine()),
        # (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin
        # kernel version.
        ('version', platform.mac_ver()[0])
    ])
elif sys.platform == 'win32':
    _METADATA['os'] = SON([
        ('type', platform.system()),
        # "Windows XP", "Windows 7", "Windows 10", etc.
        ('name', ' '.join((platform.system(), platform.release()))),
        ('architecture', platform.machine()),
        # Windows patch level (e.g. 5.1.2600-SP3)
        ('version', '-'.join(platform.win32_ver()[1:3]))
    ])
elif sys.platform.startswith('java'):
    _name, _ver, _arch = platform.java_ver()[-1]
    _METADATA['os'] = SON([
        # Linux, Windows 7, Mac OS X, etc.
        ('type', _name),
        ('name', _name),
        # x86, x86_64, AMD64, etc.
        ('architecture', _arch),
        # Linux kernel version, OSX version, etc.
        ('version', _ver)
    ])
else:
    # Get potential alias (e.g. SunOS 5.11 becomes Solaris 2.11)
    _aliased = platform.system_alias(
        platform.system(), platform.release(), platform.version())
    _METADATA['os'] = SON([
        ('type', platform.system()),
        ('name', ' '.join([part for part in _aliased[:2] if part])),
        ('architecture', platform.machine()),
        ('version', _aliased[2])
    ])

# 'platform' describes the Python implementation/interpreter.
if platform.python_implementation().startswith('PyPy'):
    _METADATA['platform'] = ' '.join(
        (platform.python_implementation(),
         '.'.join(imap(str, sys.pypy_version_info)),
         '(Python %s)' % '.'.join(imap(str, sys.version_info))))
elif sys.platform.startswith('java'):
    _METADATA['platform'] = ' '.join(
        (platform.python_implementation(),
         '.'.join(imap(str, sys.version_info)),
         '(%s)' % ' '.join((platform.system(), platform.release()))))
else:
    _METADATA['platform'] = ' '.join(
        (platform.python_implementation(),
         '.'.join(imap(str, sys.version_info))))
# If the first getaddrinfo call of this interpreter's life is on a thread,
# while the main thread holds the import lock, getaddrinfo deadlocks trying
# to import the IDNA codec. Import it here, where presumably we're on the
# main thread, to avoid the deadlock. See PYTHON-607.
# (The encoded result is discarded; only the codec import side effect matters.)
u'foo'.encode('idna')
def _raise_connection_failure(address, error):
    """Convert a socket.error to ConnectionFailure and raise it."""
    host, port = address
    # If connecting to a Unix socket, port will be None.
    if port is None:
        msg = '%s: %s' % (host, error)
    else:
        msg = '%s:%d: %s' % (host, port, error)

    timed_out = isinstance(error, socket.timeout)
    if not timed_out and isinstance(error, SSLError):
        # CPython 2.6, 2.7, PyPy 2.x, and PyPy3 do not distinguish network
        # timeouts from other SSLErrors (https://bugs.python.org/issue10272).
        # Luckily, we can work around this limitation because the phrase
        # 'timed out' appears in all the timeout related SSLErrors raised
        # on the above platforms. CPython >= 3.2 and PyPy3.3 correctly raise
        # socket.timeout.
        timed_out = 'timed out' in str(error)

    if timed_out:
        raise NetworkTimeout(msg)
    raise AutoReconnect(msg)
class PoolOptions(object):
    """Read-only bundle of connection pool configuration options.

    Holds pool sizing, timeout, SSL, keepalive, and monitoring settings,
    plus the client metadata document sent to the server during the
    ismaster handshake. Instances are immutable after construction.
    """

    __slots__ = ('__max_pool_size', '__min_pool_size', '__max_idle_time_ms',
                 '__connect_timeout', '__socket_timeout',
                 '__wait_queue_timeout', '__wait_queue_multiple',
                 '__ssl_context', '__ssl_match_hostname', '__socket_keepalive',
                 '__event_listeners', '__appname', '__metadata')

    def __init__(self, max_pool_size=100, min_pool_size=0,
                 max_idle_time_ms=None, connect_timeout=None,
                 socket_timeout=None, wait_queue_timeout=None,
                 wait_queue_multiple=None, ssl_context=None,
                 ssl_match_hostname=True, socket_keepalive=False,
                 event_listeners=None, appname=None):
        self.__max_pool_size = max_pool_size
        self.__min_pool_size = min_pool_size
        self.__max_idle_time_ms = max_idle_time_ms
        self.__connect_timeout = connect_timeout
        self.__socket_timeout = socket_timeout
        self.__wait_queue_timeout = wait_queue_timeout
        self.__wait_queue_multiple = wait_queue_multiple
        self.__ssl_context = ssl_context
        self.__ssl_match_hostname = ssl_match_hostname
        self.__socket_keepalive = socket_keepalive
        self.__event_listeners = event_listeners
        self.__appname = appname
        # Copy the module-level metadata so per-client additions (the
        # application name) don't leak into other clients.
        self.__metadata = _METADATA.copy()
        if appname:
            self.__metadata['application'] = {'name': appname}

    @property
    def max_pool_size(self):
        """The maximum allowable number of concurrent connections to each
        connected server. Requests to a server will block if there are
        `maxPoolSize` outstanding connections to the requested server.
        Defaults to 100. Cannot be 0.

        When a server's pool has reached `max_pool_size`, operations for that
        server block waiting for a socket to be returned to the pool. If
        ``waitQueueTimeoutMS`` is set, a blocked operation will raise
        :exc:`~pymongo.errors.ConnectionFailure` after a timeout.
        By default ``waitQueueTimeoutMS`` is not set.
        """
        return self.__max_pool_size

    @property
    def min_pool_size(self):
        """The minimum required number of concurrent connections that the pool
        will maintain to each connected server. Default is 0.
        """
        return self.__min_pool_size

    @property
    def max_idle_time_ms(self):
        """The maximum number of milliseconds that a connection can remain
        idle in the pool before being removed and replaced. Defaults to
        `None` (no limit).
        """
        return self.__max_idle_time_ms

    @property
    def connect_timeout(self):
        """How long a connection can take to be opened before timing out.
        """
        return self.__connect_timeout

    @property
    def socket_timeout(self):
        """How long a send or receive on a socket can take before timing out.
        """
        return self.__socket_timeout

    @property
    def wait_queue_timeout(self):
        """How long a thread will wait for a socket from the pool if the pool
        has no free sockets.
        """
        return self.__wait_queue_timeout

    @property
    def wait_queue_multiple(self):
        """Multiplied by max_pool_size to give the number of threads allowed
        to wait for a socket at one time.
        """
        return self.__wait_queue_multiple

    @property
    def ssl_context(self):
        """An SSLContext instance or None.
        """
        return self.__ssl_context

    @property
    def ssl_match_hostname(self):
        """Call ssl.match_hostname if cert_reqs is not ssl.CERT_NONE.
        """
        return self.__ssl_match_hostname

    @property
    def socket_keepalive(self):
        """Whether to send periodic messages to determine if a connection
        is closed.
        """
        return self.__socket_keepalive

    @property
    def event_listeners(self):
        """An instance of pymongo.monitoring._EventListeners.
        """
        return self.__event_listeners

    @property
    def appname(self):
        """The application name, for sending with ismaster in server handshake.
        """
        return self.__appname

    @property
    def metadata(self):
        """A dict of metadata about the application, driver, os, and platform.

        Returns a copy, so callers cannot mutate the stored document.
        """
        return self.__metadata.copy()
class SocketInfo(object):
    """Store a socket with some metadata.

    :Parameters:
      - `sock`: a raw socket object
      - `pool`: a Pool instance
      - `ismaster`: optional IsMaster instance, response to ismaster on `sock`
      - `address`: the server's (host, port)
    """
    def __init__(self, sock, pool, ismaster, address):
        self.sock = sock
        self.address = address
        # Credentials this socket has authenticated with (see check_auth).
        self.authset = set()
        self.closed = False
        self.last_checkout = _time()
        # Server limits come from the ismaster handshake response; when the
        # handshake is skipped (ismaster is None) most limits are unknown.
        self.is_writable = ismaster.is_writable if ismaster else None
        self.max_wire_version = ismaster.max_wire_version if ismaster else None
        self.max_bson_size = ismaster.max_bson_size if ismaster else None
        self.max_message_size = (
            ismaster.max_message_size if ismaster else MAX_MESSAGE_SIZE)
        self.max_write_batch_size = (
            ismaster.max_write_batch_size if ismaster else None)
        self.listeners = pool.opts.event_listeners
        if ismaster:
            self.is_mongos = ismaster.server_type == SERVER_TYPE.Mongos
        else:
            self.is_mongos = None
        # The pool's pool_id changes with each reset() so we can close sockets
        # created before the last reset.
        self.pool_id = pool.pool_id

    def command(self, dbname, spec, slave_ok=False,
                read_preference=ReadPreference.PRIMARY,
                codec_options=DEFAULT_CODEC_OPTIONS, check=True,
                allowable_errors=None, check_keys=False,
                read_concern=DEFAULT_READ_CONCERN,
                write_concern=None,
                parse_write_concern_error=False,
                collation=None):
        """Execute a command or raise ConnectionFailure or OperationFailure.

        :Parameters:
          - `dbname`: name of the database on which to run the command
          - `spec`: a command document as a dict, SON, or mapping object
          - `slave_ok`: whether to set the SlaveOkay wire protocol bit
          - `read_preference`: a read preference
          - `codec_options`: a CodecOptions instance
          - `check`: raise OperationFailure if there are errors
          - `allowable_errors`: errors to ignore if `check` is True
          - `check_keys`: if True, check `spec` for invalid keys
          - `read_concern`: The read concern for this command.
          - `write_concern`: The write concern for this command.
          - `parse_write_concern_error`: Whether to parse the
            ``writeConcernError`` field in the command response.
          - `collation`: The collation for this command.
        """
        # Feature guards: readConcern needs wire version 4+, writeConcern
        # in commands and collation need wire version 5+ (MongoDB 3.4).
        if self.max_wire_version < 4 and not read_concern.ok_for_legacy:
            raise ConfigurationError(
                'read concern level of %s is not valid '
                'with a max wire version of %d.'
                % (read_concern.level, self.max_wire_version))
        if not (write_concern is None or write_concern.acknowledged or
                collation is None):
            raise ConfigurationError(
                'Collation is unsupported for unacknowledged writes.')
        if self.max_wire_version >= 5 and write_concern:
            # NOTE(review): mutates the caller-supplied spec in place.
            spec['writeConcern'] = write_concern.document
        elif self.max_wire_version < 5 and collation is not None:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.4+ to use a collation.')
        try:
            return command(self.sock, dbname, spec, slave_ok,
                           self.is_mongos, read_preference, codec_options,
                           check, allowable_errors, self.address,
                           check_keys, self.listeners, self.max_bson_size,
                           read_concern,
                           parse_write_concern_error=parse_write_concern_error,
                           collation=collation)
        except OperationFailure:
            raise
        # Catch socket.error, KeyboardInterrupt, etc. and close ourselves.
        except BaseException as error:
            self._raise_connection_failure(error)

    def send_message(self, message, max_doc_size):
        """Send a raw BSON message or raise ConnectionFailure.

        If a network exception is raised, the socket is closed.
        """
        if (self.max_bson_size is not None
                and max_doc_size > self.max_bson_size):
            raise DocumentTooLarge(
                "BSON document too large (%d bytes) - the connected server "
                "supports BSON document sizes up to %d bytes." %
                (max_doc_size, self.max_bson_size))
        try:
            self.sock.sendall(message)
        except BaseException as error:
            self._raise_connection_failure(error)

    def receive_message(self, operation, request_id):
        """Receive a raw BSON message or raise ConnectionFailure.

        If any exception is raised, the socket is closed.
        """
        try:
            return receive_message(
                self.sock, operation, request_id, self.max_message_size)
        except BaseException as error:
            self._raise_connection_failure(error)

    def legacy_write(self, request_id, msg, max_doc_size, with_last_error):
        """Send OP_INSERT, etc., optionally returning response as a dict.

        Can raise ConnectionFailure or OperationFailure.

        :Parameters:
          - `request_id`: an int.
          - `msg`: bytes, an OP_INSERT, OP_UPDATE, or OP_DELETE message,
            perhaps with a getlasterror command appended.
          - `max_doc_size`: size in bytes of the largest document in `msg`.
          - `with_last_error`: True if a getlasterror command is appended.
        """
        if not with_last_error and not self.is_writable:
            # Write won't succeed, bail as if we'd done a getlasterror.
            raise NotMasterError("not master")
        self.send_message(msg, max_doc_size)
        if with_last_error:
            response = self.receive_message(1, request_id)
            return helpers._check_gle_response(response)

    def write_command(self, request_id, msg):
        """Send "insert" etc. command, returning response as a dict.

        Can raise ConnectionFailure or OperationFailure.

        :Parameters:
          - `request_id`: an int.
          - `msg`: bytes, the command message.
        """
        self.send_message(msg, 0)
        response = helpers._unpack_response(self.receive_message(1, request_id))
        assert response['number_returned'] == 1
        result = response['data'][0]

        # Raises NotMasterError or OperationFailure.
        helpers._check_command_response(result)
        return result

    def check_auth(self, all_credentials):
        """Update this socket's authentication.

        Log in or out to bring this socket's credentials up to date with
        those provided. Can raise ConnectionFailure or OperationFailure.

        :Parameters:
          - `all_credentials`: dict, maps auth source to MongoCredential.
        """
        if all_credentials or self.authset:
            cached = set(itervalues(all_credentials))
            authset = self.authset.copy()

            # Logout any credentials that no longer exist in the cache.
            for credentials in authset - cached:
                auth.logout(credentials.source, self)
                self.authset.discard(credentials)

            # Authenticate any credentials we haven't logged in with yet.
            for credentials in cached - authset:
                auth.authenticate(credentials, self)
                self.authset.add(credentials)

    def authenticate(self, credentials):
        """Log in to the server and store these credentials in `authset`.

        Can raise ConnectionFailure or OperationFailure.

        :Parameters:
          - `credentials`: A MongoCredential.
        """
        auth.authenticate(credentials, self)
        self.authset.add(credentials)

    def close(self):
        """Close the underlying socket and mark this SocketInfo closed."""
        self.closed = True
        # Avoid exceptions on interpreter shutdown.
        try:
            self.sock.close()
        except:
            pass

    def _raise_connection_failure(self, error):
        """Close this socket and re-raise `error` as a ConnectionFailure
        (for socket errors) or unchanged (for everything else)."""
        # Catch *all* exceptions from socket methods and close the socket. In
        # regular Python, socket operations only raise socket.error, even if
        # the underlying cause was a Ctrl-C: a signal raised during socket.recv
        # is expressed as an EINTR error from poll. See internal_select_ex() in
        # socketmodule.c. All error codes from poll become socket.error at
        # first. Eventually in PyEval_EvalFrameEx the interpreter checks for
        # signals and throws KeyboardInterrupt into the current frame on the
        # main thread.
        #
        # But in Gevent and Eventlet, the polling mechanism (epoll, kqueue,
        # ...) is called in Python code, which experiences the signal as a
        # KeyboardInterrupt from the start, rather than as an initial
        # socket.error, so we catch that, close the socket, and reraise it.
        self.close()
        if isinstance(error, socket.error):
            _raise_connection_failure(self.address, error)
        else:
            raise error

    # Equality and hashing delegate to the wrapped socket object.
    def __eq__(self, other):
        return self.sock == other.sock

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.sock)

    def __repr__(self):
        return "SocketInfo(%s)%s at %s" % (
            repr(self.sock),
            self.closed and " CLOSED" or "",
            id(self)
        )
def _create_connection(address, options):
    """Given (host, port) and PoolOptions, connect and return a socket object.

    Can raise socket.error.

    This is a modified version of create_connection from CPython >= 2.6.
    Tries each address returned by getaddrinfo in turn, raising the last
    error only if every attempt fails.
    """
    host, port = address

    # Check if dealing with a unix domain socket
    if host.endswith('.sock'):
        if not hasattr(socket, "AF_UNIX"):
            raise ConnectionFailure("UNIX-sockets are not supported "
                                    "on this system")
        sock = socket.socket(socket.AF_UNIX)
        # SOCK_CLOEXEC not supported for Unix sockets.
        _set_non_inheritable_non_atomic(sock.fileno())
        try:
            sock.connect(host)
            return sock
        except socket.error:
            sock.close()
            raise

    # Don't try IPv6 if we don't support it. Also skip it if host
    # is 'localhost' (::1 is fine). Avoids slow connect issues
    # like PYTHON-356.
    family = socket.AF_INET
    if socket.has_ipv6 and host != 'localhost':
        family = socket.AF_UNSPEC

    err = None
    for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
        af, socktype, proto, dummy, sa = res
        # SOCK_CLOEXEC was new in CPython 3.2, and only available on a limited
        # number of platforms (newer Linux and *BSD). Starting with CPython 3.4
        # all file descriptors are created non-inheritable. See PEP 446.
        try:
            sock = socket.socket(
                af, socktype | getattr(socket, 'SOCK_CLOEXEC', 0), proto)
        except socket.error:
            # Can SOCK_CLOEXEC be defined even if the kernel doesn't support
            # it?
            sock = socket.socket(af, socktype, proto)
        # Fallback when SOCK_CLOEXEC isn't available.
        _set_non_inheritable_non_atomic(sock.fileno())
        try:
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            sock.settimeout(options.connect_timeout)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE,
                            options.socket_keepalive)
            sock.connect(sa)
            return sock
        except socket.error as e:
            # Remember the last failure and try the next resolved address.
            err = e
            sock.close()

    if err is not None:
        raise err
    else:
        # This likely means we tried to connect to an IPv6 only
        # host with an OS/kernel or Python interpreter that doesn't
        # support IPv6. The test case is Jython2.5.1 which doesn't
        # support IPv6 at all.
        raise socket.error('getaddrinfo failed')
def _configured_socket(address, options):
    """Given (host, port) and PoolOptions, return a configured socket.

    Can raise socket.error, ConnectionFailure, or CertificateError.

    Sets socket's SSL and timeout options: wraps the raw socket in SSL if
    an SSLContext is configured (with SNI where supported), optionally
    verifies the server hostname, then applies the operation timeout.
    """
    sock = _create_connection(address, options)
    ssl_context = options.ssl_context

    if ssl_context is not None:
        host = address[0]
        try:
            # According to RFC6066, section 3, IPv4 and IPv6 literals are
            # not permitted for SNI hostname.
            if _HAVE_SNI and not is_ip_address(host):
                sock = ssl_context.wrap_socket(sock, server_hostname=host)
            else:
                sock = ssl_context.wrap_socket(sock)
        except IOError as exc:
            sock.close()
            raise ConnectionFailure("SSL handshake failed: %s" % (str(exc),))
        if ssl_context.verify_mode and options.ssl_match_hostname:
            try:
                match_hostname(sock.getpeercert(), hostname=host)
            except CertificateError:
                sock.close()
                raise

    # The connect timeout was applied in _create_connection; switch to the
    # per-operation socket timeout now that the connection is established.
    sock.settimeout(options.socket_timeout)
    return sock
# Do *not* explicitly inherit from object or Jython won't call __del__
# http://bugs.jython.org/issue1057
class Pool:
    """A pool of SocketInfo objects for one (host, port) server address."""

    def __init__(self, address, options, handshake=True):
        """
        :Parameters:
          - `address`: a (hostname, port) tuple
          - `options`: a PoolOptions instance
          - `handshake`: whether to call ismaster for each new SocketInfo
        """
        # Check a socket's health with socket_closed() every once in a while.
        # Can override for testing: 0 to always check, None to never check.
        self._check_interval_seconds = 1

        # Idle sockets available for checkout. Guarded by self.lock.
        self.sockets = set()
        self.lock = threading.Lock()
        # Number of sockets currently checked out. Guarded by self.lock.
        self.active_sockets = 0

        # Keep track of resets, so we notice sockets created before the most
        # recent reset and close them.
        self.pool_id = 0
        self.pid = os.getpid()
        self.address = address
        self.opts = options
        self.handshake = handshake

        if (self.opts.wait_queue_multiple is None or
                self.opts.max_pool_size is None):
            max_waiters = None
        else:
            max_waiters = (
                self.opts.max_pool_size * self.opts.wait_queue_multiple)

        # Bounds concurrent checkouts at max_pool_size; optionally bounds
        # the number of waiting threads at max_waiters.
        self._socket_semaphore = thread_util.create_semaphore(
            self.opts.max_pool_size, max_waiters)
        self.socket_checker = SocketChecker()

    def reset(self):
        """Discard all pooled sockets; subsequently returned sockets from
        before this reset are closed instead of re-pooled."""
        with self.lock:
            self.pool_id += 1
            self.pid = os.getpid()
            sockets, self.sockets = self.sockets, set()
            self.active_sockets = 0

        for sock_info in sockets:
            sock_info.close()

    def remove_stale_sockets(self):
        """Close idle sockets past the idle limit, then top the pool back
        up to min_pool_size."""
        with self.lock:
            if self.opts.max_idle_time_ms is not None:
                for sock_info in self.sockets.copy():
                    age = _time() - sock_info.last_checkout
                    # NOTE(review): `age` is in seconds while the option is
                    # named max_idle_time_ms; the option validator may
                    # already convert the URI value to seconds -- confirm
                    # the units agree.
                    if age > self.opts.max_idle_time_ms:
                        self.sockets.remove(sock_info)
                        sock_info.close()

        while len(
                self.sockets) + self.active_sockets < self.opts.min_pool_size:
            # connect() is performed outside the lock; only the set update
            # is synchronized.
            sock_info = self.connect()
            with self.lock:
                self.sockets.add(sock_info)

    def connect(self):
        """Connect to Mongo and return a new SocketInfo.

        Can raise ConnectionFailure or CertificateError.

        Note that the pool does not keep a reference to the socket -- you
        must call return_socket() when you're done with it.
        """
        sock = None
        try:
            sock = _configured_socket(self.address, self.opts)
            if self.handshake:
                cmd = SON([
                    ('ismaster', 1),
                    ('client', self.opts.metadata)
                ])
                ismaster = IsMaster(
                    command(sock,
                            'admin',
                            cmd,
                            False,
                            False,
                            ReadPreference.PRIMARY,
                            DEFAULT_CODEC_OPTIONS))
            else:
                ismaster = None
            return SocketInfo(sock, self, ismaster, self.address)
        except socket.error as error:
            if sock is not None:
                sock.close()
            _raise_connection_failure(self.address, error)

    @contextlib.contextmanager
    def get_socket(self, all_credentials, checkout=False):
        """Get a socket from the pool. Use with a "with" statement.

        Returns a :class:`SocketInfo` object wrapping a connected
        :class:`socket.socket`.

        This method should always be used in a with-statement::

            with pool.get_socket(credentials, checkout) as socket_info:
                socket_info.send_message(msg)
                data = socket_info.receive_message(op_code, request_id)

        The socket is logged in or out as needed to match ``all_credentials``
        using the correct authentication mechanism for the server's wire
        protocol version.

        Can raise ConnectionFailure or OperationFailure.

        :Parameters:
          - `all_credentials`: dict, maps auth source to MongoCredential.
          - `checkout` (optional): keep socket checked out.
        """
        # First get a socket, then attempt authentication. Simplifies
        # semaphore management in the face of network errors during auth.
        sock_info = self._get_socket_no_auth()
        try:
            sock_info.check_auth(all_credentials)
            yield sock_info
        except:
            # Exception in caller. Decrement semaphore.
            self.return_socket(sock_info)
            raise
        else:
            if not checkout:
                self.return_socket(sock_info)

    def _get_socket_no_auth(self):
        """Get or create a SocketInfo. Can raise ConnectionFailure."""
        # We use the pid here to avoid issues with fork / multiprocessing.
        # See test.test_client:TestClient.test_fork for an example of
        # what could go wrong otherwise
        if self.pid != os.getpid():
            self.reset()

        # Get a free socket or create one. If max_waiters is set, this
        # raises instead of blocking once too many threads are waiting.
        if not self._socket_semaphore.acquire(
                True, self.opts.wait_queue_timeout):
            self._raise_wait_queue_timeout()
        with self.lock:
            self.active_sockets += 1

        # We've now acquired the semaphore and must release it on error.
        try:
            try:
                # set.pop() isn't atomic in Jython less than 2.7, see
                # http://bugs.jython.org/issue1854
                with self.lock:
                    sock_info, from_pool = self.sockets.pop(), True
            except KeyError:
                # Can raise ConnectionFailure or CertificateError.
                sock_info, from_pool = self.connect(), False
            # If socket is idle, open a new one.
            if self.opts.max_idle_time_ms is not None:
                age = _time() - sock_info.last_checkout
                # NOTE(review): same seconds-vs-ms comparison as in
                # remove_stale_sockets -- confirm the units agree.
                if age > self.opts.max_idle_time_ms:
                    sock_info.close()
                    sock_info, from_pool = self.connect(), False

            if from_pool:
                # Can raise ConnectionFailure.
                sock_info = self._check(sock_info)
        except:
            self._socket_semaphore.release()
            with self.lock:
                self.active_sockets -= 1
            raise

        sock_info.last_checkout = _time()
        return sock_info

    def return_socket(self, sock_info):
        """Return the socket to the pool, or if it's closed discard it."""
        if self.pid != os.getpid():
            self.reset()
        else:
            if sock_info.pool_id != self.pool_id:
                # Created before the last reset(): discard.
                sock_info.close()
            elif not sock_info.closed:
                with self.lock:
                    self.sockets.add(sock_info)

        self._socket_semaphore.release()
        with self.lock:
            self.active_sockets -= 1

    def _check(self, sock_info):
        """This side-effecty function checks if this pool has been reset since
        the last time this socket was used, or if the socket has been closed by
        some external network error, and if so, attempts to create a new socket.
        If this connection attempt fails we reset the pool and reraise the
        ConnectionFailure.

        Checking sockets lets us avoid seeing *some*
        :class:`~pymongo.errors.AutoReconnect` exceptions on server
        hiccups, etc. We only do this if it's been > 1 second since
        the last socket checkout, to keep performance reasonable - we
        can't avoid AutoReconnects completely anyway.
        """
        error = False

        # How long since socket was last checked out.
        age = _time() - sock_info.last_checkout
        if (self._check_interval_seconds is not None
                and (
                    0 == self._check_interval_seconds
                    or age > self._check_interval_seconds)):
            if self.socket_checker.socket_closed(sock_info.sock):
                sock_info.close()
                error = True

        if not error:
            return sock_info
        else:
            return self.connect()

    def _raise_wait_queue_timeout(self):
        raise ConnectionFailure(
            'Timed out waiting for socket from pool with max_size %r and'
            ' wait_queue_timeout %r' % (
                self.opts.max_pool_size, self.opts.wait_queue_timeout))

    def __del__(self):
        # Avoid ResourceWarnings in Python 3
        for sock_info in self.sockets:
            sock_info.close()
| mit |
stuntman723/rap-analyzer | rap_analyzer/lib/python2.7/site-packages/wheel/pkginfo.py | 565 | 1225 | """Tools for reading and writing PKG-INFO / METADATA without caring
about the encoding."""
from email.parser import Parser

# Detect Python 3 by the absence of the ``unicode`` builtin rather than by
# version number, so this works on any interpreter.
try:
    unicode
except NameError:
    _PY3 = True
else:
    _PY3 = False

if _PY3:
    from email.generator import BytesGenerator

    def read_pkg_info_bytes(bytestr):
        """Parse PKG-INFO/METADATA given as bytes into a Message object."""
        headers = bytestr.decode(encoding="ascii", errors="surrogateescape")
        return Parser().parsestr(headers)

    def read_pkg_info(path):
        """Parse the PKG-INFO/METADATA file at *path* into a Message."""
        with open(path, "r",
                  encoding="ascii",
                  errors="surrogateescape") as headers:
            return Parser().parse(headers)

    def write_pkg_info(path, message):
        """Serialize *message* to *path* without mangling long headers."""
        with open(path, "wb") as out:
            BytesGenerator(out, maxheaderlen=0).flatten(message)
else:
    from email.generator import Generator

    def read_pkg_info_bytes(bytestr):
        """Parse PKG-INFO/METADATA given as a byte string into a Message."""
        return Parser().parsestr(bytestr)

    def read_pkg_info(path):
        """Parse the PKG-INFO/METADATA file at *path* into a Message."""
        with open(path, "r") as headers:
            return Parser().parse(headers)

    def write_pkg_info(path, message):
        """Serialize *message* to *path* without mangling long headers."""
        with open(path, "w") as metadata:
            Generator(metadata, maxheaderlen=0).flatten(message)
| mit |
vlinhd11/vlinhd11-android-scripting | python/src/Lib/test/test_macpath.py | 55 | 2190 | import macpath
from test import test_support
import unittest
class MacPathTestCase(unittest.TestCase):
    """Tests for the classic Mac OS (colon-separated) path helpers in macpath."""

    def test_abspath(self):
        self.assert_(macpath.abspath("xx:yy") == "xx:yy")

    def test_isabs(self):
        isabs = macpath.isabs
        # On classic Mac OS a path is absolute unless it starts with a colon.
        self.assert_(isabs("xx:yy"))
        self.assert_(isabs("xx:yy:"))
        self.assert_(isabs("xx:"))
        self.failIf(isabs("foo"))
        self.failIf(isabs(":foo"))
        self.failIf(isabs(":foo:bar"))
        self.failIf(isabs(":foo:bar:"))

    def test_commonprefix(self):
        # commonprefix is a plain character-wise prefix, not path-aware.
        commonprefix = macpath.commonprefix
        self.assert_(commonprefix(["home:swenson:spam", "home:swen:spam"])
                     == "home:swen")
        self.assert_(commonprefix([":home:swen:spam", ":home:swen:eggs"])
                     == ":home:swen:")
        self.assert_(commonprefix([":home:swen:spam", ":home:swen:spam"])
                     == ":home:swen:spam")

    def test_split(self):
        split = macpath.split
        self.assertEquals(split("foo:bar"),
                          ('foo:', 'bar'))
        self.assertEquals(split("conky:mountpoint:foo:bar"),
                          ('conky:mountpoint:foo', 'bar'))
        self.assertEquals(split(":"), ('', ''))
        self.assertEquals(split(":conky:mountpoint:"),
                          (':conky:mountpoint', ''))

    def test_splitdrive(self):
        # macpath has no drive letters; the drive component is always empty.
        splitdrive = macpath.splitdrive
        self.assertEquals(splitdrive("foo:bar"), ('', 'foo:bar'))
        self.assertEquals(splitdrive(":foo:bar"), ('', ':foo:bar'))

    def test_splitext(self):
        splitext = macpath.splitext
        self.assertEquals(splitext(":foo.ext"), (':foo', '.ext'))
        self.assertEquals(splitext("foo:foo.ext"), ('foo:foo', '.ext'))
        self.assertEquals(splitext(".ext"), ('.ext', ''))
        self.assertEquals(splitext("foo.ext:foo"), ('foo.ext:foo', ''))
        self.assertEquals(splitext(":foo.ext:"), (':foo.ext:', ''))
        self.assertEquals(splitext(""), ('', ''))
        self.assertEquals(splitext("foo.bar.ext"), ('foo.bar', '.ext'))
def test_main():
    # Standard CPython regrtest entry point for this module.
    test_support.run_unittest(MacPathTestCase)


if __name__ == "__main__":
    test_main()
| apache-2.0 |
apoelstra/elements | qa/rpc-tests/getchaintips.py | 162 | 2120 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the getchaintips API. We introduce a network split, work
# on chains of different lengths, and join the network together again.
# This gives us two tips, verify that it works.
from test_framework import BitcoinTestFramework
from util import assert_equal
class GetChainTipsTest (BitcoinTestFramework):
    """Exercise the getchaintips RPC across a network split and rejoin."""

    def run_test (self):
        BitcoinTestFramework.run_test (self)

        # Initially every node sees exactly one (active) tip at height 200.
        tips = self.nodes[0].getchaintips ()
        assert_equal (len (tips), 1)
        assert_equal (tips[0]['branchlen'], 0)
        assert_equal (tips[0]['height'], 200)
        assert_equal (tips[0]['status'], 'active')

        # Split the network and build two chains of different lengths.
        self.split_network ()

        self.nodes[0].setgenerate (True, 10);
        self.nodes[2].setgenerate (True, 20);
        self.sync_all ()

        # First half mined 10 blocks on top of 200.
        tips = self.nodes[1].getchaintips ()
        assert_equal (len (tips), 1)
        shortTip = tips[0]
        assert_equal (shortTip['branchlen'], 0)
        assert_equal (shortTip['height'], 210)
        assert_equal (tips[0]['status'], 'active')

        # Second half mined 20 blocks, so its chain is the longer one.
        tips = self.nodes[3].getchaintips ()
        assert_equal (len (tips), 1)
        longTip = tips[0]
        assert_equal (longTip['branchlen'], 0)
        assert_equal (longTip['height'], 220)
        assert_equal (tips[0]['status'], 'active')

        # Join the network halves and check that we now have two tips
        # (at least at the nodes that previously had the short chain).
        self.join_network ()

        tips = self.nodes[0].getchaintips ()
        assert_equal (len (tips), 2)
        # The longer chain wins; the short one survives as a valid fork.
        assert_equal (tips[0], longTip)

        assert_equal (tips[1]['branchlen'], 10)
        assert_equal (tips[1]['status'], 'valid-fork')
        # Normalize branchlen/status so the fork entry compares equal to
        # the tip recorded before the rejoin.
        tips[1]['branchlen'] = 0
        tips[1]['status'] = 'active'
        assert_equal (tips[1], shortTip)
# Allow running this functional test directly from the command line.
if __name__ == '__main__':
    GetChainTipsTest ().main ()
| mit |
zstackio/zstack-woodpecker | integrationtest/vm/multizones/resource/test_delete_cluster.py | 4 | 3530 | '''
New Integration Test for testing deleting cluster
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.header.vm as vm_header
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.export_operations as exp_ops
import zstackwoodpecker.operations.cluster_operations as cluster_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import time
import os
# Woodpecker test-case options: generous timeout, and run serially because
# the case deletes and re-adds a shared cluster.
_config_ = {
    'timeout' : 1200,
    'noparallel' : True
}

# Tracks every resource the case creates so cleanup helpers can find them.
test_obj_dict = test_state.TestStateDict()
# Snapshot of the zstack deployment config; captured inside test().
curr_deploy_conf = None
# Name of the cluster this case deletes and restores (from the environment).
cluster1_name = os.environ.get('clusterName1')
def test():
    """Create 4 VMs in cluster1, delete the cluster (the VMs must stop),
    re-add the cluster from the exported deployment config, then restart
    the VMs and verify their state."""
    global curr_deploy_conf
    #This conf should only be put in test(), since test_lib.deploy_config
    # should be set by woodpecker.
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(test_lib.deploy_config)
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #pick up cluster1
    cluster1 = res_ops.get_resource(res_ops.CLUSTER, name = cluster1_name)[0]
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multizones_basic_vm')
    vm_creation_option.set_cluster_uuid(cluster1.uuid)

    # Create four VMs pinned to cluster1 and register them for cleanup.
    vm1 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm1)
    vm2 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm2)
    vm3 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm3)
    vm4 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm4)

    test_util.test_dsc('delete cluster')
    cluster_ops.delete_cluster(cluster1.uuid)

    # Record the expected RUNNING -> STOPPED transition for every VM before
    # validating actual state with the robot checker.
    test_obj_dict.mv_vm(vm1, vm_header.RUNNING, vm_header.STOPPED)
    test_obj_dict.mv_vm(vm2, vm_header.RUNNING, vm_header.STOPPED)
    test_obj_dict.mv_vm(vm3, vm_header.RUNNING, vm_header.STOPPED)
    test_obj_dict.mv_vm(vm4, vm_header.RUNNING, vm_header.STOPPED)
    vm1.update()
    vm2.update()
    vm3.update()
    vm4.update()
    test_lib.lib_robot_status_check(test_obj_dict)

    # Restore the cluster from the config snapshot taken above.
    cluster_ops.add_cluster_resource(curr_deploy_conf, cluster1_name)
    cluster1 = res_ops.get_resource(res_ops.CLUSTER, name = cluster1_name)[0]
    vm_creation_option.set_cluster_uuid(cluster1.uuid)
    vm_creation_option.set_l3_uuids([])

    vm1.start()
    vm2.start()
    vm3.start()
    vm4.start()
    test_lib.lib_robot_status_check(test_obj_dict)

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Delete Cluster Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort recovery: re-add cluster1 if it was deleted, then
    clean up any VMs recorded in test_obj_dict."""
    global curr_deploy_conf
    cluster1 = res_ops.get_resource(res_ops.CLUSTER, name = cluster1_name)
    if not cluster1:
        try:
            cluster_ops.add_cluster_resource(curr_deploy_conf, cluster1_name)
        except Exception as e:
            # Re-raise after warning: later cases depend on this cluster.
            test_util.test_warn('Fail to recover all [cluster:] %s resource. It will impact later test case.' % cluster1_name)
            raise e
    test_lib.lib_error_cleanup(test_obj_dict)
| apache-2.0 |
danlamanna/scratch | geonotebook/vis/ktile/provider.py | 1 | 7690 | import os
import tempfile
import gdal
import mapnik
import osr
from .vrt import (
ComplexSourceType,
SourceFilenameType,
VRTDataset,
VRTRasterBandType)
try:
    from PIL import Image
except ImportError:
    # On some systems, PIL.Image is known as Image.
    import Image

if 'mapnik' in locals():
    # Mapnik 2.0 renamed Envelope to Box2d; alias whichever this
    # installation provides so the rest of the module can use Box2d.
    _version = hasattr(mapnik, 'mapnik_version') and \
        mapnik.mapnik_version() or 701
    if _version >= 20000:
        Box2d = mapnik.Box2d
    else:
        Box2d = mapnik.Envelope

# Spatial reference used for the rendered map unless overridden per layer.
DEFAULT_MAP_SRS = 'EPSG:4326'
class MapnikPythonProvider(object):
    """Tile provider that renders a GDAL raster with Mapnik.

    The raster is described either by a user-supplied static VRT
    (``vrt_path``) or by a VRT this class generates on the fly from the
    source file plus the metadata passed in via keyword arguments.
    """

    # numpy dtype name -> GDAL/VRT data type name used when writing the VRT.
    numpy_to_vrt_type = {
        'uint8': 'Byte',
        'float32': 'Float32'
    }

    def __init__(self, layer, **kwargs):
        # List of bands to display, should be len == 1 or len == 3
        self._bands = kwargs.get('bands', [-1])
        # Lazily-computed proj4 SRS of the source raster (see layer_srs).
        self._layer_srs = None
        self._static_vrt = kwargs.get("vrt_path", None)
        if self._static_vrt is None:
            # Generated VRT lives in its own temp directory, named after
            # the layer.  NOTE(review): the temp directory is never removed.
            self._vrt_path = os.path.join(
                tempfile.mkdtemp(prefix="geonb"),
                '{}.vrt'.format(kwargs.get('name', 'no_name')))

        self._filepath = None
        self.layer = layer
        self.filepath = kwargs.get('path', None)
        self.map_srs = kwargs.get('map_srs', DEFAULT_MAP_SRS)
        self.name = kwargs.get('name', None)
        self.raster_x_size = kwargs.get('raster_x_size', None)
        self.raster_y_size = kwargs.get('raster_y_size', None)
        self.transform = kwargs.get('transform', None)
        self.nodata = kwargs.get('nodata', None)

        # Unknown/missing dtypes fall back to None (VRT band gets no type).
        try:
            self.dtype = self.numpy_to_vrt_type[kwargs['dtype']]
        except KeyError:
            self.dtype = None

        # Note: The band value mapnik expects. If we are rendering
        # an RGB and we have 3 _bands, then we set bands to
        # -1. Mapnik will use the ColorInterp from the VRT
        # to figure out the bands. Otherwise the VRT will have
        # a single VRTRasterBand so we set the band to 1
        if self._static_vrt is None:
            self.mapnik_band = -1 if len(self._bands) == 3 else 1
        else:
            # Static VRT's may specify different bands
            self.mapnik_band = -1 if len(self._bands) == 3 else self._bands[0]

        self.opacity = kwargs.get('opacity', 1)
        self.gamma = kwargs.get('gamma', 1)
        self.colormap = kwargs.get('colormap', {})
        self.scale_factor = None

    def serialize(self):
        """Return a plain-dict representation suitable for reconstructing
        this provider (e.g. across processes)."""
        return {
            "filepath": self.filepath,
            "map_srs": self.map_srs,
            "vrt_path": self.vrt_path,
            "name": self.name,
            "opacity": self.opacity,
            "gamma": self.gamma,
            "colormap": self.colormap,
            "is_static": True if self._static_vrt else False,
            "raster_x_size": self.raster_x_size,
            "raster_y_size": self.raster_y_size,
            "transform": self.transform,
            "nodata": self.nodata,
            "layer_srs": self.layer_srs
        }

    def generate_vrt(self):
        """Write the generated VRT describing ``filepath`` to disk.

        No-op (returns None) when a static VRT was supplied; otherwise
        returns the path of the written VRT.
        """
        if self._static_vrt is not None:
            return

        vrt = VRTDataset(rasterXSize=self.raster_x_size,
                         rasterYSize=self.raster_y_size)
        vrt.SRS = [self.map_srs]
        vrt.GeoTransform = [", ".join([str(f) for f in self.transform])]

        colors = ["Red", "Green", "Blue"]
        # One VRTRasterBand per requested source band.
        for i, b in enumerate(self._bands):
            vrt_band = VRTRasterBandType(dataType=self.dtype,
                                         band=i + 1,
                                         NoDataValue=[str(self.nodata)])
            source = ComplexSourceType(
                NODATA=str(self.nodata),
                SourceFilename=[
                    SourceFilenameType(
                        relativeToVRT=0,
                        valueOf_=self.filepath)],
                SourceBand=[b])

            if len(self._bands) == 3:
                vrt_band.ColorInterp = [colors[i]]

                # Scale floats to 0-255 and set the band type to Byte
                # Note: this ensures mapnik will use the nodata value
                # for the alpha channel.
                if self.dtype == "Float32":
                    source.ScaleRatio = int(255)
                    vrt_band.dataType = 'Byte'

            vrt_band.ComplexSource.append(source)
            vrt.VRTRasterBand.append(vrt_band)

        with open(self._vrt_path, 'w') as fh:
            vrt.export(fh, 0)

        return self._vrt_path

    @property
    def vrt_path(self):
        # A user-supplied static VRT always wins over the generated one.
        if self._static_vrt is not None:
            return self._static_vrt
        return self._vrt_path

    @property
    def filepath(self):
        return self._filepath

    @filepath.setter
    def filepath(self, val):
        # Changing the source raster invalidates the cached layer SRS.
        if val != self._filepath:
            self._layer_srs = None
            self._filepath = val

    @property
    def layer_srs(self):
        """Proj4 SRS of the source raster, read lazily via GDAL.

        Falls back to WGS84 lon/lat when the file cannot be opened.
        """
        if self._layer_srs is None:
            try:
                raster = gdal.Open(self.filepath)
                srs = osr.SpatialReference()
                srs.ImportFromWkt(raster.GetProjectionRef())
                self._layer_srs = srs.ExportToProj4()
            except RuntimeError:
                self._layer_srs = \
                    """+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs """
        return self._layer_srs

    def style_map(self, Map):
        """Attach a raster style (opacity + discrete colorizer) and a GDAL
        layer reading from the VRT to *Map*; returns the styled map."""
        style = mapnik.Style()
        rule = mapnik.Rule()

        sym = mapnik.RasterSymbolizer()
        sym.opacity = self.opacity

        colorizer = mapnik.RasterColorizer(
            mapnik.COLORIZER_DISCRETE,
            mapnik.Color('white')
        )
        # colorizer.epsilon = 0.001

        if self.colormap:
            # NOTE(review): .encode('ascii') is a py2-era coercion of the
            # color string for mapnik's bindings.
            for stop in self.colormap:
                colorizer.add_stop(stop['quantity'], mapnik.Color(
                    stop['color'].encode('ascii')))

        sym.colorizer = colorizer
        rule.symbols.append(sym)
        style.rules.append(rule)
        Map.append_style('Raster Style', style)

        lyr = mapnik.Layer('GDAL Layer from TIFF', self.layer_srs)
        lyr.datasource = mapnik.Gdal(base=os.path.dirname(self.vrt_path),
                                     file=os.path.basename(self.vrt_path),
                                     band=self.mapnik_band)
        lyr.styles.append('Raster Style')
        Map.layers.append(lyr)
        return Map

    def renderArea(self, width, height, srs, xmin, ymin, xmax, ymax, zoom):
        '''Render the requested bounding box to a width x height RGBA PIL
        image, applying the configured gamma correction.'''
        # NB: To be thread-safe Map object cannot be stored in the class state.
        # see: https://groups.google.com/forum/#!topic/mapnik/USDlVfSk328
        Map = mapnik.Map(width, height, srs)
        Map.zoom_to_box(Box2d(xmin, ymin, xmax, ymax))
        Map = self.style_map(Map)

        img = mapnik.Image(width, height)
        # Don't even call render with scale factor if it's not
        # defined. Plays safe with older versions.
        if self.scale_factor is None:
            mapnik.render(Map, img)
        else:
            mapnik.render(Map, img, self.scale_factor)

        def gamma_correct(im):
            """Fast gamma correction with PIL's image.point() method."""
            if self.gamma != 1.0:
                table = [pow(x / 255., 1.0 / self.gamma) * 255
                         for x in range(256)]
                # Expand table to number of bands
                table = table * len(im.mode)
                return im.point(table)
            else:
                return im

        # b = BytesIO(img.tostring())
        img = Image.frombytes('RGBA', (width, height), img.tostring())
        img = gamma_correct(img)

        return img
| apache-2.0 |
sridevikoushik31/openstack | nova/api/openstack/compute/contrib/security_group_default_rules.py | 4 | 8190 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Metacloud Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from xml.dom import minidom
import webob
from webob import exc
from nova.api.openstack.compute.contrib import security_groups as sg
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
from nova.network.security_group import openstack_driver
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute',
'security_group_default_rules')
sg_nsmap = {None: wsgi.XMLNS_V11}
def make_default_rule(elem):
    """Attach the standard default-rule fields (id, ports, protocol and the
    nested ip_range/cidr element) to the given XML template element."""
    elem.set('id')

    # The three scalar fields all follow the same child-with-text pattern.
    for tag in ('ip_protocol', 'from_port', 'to_port'):
        child = xmlutil.SubTemplateElement(elem, tag)
        child.text = tag

    ip_range = xmlutil.SubTemplateElement(elem, 'ip_range',
                                          selector='ip_range')
    cidr = xmlutil.SubTemplateElement(ip_range, 'cidr')
    cidr.text = 'cidr'
class SecurityGroupDefaultRulesTemplate(xmlutil.TemplateBuilder):
    """XML serializer template for a *list* of default security group rules."""

    def construct(self):
        root = xmlutil.TemplateElement('security_group_default_rules')
        # One <security_group_default_rule> element per item in the list.
        elem = xmlutil.SubTemplateElement(root, 'security_group_default_rule',
                                          selector='security_group_default_rules')
        make_default_rule(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupDefaultRuleTemplate(xmlutil.TemplateBuilder):
    """XML serializer template for a single default security group rule."""

    def construct(self):
        root = xmlutil.TemplateElement('security_group_default_rule',
                                       selector='security_group_default_rule')
        make_default_rule(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupDefaultRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserialize an XML security_group_default_rule request body."""

    def default(self, string):
        dom = minidom.parseString(string)
        rule = self._extract_security_group_default_rule(dom)
        return {'body': {'security_group_default_rule': rule}}

    def _extract_security_group_default_rule(self, node):
        """Marshal the security group default rule attributes of a DOM."""
        sg_rule = {}
        rule_node = self.find_first_child_named(
            node, 'security_group_default_rule')
        if rule_node is not None:
            # Every field is an optional direct child whose text becomes
            # the dict value under the same key.
            for field in ('ip_protocol', 'from_port', 'to_port', 'cidr'):
                child = self.find_first_child_named(rule_node, field)
                if child is not None:
                    sg_rule[field] = self.extract_text(child)
        return sg_rule
class SecurityGroupDefaultRulesController(sg.SecurityGroupControllerBase):
    """CRUD controller for project-wide default security group rules.

    NOTE(review): ``_`` (gettext) is not imported in this module; it is
    presumably installed into builtins by nova's i18n setup — confirm.
    """

    def __init__(self):
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())

    @wsgi.serializers(xml=SecurityGroupDefaultRuleTemplate)
    @wsgi.deserializers(xml=SecurityGroupDefaultRulesXMLDeserializer)
    def create(self, req, body):
        """Create a new default rule; 400 on invalid or duplicate input."""
        context = self._authorize_context(req)
        authorize(context)

        sg_rule = self._from_body(body, 'security_group_default_rule')

        try:
            values = self._rule_args_to_dict(to_port=sg_rule.get('to_port'),
                from_port=sg_rule.get('from_port'),
                ip_protocol=sg_rule.get('ip_protocol'),
                cidr=sg_rule.get('cidr'))
        except Exception as exp:
            raise exc.HTTPBadRequest(explanation=unicode(exp))

        if values is None:
            msg = _('Not enough parameters to build a valid rule.')
            raise exc.HTTPBadRequest(explanation=msg)

        if self.security_group_api.default_rule_exists(context, values):
            msg = _('This default rule already exists.')
            raise exc.HTTPBadRequest(explanation=msg)
        security_group_rule = self.security_group_api.add_default_rules(
            context, [values])[0]
        fmt_rule = self._format_security_group_default_rule(
            security_group_rule)
        return {'security_group_default_rule': fmt_rule}

    def _rule_args_to_dict(self, to_port=None, from_port=None,
                           ip_protocol=None, cidr=None):
        # Build a CIDR-based ingress rule dict from the raw request fields.
        cidr = self.security_group_api.parse_cidr(cidr)
        return self.security_group_api.new_cidr_ingress_rule(
            cidr, ip_protocol, from_port, to_port)

    @wsgi.serializers(xml=SecurityGroupDefaultRuleTemplate)
    def show(self, req, id):
        """Return a single default rule by id; 404 when missing."""
        context = self._authorize_context(req)
        authorize(context)

        id = self.security_group_api.validate_id(id)

        LOG.debug(_("Showing security_group_default_rule with id %s") % id)
        try:
            rule = self.security_group_api.get_default_rule(context, id)
        except exception.SecurityGroupDefaultRuleNotFound:
            raise exc.HTTPNotFound(_("security group default rule not found"))

        fmt_rule = self._format_security_group_default_rule(rule)
        return {"security_group_default_rule": fmt_rule}

    def delete(self, req, id):
        """Remove a default rule; returns 204 on success."""
        context = self._authorize_context(req)
        authorize(context)

        id = self.security_group_api.validate_id(id)

        rule = self.security_group_api.get_default_rule(context, id)

        self.security_group_api.remove_default_rules(context, [rule['id']])

        return webob.Response(status_int=204)

    @wsgi.serializers(xml=SecurityGroupDefaultRulesTemplate)
    def index(self, req):
        """List all default rules in API response format."""
        context = self._authorize_context(req)
        authorize(context)

        ret = {'security_group_default_rules': []}
        for rule in self.security_group_api.get_all_default_rules(context):
            rule_fmt = self._format_security_group_default_rule(rule)
            ret['security_group_default_rules'].append(rule_fmt)

        return ret

    def _format_security_group_default_rule(self, rule):
        # Map DB column names to the API's wire format.
        sg_rule = {}
        sg_rule['id'] = rule['id']
        sg_rule['ip_protocol'] = rule['protocol']
        sg_rule['from_port'] = rule['from_port']
        sg_rule['to_port'] = rule['to_port']
        sg_rule['ip_range'] = {}
        sg_rule['ip_range'] = {'cidr': rule['cidr']}
        return sg_rule
class Security_group_default_rules(extensions.ExtensionDescriptor):
    """Default rules for security group support."""
    name = "SecurityGroupDefaultRules"
    alias = "os-security-group-default-rules"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "securitygroupdefaultrules/api/v1.1")
    updated = "2013-02-05T00:00:00+00:00"

    def get_resources(self):
        # Expose the controller as a top-level resource with explicit
        # collection verbs plus a member-level GET (show).
        resources = [
            extensions.ResourceExtension('os-security-group-default-rules',
                SecurityGroupDefaultRulesController(),
                collection_actions={'create': 'POST',
                                    'delete': 'DELETE',
                                    'index': 'GET'},
                member_actions={'show': 'GET'})]

        return resources
| apache-2.0 |
micadeyeye/Blongo | django/contrib/messages/tests/session.py | 413 | 1230 | from django.contrib.messages.tests.base import BaseTest
from django.contrib.messages.storage.session import SessionStorage
def set_session_data(storage, messages):
    """
    Store *messages* directly in the backend request's session under the
    storage's session key, dropping any cached loaded data so the backend
    re-reads the session next time.
    """
    storage.request.session[storage.session_key] = messages
    try:
        del storage._loaded_data
    except AttributeError:
        pass
def stored_session_messages_count(storage):
    """Return how many messages the backend request's session holds."""
    return len(storage.request.session.get(storage.session_key, []))
class SessionTest(BaseTest):
    """Run the shared message-storage test suite against SessionStorage."""
    storage_class = SessionStorage

    def get_request(self):
        # Attach a plain dict as the session; SessionStorage only needs
        # mapping semantics from it.
        self.session = {}
        request = super(SessionTest, self).get_request()
        request.session = self.session
        return request

    def stored_messages_count(self, storage, response):
        return stored_session_messages_count(storage)

    def test_get(self):
        storage = self.storage_class(self.get_request())
        # Set initial data.
        example_messages = ['test', 'me']
        set_session_data(storage, example_messages)
        # Test that the message actually contains what we expect.
        self.assertEqual(list(storage), example_messages)
| bsd-3-clause |
dpyro/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_runner.py | 167 | 20633 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import _pytest._code
import os
import py
import pytest
import sys
from _pytest import runner, main
class TestSetupState:
    """Unit tests for runner.SetupState: finalizer bookkeeping and teardown."""

    def test_setup(self, testdir):
        ss = runner.SetupState()
        item = testdir.getitem("def test_func(): pass")
        l = [1]
        ss.prepare(item)
        ss.addfinalizer(l.pop, colitem=item)
        assert l
        ss._pop_and_teardown()
        # The finalizer (l.pop) must have run during teardown.
        assert not l

    def test_teardown_exact_stack_empty(self, testdir):
        item = testdir.getitem("def test_func(): pass")
        ss = runner.SetupState()
        # Repeated teardown of an empty stack must be a no-op, not an error.
        ss.teardown_exact(item, None)
        ss.teardown_exact(item, None)
        ss.teardown_exact(item, None)

    def test_setup_fails_and_failure_is_cached(self, testdir):
        item = testdir.getitem("""
            def setup_module(mod):
                raise ValueError(42)
            def test_func(): pass
        """) # noqa
        ss = runner.SetupState()
        pytest.raises(ValueError, lambda: ss.prepare(item))
        # The second prepare must re-raise the cached setup failure.
        pytest.raises(ValueError, lambda: ss.prepare(item))

    def test_teardown_multiple_one_fails(self, testdir):
        r = []
        def fin1(): r.append('fin1')
        def fin2(): raise Exception('oops')
        def fin3(): r.append('fin3')
        item = testdir.getitem("def test_func(): pass")
        ss = runner.SetupState()
        ss.addfinalizer(fin1, item)
        ss.addfinalizer(fin2, item)
        ss.addfinalizer(fin3, item)
        with pytest.raises(Exception) as err:
            ss._callfinalizers(item)
        assert err.value.args == ('oops',)
        # Finalizers run LIFO; fin1 still runs even though fin2 raised.
        assert r == ['fin3', 'fin1']

    def test_teardown_multiple_fail(self, testdir):
        # Ensure the first exception is the one which is re-raised.
        # Ideally both would be reported however.
        def fin1(): raise Exception('oops1')
        def fin2(): raise Exception('oops2')
        item = testdir.getitem("def test_func(): pass")
        ss = runner.SetupState()
        ss.addfinalizer(fin1, item)
        ss.addfinalizer(fin2, item)
        with pytest.raises(Exception) as err:
            ss._callfinalizers(item)
        assert err.value.args == ('oops2',)
class BaseFunctionalTests:
    """Functional checks of the reports a full test protocol run produces.

    Subclasses provide the actual runner via getrunner() (in-process or
    forked); reports[0]/[1]/[2] are the setup/call/teardown reports.
    """

    def test_passfunction(self, testdir):
        reports = testdir.runitem("""
            def test_func():
                pass
        """)
        rep = reports[1]
        assert rep.passed
        assert not rep.failed
        assert rep.outcome == "passed"
        assert not rep.longrepr

    def test_failfunction(self, testdir):
        reports = testdir.runitem("""
            def test_func():
                assert 0
        """)
        rep = reports[1]
        assert not rep.passed
        assert not rep.skipped
        assert rep.failed
        assert rep.when == "call"
        assert rep.outcome == "failed"
        #assert isinstance(rep.longrepr, ReprExceptionInfo)

    def test_skipfunction(self, testdir):
        reports = testdir.runitem("""
            import pytest
            def test_func():
                pytest.skip("hello")
        """)
        rep = reports[1]
        assert not rep.failed
        assert not rep.passed
        assert rep.skipped
        assert rep.outcome == "skipped"
        #assert rep.skipped.when == "call"
        #assert rep.skipped.when == "call"
        #assert rep.skipped == "%sreason == "hello"
        #assert rep.skipped.location.lineno == 3
        #assert rep.skipped.location.path
        #assert not rep.skipped.failurerepr

    def test_skip_in_setup_function(self, testdir):
        reports = testdir.runitem("""
            import pytest
            def setup_function(func):
                pytest.skip("hello")
            def test_func():
                pass
        """)
        print(reports)
        rep = reports[0]
        assert not rep.failed
        assert not rep.passed
        assert rep.skipped
        #assert rep.skipped.reason == "hello"
        #assert rep.skipped.location.lineno == 3
        #assert rep.skipped.location.lineno == 3
        # Setup was skipped, so only a setup and a teardown report exist.
        assert len(reports) == 2
        assert reports[1].passed # teardown

    def test_failure_in_setup_function(self, testdir):
        reports = testdir.runitem("""
            import pytest
            def setup_function(func):
                raise ValueError(42)
            def test_func():
                pass
        """)
        rep = reports[0]
        assert not rep.skipped
        assert not rep.passed
        assert rep.failed
        assert rep.when == "setup"
        assert len(reports) == 2

    def test_failure_in_teardown_function(self, testdir):
        reports = testdir.runitem("""
            import pytest
            def teardown_function(func):
                raise ValueError(42)
            def test_func():
                pass
        """)
        print(reports)
        assert len(reports) == 3
        rep = reports[2]
        assert not rep.skipped
        assert not rep.passed
        assert rep.failed
        assert rep.when == "teardown"
        #assert rep.longrepr.reprcrash.lineno == 3
        #assert rep.longrepr.reprtraceback.reprentries

    def test_custom_failure_repr(self, testdir):
        # A conftest-defined Function.repr_failure must be honored for
        # call-phase failures.
        testdir.makepyfile(conftest="""
            import pytest
            class Function(pytest.Function):
                def repr_failure(self, excinfo):
                    return "hello"
        """)
        reports = testdir.runitem("""
            import pytest
            def test_func():
                assert 0
        """)
        rep = reports[1]
        assert not rep.skipped
        assert not rep.passed
        assert rep.failed
        #assert rep.outcome.when == "call"
        #assert rep.failed.where.lineno == 3
        #assert rep.failed.where.path.basename == "test_func.py"
        #assert rep.failed.failurerepr == "hello"

    def test_teardown_final_returncode(self, testdir):
        # A teardown failure alone must produce a non-zero session exit.
        rec = testdir.inline_runsource("""
            def test_func():
                pass
            def teardown_function(func):
                raise ValueError(42)
        """)
        assert rec.ret == 1

    def test_exact_teardown_issue90(self, testdir):
        rec = testdir.inline_runsource("""
            import pytest
            class TestClass:
                def test_method(self):
                    pass
                def teardown_class(cls):
                    raise Exception()

            def test_func():
                import sys
                # on python2 exc_info is keept till a function exits
                # so we would end up calling test functions while
                # sys.exc_info would return the indexerror
                # from guessing the lastitem
                excinfo = sys.exc_info()
                import traceback
                assert excinfo[0] is None, \
                       traceback.format_exception(*excinfo)
            def teardown_function(func):
                raise ValueError(42)
        """)
        reps = rec.getreports("pytest_runtest_logreport")
        print (reps)
        # Class method: setup/call reports pass, class teardown fails.
        for i in range(2):
            assert reps[i].nodeid.endswith("test_method")
            assert reps[i].passed
        assert reps[2].when == "teardown"
        assert reps[2].failed
        assert len(reps) == 6
        # Module function: setup/call pass, its own teardown fails too.
        for i in range(3,5):
            assert reps[i].nodeid.endswith("test_func")
            assert reps[i].passed
        assert reps[5].when == "teardown"
        assert reps[5].nodeid.endswith("test_func")
        assert reps[5].failed

    def test_failure_in_setup_function_ignores_custom_repr(self, testdir):
        # Setup failures must not go through the item's repr_failure hook.
        testdir.makepyfile(conftest="""
            import pytest
            class Function(pytest.Function):
                def repr_failure(self, excinfo):
                    assert 0
        """)
        reports = testdir.runitem("""
            def setup_function(func):
                raise ValueError(42)
            def test_func():
                pass
        """)
        assert len(reports) == 2
        rep = reports[0]
        print(rep)
        assert not rep.skipped
        assert not rep.passed
        assert rep.failed
        #assert rep.outcome.when == "setup"
        #assert rep.outcome.where.lineno == 3
        #assert rep.outcome.where.path.basename == "test_func.py"
        #assert instanace(rep.failed.failurerepr, PythonFailureRepr)

    def test_systemexit_does_not_bail_out(self, testdir):
        # SystemExit from a test is recorded as a failure, not propagated.
        try:
            reports = testdir.runitem("""
                def test_func():
                    raise SystemExit(42)
            """)
        except SystemExit:
            pytest.fail("runner did not catch SystemExit")
        rep = reports[1]
        assert rep.failed
        assert rep.when == "call"

    def test_exit_propagates(self, testdir):
        # pytest.exit's exception, by contrast, must escape the runner.
        try:
            testdir.runitem("""
                import pytest
                def test_func():
                    raise pytest.exit.Exception()
            """)
        except pytest.exit.Exception:
            pass
        else:
            pytest.fail("did not raise")
class TestExecutionNonForked(BaseFunctionalTests):
    """Run the functional suite in-process via runner.runtestprotocol."""

    def getrunner(self):
        def f(item):
            return runner.runtestprotocol(item, log=False)
        return f

    def test_keyboardinterrupt_propagates(self, testdir):
        # In-process, Ctrl-C must escape the runner so the session can stop.
        try:
            testdir.runitem("""
                def test_func():
                    raise KeyboardInterrupt("fake")
            """)
        except KeyboardInterrupt:
            pass
        else:
            pytest.fail("did not raise")
class TestExecutionForked(BaseFunctionalTests):
    """Run the functional suite in a forked child (requires os.fork)."""
    pytestmark = pytest.mark.skipif("not hasattr(os, 'fork')")

    def getrunner(self):
        # XXX re-arrange this test to live in pytest-xdist
        boxed = pytest.importorskip("xdist.boxed")
        return boxed.forked_run_report

    def test_suicide(self, testdir):
        # A child killed by SIGTERM still yields a failed report; the
        # crash phase is unknown, hence when == "???".
        reports = testdir.runitem("""
            def test_func():
                import os
                os.kill(os.getpid(), 15)
        """)
        rep = reports[0]
        assert rep.failed
        assert rep.when == "???"
class TestSessionReports:
    """Checks of collection reports produced by collect_one_node."""

    def test_collect_result(self, testdir):
        col = testdir.getmodulecol("""
            def test_func1():
                pass
            class TestClass:
                pass
        """)
        rep = runner.collect_one_node(col)
        assert not rep.failed
        assert not rep.skipped
        assert rep.passed
        # location is (relpath, lineno, domain); module-level has no lineno.
        locinfo = rep.location
        assert locinfo[0] == col.fspath.basename
        assert not locinfo[1]
        assert locinfo[2] == col.fspath.basename
        res = rep.result
        assert len(res) == 2
        assert res[0].name == "test_func1"
        assert res[1].name == "TestClass"

    def test_skip_at_module_scope(self, testdir):
        # pytest.skip() at import time marks the whole module as skipped.
        col = testdir.getmodulecol("""
            import pytest
            pytest.skip("hello")
            def test_func():
                pass
        """)
        rep = main.collect_one_node(col)
        assert not rep.failed
        assert not rep.passed
        assert rep.skipped
# All report classes must accept (and store) arbitrary extra keyword
# parameters, so plugins can attach additional data to reports.
reporttypes = [
    runner.BaseReport,
    runner.TestReport,
    runner.TeardownErrorReport,
    runner.CollectReport,
]

@pytest.mark.parametrize('reporttype', reporttypes, ids=[x.__name__ for x in reporttypes])
def test_report_extra_parameters(reporttype):
    # Discover __init__'s positional parameters (py3 signature API when
    # available, getargspec otherwise) and satisfy them all with [].
    if hasattr(py.std.inspect, 'signature'):
        args = list(py.std.inspect.signature(reporttype.__init__).parameters.keys())[1:]
    else:
        args = py.std.inspect.getargspec(reporttype.__init__)[0][1:]
    basekw = dict.fromkeys(args, [])
    report = reporttype(newthing=1, **basekw)
    assert report.newthing == 1
def test_callinfo():
    # Successful call: result is recorded and shown in the repr.
    ci = runner.CallInfo(lambda: 0, '123')
    assert ci.when == "123"
    assert ci.result == 0
    assert "result" in repr(ci)
    # Failing call: no result attribute; exception info captured instead.
    ci = runner.CallInfo(lambda: 0/0, '123')
    assert ci.when == "123"
    assert not hasattr(ci, 'result')
    assert ci.excinfo
    assert "exc" in repr(ci)
# design question: do we want general hooks in python files?
# then something like the following functional tests makes sense
@pytest.mark.xfail
def test_runtest_in_module_ordering(testdir):
    # Exercises class-level vs module-level runtest hooks and their
    # relative ordering; expected to fail (hooks in plain test modules
    # are not a supported feature).
    p1 = testdir.makepyfile("""
        def pytest_runtest_setup(item): # runs after class-level!
            item.function.mylist.append("module")
        class TestClass:
            def pytest_runtest_setup(self, item):
                assert not hasattr(item.function, 'mylist')
                item.function.mylist = ['class']
            def pytest_funcarg__mylist(self, request):
                return request.function.mylist
            def pytest_runtest_call(self, item, __multicall__):
                try:
                    __multicall__.execute()
                except ValueError:
                    pass
            def test_hello1(self, mylist):
                assert mylist == ['class', 'module'], mylist
                raise ValueError()
            def test_hello2(self, mylist):
                assert mylist == ['class', 'module'], mylist
        def pytest_runtest_teardown(item):
            del item.function.mylist
    """)
    result = testdir.runpytest(p1)
    result.stdout.fnmatch_lines([
        "*2 passed*"
    ])
def test_outcomeexception_exceptionattributes():
    """OutcomeException mirrors its message in both .args[0] and .msg."""
    exc = runner.OutcomeException('test')
    assert exc.args[0] == exc.msg
def test_pytest_exit():
    """pytest.exit raises an Exit exception, a KeyboardInterrupt subclass."""
    excinfo = pytest.raises(pytest.exit.Exception, pytest.exit, "hello")
    assert excinfo.errisinstance(KeyboardInterrupt)
def test_pytest_fail():
    """pytest.fail raises Failed, whose short exception text starts with 'Failed'."""
    excinfo = pytest.raises(pytest.fail.Exception, pytest.fail, "hello")
    assert excinfo.exconly(tryshort=True).startswith("Failed")
def test_pytest_fail_notrace(testdir):
    """pytest.fail(..., pytrace=False) prints the message but no traceback source."""
    testdir.makepyfile("""
        import pytest
        def test_hello():
            pytest.fail("hello", pytrace=False)
        def teardown_function(function):
            pytest.fail("world", pytrace=False)
    """)
    result = testdir.runpytest()
    # Both failure messages must appear, in this relative order in the output.
    result.stdout.fnmatch_lines([
        "world",
        "hello",
    ])
    # pytrace=False suppresses the source/traceback section entirely.
    assert 'def teardown_function' not in result.stdout.str()
@pytest.mark.parametrize('str_prefix', ['u', ''])
def test_pytest_fail_notrace_non_ascii(testdir, str_prefix):
    """Fix pytest.fail with pytrace=False with non-ascii characters (#1178).

    This tests with native and unicode strings containing non-ascii chars.
    """
    testdir.makepyfile(u"""
        # coding: utf-8
        import pytest
        def test_hello():
            pytest.fail(%s'oh oh: ☺', pytrace=False)
    """ % str_prefix)
    result = testdir.runpytest()
    if sys.version_info[0] >= 3:
        result.stdout.fnmatch_lines(['*test_hello*', "oh oh: ☺"])
    else:
        # Python 2 may escape/encode the character differently; glob-match it.
        result.stdout.fnmatch_lines(['*test_hello*', "oh oh: *"])
    assert 'def test_hello' not in result.stdout.str()
def test_pytest_no_tests_collected_exit_status(testdir):
    """Exit code is EXIT_NOTESTSCOLLECTED when nothing is collected or everything is deselected."""
    result = testdir.runpytest()
    result.stdout.fnmatch_lines('*collected 0 items*')
    assert result.ret == main.EXIT_NOTESTSCOLLECTED
    testdir.makepyfile(test_foo="""
        def test_foo():
            assert 1
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines('*collected 1 items*')
    result.stdout.fnmatch_lines('*1 passed*')
    assert result.ret == main.EXIT_OK
    # A -k expression that matches nothing deselects the only test: again
    # treated as "no tests collected".
    result = testdir.runpytest('-k nonmatch')
    result.stdout.fnmatch_lines('*collected 1 items*')
    result.stdout.fnmatch_lines('*1 deselected*')
    assert result.ret == main.EXIT_NOTESTSCOLLECTED
def test_exception_printing_skip():
    """The short exception text of a Skipped error starts with 'Skipped'."""
    excinfo = pytest.raises(pytest.skip.Exception, pytest.skip, "hello")
    assert excinfo.exconly(tryshort=True).startswith("Skipped")
def test_importorskip(monkeypatch):
    """importorskip returns the module when importable, skips when it is not."""
    importorskip = pytest.importorskip
    def f():
        importorskip("asdlkj")
    try:
        sys = importorskip("sys")  # noqa
        assert sys == py.std.sys
        #path = pytest.importorskip("os.path")
        #assert path == py.std.os.path
        excinfo = pytest.raises(pytest.skip.Exception, f)
        path = py.path.local(excinfo.getrepr().reprcrash.path)
        # check that importorskip reports the actual call
        # in this test the test_runner.py file
        assert path.purebasename == "test_runner"
        # a module name must be a plain identifier/dotted path
        pytest.raises(SyntaxError, "pytest.importorskip('x y z')")
        pytest.raises(SyntaxError, "pytest.importorskip('x=y')")
        mod = py.std.types.ModuleType("hello123")
        mod.__version__ = "1.3"
        monkeypatch.setitem(sys.modules, "hello123", mod)
        # too-old version must skip; matching version must return the module
        pytest.raises(pytest.skip.Exception, """
            pytest.importorskip("hello123", minversion="1.3.1")
        """)
        mod2 = pytest.importorskip("hello123", minversion="1.3")
        assert mod2 == mod
    except pytest.skip.Exception:
        # nothing above should genuinely skip on a working interpreter
        print(_pytest._code.ExceptionInfo())
        pytest.fail("spurious skip")
def test_importorskip_imports_last_module_part():
    """importorskip('os.path') returns the os.path submodule, not the os package."""
    imported = pytest.importorskip("os.path")
    assert imported == os.path
def test_importorskip_dev_module(monkeypatch):
    """minversion comparison copes with setuptools-style '.dev' version strings."""
    try:
        mod = py.std.types.ModuleType("mockmodule")
        mod.__version__ = '0.13.0.dev-43290'
        monkeypatch.setitem(sys.modules, 'mockmodule', mod)
        mod2 = pytest.importorskip('mockmodule', minversion='0.12.0')
        assert mod2 == mod
        pytest.raises(pytest.skip.Exception, """
            pytest.importorskip('mockmodule1', minversion='0.14.0')""")
    except pytest.skip.Exception:
        # a skip here means the dev-version comparison itself is broken
        print(_pytest._code.ExceptionInfo())
        pytest.fail("spurious skip")
def test_pytest_cmdline_main(testdir):
    """A test module can hand itself to pytest.cmdline.main and exit cleanly."""
    script = testdir.makepyfile("""
        import pytest
        def test_hello():
            assert 1
        if __name__ == '__main__':
           pytest.cmdline.main([__file__])
    """)
    import subprocess
    proc = subprocess.Popen([sys.executable, str(script)], stdout=subprocess.PIPE)
    proc.communicate()
    assert proc.wait() == 0
def test_unicode_in_longrepr(testdir):
    """A unicode longrepr must not trigger UnicodeEncodeError during reporting."""
    testdir.makeconftest("""
        import py
        def pytest_runtest_makereport(__multicall__):
            rep = __multicall__.execute()
            if rep.when == "call":
                rep.longrepr = py.builtin._totext("\\xc3\\xa4", "utf8")
            return rep
    """)
    testdir.makepyfile("""
        def test_out():
            assert 0
    """)
    result = testdir.runpytest()
    assert result.ret == 1
    assert "UnicodeEncodeError" not in result.stderr.str()
def test_failure_in_setup(testdir):
    """With --tb=line, a setup_module failure does not dump its source."""
    testdir.makepyfile("""
        def setup_module():
            0/0
        def test_func():
            pass
    """)
    output = testdir.runpytest("--tb=line").stdout.str()
    assert "def setup_module" not in output
def test_makereport_getsource(testdir):
    """The failing 'else' branch is shown in the report without internal errors."""
    testdir.makepyfile("""
        def test_foo():
            if False: pass
            else: assert False
    """)
    run = testdir.runpytest()
    assert 'INTERNALERROR' not in run.stdout.str()
    run.stdout.fnmatch_lines(['*else: assert False*'])
def test_makereport_getsource_dynamic_code(testdir, monkeypatch):
    """Test that exception in dynamically generated code doesn't break getting the source line."""
    import inspect
    original_findsource = inspect.findsource
    def findsource(obj, *args, **kwargs):
        # Can be triggered by dynamically created functions
        if obj.__name__ == 'foo':
            raise IndexError()
        return original_findsource(obj, *args, **kwargs)
    monkeypatch.setattr(inspect, 'findsource', findsource)
    testdir.makepyfile("""
        import pytest
        @pytest.fixture
        def foo(missing):
            pass
        def test_fix(foo):
            assert False
    """)
    result = testdir.runpytest('-vv')
    # Reporting must survive the IndexError from findsource above.
    assert 'INTERNALERROR' not in result.stdout.str()
    result.stdout.fnmatch_lines(["*test_fix*", "*fixture*'missing'*not found*"])
def test_store_except_info_on_eror():
    # NOTE: the "eror" typo is preserved; renaming would change the collected test id.
    """ Test that upon test failure, the exception info is stored on
    sys.last_traceback and friends.
    """
    # Simulate item that raises a specific exception
    class ItemThatRaises:
        def runtest(self):
            raise IndexError('TEST')
    try:
        runner.pytest_runtest_call(ItemThatRaises())
    except IndexError:
        pass
    # Check that exception info is stored on sys
    assert sys.last_type is IndexError
    assert sys.last_value.args[0] == 'TEST'
    assert sys.last_traceback
| mpl-2.0 |
acsone/website | website_form_recaptcha/controllers/main.py | 5 | 1272 | # -*- coding: utf-8 -*-
# © 2016-TODAY LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import http
from openerp.http import request
from openerp.exceptions import ValidationError
import json
class WebsiteForm(http.Controller):
    """Expose the reCAPTCHA site key and validate captcha responses for website forms."""

    @http.route(
        '/website/recaptcha/',
        type='http',
        auth='public',
        methods=['GET'],
        website=True,
        multilang=False,
    )
    def recaptcha_public(self):
        """Return the public reCAPTCHA site key as a JSON payload."""
        return json.dumps({
            'site_key': request.env['ir.config_parameter'].get_param(
                'recaptcha.key.site'
            ),
        })

    def extract_data(self, **kwargs):
        """ Inject ReCaptcha validation into pre-existing data extraction """
        captcha_obj = request.env['website.form.recaptcha']
        # Behind a proxy the real client IP is the first entry of
        # X-Forwarded-For; otherwise use the socket peer address.
        ip_addr = request.httprequest.environ.get('HTTP_X_FORWARDED_FOR')
        if ip_addr:
            ip_addr = ip_addr.split(',')[0]
        else:
            ip_addr = request.httprequest.remote_addr
        try:
            captcha_obj.action_validate(
                kwargs.get(captcha_obj.RESPONSE_ATTR), ip_addr
            )
        except ValidationError:
            # Re-raise with the captcha field name so the website form can
            # mark that specific field as invalid.
            raise ValidationError([captcha_obj.RESPONSE_ATTR])
        return True
| agpl-3.0 |
attakei/readthedocs-oauth | readthedocs/projects/symlinks.py | 15 | 6057 | """Project symlink creation"""
import os
import logging
from django.conf import settings
import redis
from readthedocs.core.utils import run_on_app_servers
from readthedocs.projects.constants import LOG_TEMPLATE
from readthedocs.restapi.client import api
log = logging.getLogger(__name__)
def symlink_cnames(version):
    """Symlink project CNAME domains

    OLD
    Link from HOME/user_builds/cnames/<cname> ->
    HOME/user_builds/<project>/rtd-builds/

    NEW
    Link from HOME/user_builds/cnametoproject/<cname> ->
    HOME/user_builds/<project>/
    """
    try:
        redis_conn = redis.Redis(**settings.REDIS)
        cnames = redis_conn.smembers('rtd_slug:v1:%s' % version.project.slug)
    except redis.ConnectionError:
        # Best effort: without Redis we cannot know the CNAMEs; log and bail.
        log.error(LOG_TEMPLATE
                  .format(project=version.project.slug, version=version.slug,
                          msg='Failed to symlink cnames, Redis error.'),
                  exc_info=True)
        return
    for cname in cnames:
        log.debug(LOG_TEMPLATE
                  .format(project=version.project.slug, version=version.slug,
                          msg="Symlinking CNAME: %s" % cname))
        docs_dir = version.project.rtd_build_path(version.slug)
        # Chop off the version from the end.
        docs_dir = '/'.join(docs_dir.split('/')[:-1])
        # Old symlink location -- Keep this here til we change nginx over
        symlink = version.project.cnames_symlink_path(cname)
        run_on_app_servers('mkdir -p %s' % '/'.join(symlink.split('/')[:-1]))
        run_on_app_servers('ln -nsf %s %s' % (docs_dir, symlink))
        # New symlink location
        new_docs_dir = version.project.doc_path
        new_cname_symlink = os.path.join(getattr(settings, 'SITE_ROOT'), 'cnametoproject', cname)
        run_on_app_servers('mkdir -p %s' % '/'.join(new_cname_symlink.split('/')[:-1]))
        run_on_app_servers('ln -nsf %s %s' % (new_docs_dir, new_cname_symlink))
def symlink_subprojects(version):
    """Symlink project subprojects

    Link from HOME/user_builds/project/subprojects/<project> ->
    HOME/user_builds/<project>/rtd-builds/
    """
    # Subprojects
    if getattr(settings, 'DONT_HIT_DB', True):
        # Web servers fetch the relationship over the REST API, not the DB.
        subproject_slugs = [data['slug']
                            for data in (api.project(version.project.pk)
                                         .subprojects
                                         .get()['subprojects'])]
    else:
        rels = version.project.subprojects.all()
        subproject_slugs = [rel.child.slug for rel in rels]
    for slug in subproject_slugs:
        slugs = [slug]
        if '_' in slugs[0]:
            # Also create a dash-separated alias for underscore slugs.
            slugs.append(slugs[0].replace('_', '-'))
        for subproject_slug in slugs:
            log.debug(LOG_TEMPLATE
                      .format(project=version.project.slug,
                              version=version.slug,
                              msg="Symlinking subproject: %s" % subproject_slug))
            # The directory for this specific subproject
            symlink = version.project.subprojects_symlink_path(subproject_slug)
            run_on_app_servers('mkdir -p %s' % '/'.join(symlink.split('/')[:-1]))
            # Where the actual docs live
            docs_dir = os.path.join(settings.DOCROOT, subproject_slug, 'rtd-builds')
            run_on_app_servers('ln -nsf %s %s' % (docs_dir, symlink))
def symlink_translations(version):
    """Symlink project translations

    Link from HOME/user_builds/project/translations/<lang> ->
    HOME/user_builds/<project>/rtd-builds/
    """
    translations = {}
    if getattr(settings, 'DONT_HIT_DB', True):
        # Web servers fetch translations over the REST API, not the DB.
        for trans in (api
                      .project(version.project.pk)
                      .translations.get()['translations']):
            translations[trans['language']] = trans['slug']
    else:
        for trans in version.project.translations.all():
            translations[trans.language] = trans.slug
    # Default language, and pointer for 'en'
    version_slug = version.project.slug.replace('_', '-')
    translations[version.project.language] = version_slug
    if 'en' not in translations:
        translations['en'] = version_slug
    run_on_app_servers(
        'mkdir -p {0}'
        .format(os.path.join(version.project.doc_path, 'translations')))
    for (language, slug) in translations.items():
        log.debug(LOG_TEMPLATE.format(
            project=version.project.slug,
            version=version.slug,
            msg="Symlinking translation: %s->%s" % (language, slug)
        ))
        # The directory for this specific translation
        symlink = version.project.translations_symlink_path(language)
        translation_path = os.path.join(settings.DOCROOT, slug, 'rtd-builds')
        run_on_app_servers('ln -nsf {0} {1}'.format(translation_path, symlink))
def symlink_single_version(version):
    """Symlink project single version

    Link from HOME/user_builds/<project>/single_version ->
    HOME/user_builds/<project>/rtd-builds/<default_version>/
    """
    project = version.project
    default_version = project.get_default_version()
    log.debug(LOG_TEMPLATE.format(
        project=project.slug,
        version=default_version,
        msg="Symlinking single_version",
    ))

    # Make sure the parent directory of the symlink exists.
    symlink = project.single_version_symlink_path()
    parent_dir = '/'.join(symlink.split('/')[:-1])
    run_on_app_servers('mkdir -p {0}'.format(parent_dir))

    # Point the symlink at the default version's built docs.
    docs_dir = os.path.join(settings.DOCROOT, project.slug, 'rtd-builds', default_version)
    run_on_app_servers('ln -nsf {0} {1}'.format(docs_dir, symlink))
def remove_symlink_single_version(version):
    """Remove single_version symlink"""
    project = version.project
    log.debug(LOG_TEMPLATE.format(
        project=project.slug,
        version=project.get_default_version(),
        msg="Removing symlink for single_version",
    ))
    symlink = project.single_version_symlink_path()
    run_on_app_servers('rm -f %s' % symlink)
| mit |
shaufi10/odoo | addons/crm/crm.py | 267 | 7967 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.http import request
AVAILABLE_PRIORITIES = [
('0', 'Very Low'),
('1', 'Low'),
('2', 'Normal'),
('3', 'High'),
('4', 'Very High'),
]
class crm_tracking_medium(osv.Model):
    # OLD crm.case.channel
    """Tracking "medium" (see crm_tracking_mixin): the delivery method of a link."""
    _name = "crm.tracking.medium"
    _description = "Channels"
    _order = 'name'
    _columns = {
        'name': fields.char('Channel Name', required=True),
        'active': fields.boolean('Active'),
    }
    _defaults = {
        # records are active by default
        'active': lambda *a: 1,
    }
class crm_tracking_campaign(osv.Model):
    # OLD crm.case.resource.type
    """Tracking "campaign" (see crm_tracking_mixin): a named marketing effort."""
    _name = "crm.tracking.campaign"
    _description = "Campaign"
    _rec_name = "name"
    _columns = {
        'name': fields.char('Campaign Name', required=True, translate=True),
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
    }
class crm_tracking_source(osv.Model):
    """Tracking "source" (see crm_tracking_mixin): origin of the tracked link."""
    _name = "crm.tracking.source"
    _description = "Source"
    _rec_name = "name"
    _columns = {
        'name': fields.char('Source Name', required=True, translate=True),
    }
class crm_tracking_mixin(osv.AbstractModel):
    """Mixin class for objects which can be tracked by marketing. """
    _name = 'crm.tracking.mixin'

    _columns = {
        'campaign_id': fields.many2one('crm.tracking.campaign', 'Campaign',  # old domain ="['|',('section_id','=',section_id),('section_id','=',False)]"
            help="This is a name that helps you keep track of your different campaign efforts Ex: Fall_Drive, Christmas_Special"),
        'source_id': fields.many2one('crm.tracking.source', 'Source', help="This is the source of the link Ex: Search Engine, another domain, or name of email list"),
        'medium_id': fields.many2one('crm.tracking.medium', 'Channel', help="This is the method of delivery. Ex: Postcard, Email, or Banner Ad", oldname='channel_id'),
    }

    def tracking_fields(self):
        """Return (request/cookie key, field name) pairs handled by this mixin."""
        return [('utm_campaign', 'campaign_id'), ('utm_source', 'source_id'), ('utm_medium', 'medium_id')]

    def tracking_get_values(self, cr, uid, vals, context=None):
        """Fill the tracking fields of ``vals`` from the request cookies when unset.

        Many2one values received as strings are resolved by name (and created
        if missing).  Returns the mutated ``vals`` dict.
        """
        for key, fname in self.tracking_fields():
            field = self._fields[fname]
            value = vals.get(fname) or (request and request.httprequest.cookies.get(key))  # params.get should be always in session by the dispatch from ir_http
            if field.type == 'many2one' and isinstance(value, basestring):
                # if we receive a string for a many2one, we search/create the id
                if value:
                    Model = self.pool[field.comodel_name]
                    rel_id = Model.name_search(cr, uid, value, context=context)
                    if rel_id:
                        rel_id = rel_id[0][0]
                    else:
                        rel_id = Model.create(cr, uid, {'name': value}, context=context)
                    vals[fname] = rel_id
            else:
                # Here the code for others cases that many2one
                vals[fname] = value
        return vals

    def _get_default_track(self, cr, uid, field, context=None):
        # Default getter: resolve a single tracking field from the request.
        return self.tracking_get_values(cr, uid, {}, context=context).get(field)

    _defaults = {
        'source_id': lambda self, cr, uid, ctx: self._get_default_track(cr, uid, 'source_id', ctx),
        'campaign_id': lambda self, cr, uid, ctx: self._get_default_track(cr, uid, 'campaign_id', ctx),
        'medium_id': lambda self, cr, uid, ctx: self._get_default_track(cr, uid, 'medium_id', ctx),
    }
class crm_case_stage(osv.osv):
    """ Model for case stages. This models the main stages of a document
        management flow. Main CRM objects (leads, opportunities, project
        issues, ...) will now use only stages, instead of state and stages.
        Stages are for example used to display the kanban view of records.
    """
    _name = "crm.case.stage"
    _description = "Stage of case"
    _rec_name = 'name'
    _order = "sequence"

    _columns = {
        'name': fields.char('Stage Name', required=True, translate=True),
        'sequence': fields.integer('Sequence', help="Used to order stages. Lower is better."),
        'probability': fields.float('Probability (%)', required=True, help="This percentage depicts the default/average probability of the Case for this stage to be a success"),
        'on_change': fields.boolean('Change Probability Automatically', help="Setting this stage will change the probability automatically on the opportunity."),
        'requirements': fields.text('Requirements'),
        'section_ids': fields.many2many('crm.case.section', 'section_stage_rel', 'stage_id', 'section_id', string='Sections',
                                        help="Link between stages and sales teams. When set, this limitate the current stage to the selected sales teams."),
        'case_default': fields.boolean('Default to New Sales Team',
                                       help="If you check this field, this stage will be proposed by default on each sales team. It will not assign this stage to existing teams."),
        # BUGFIX: the two adjacent string literals used to read
        # "...kanban view when" + "there are..." which rendered as
        # "whenthere"; a trailing space restores the intended tooltip.
        'fold': fields.boolean('Folded in Kanban View',
                               help='This stage is folded in the kanban view when '
                               'there are no records in that stage to display.'),
        'type': fields.selection([('lead', 'Lead'), ('opportunity', 'Opportunity'), ('both', 'Both')],
                                 string='Type', required=True,
                                 help="This field is used to distinguish stages related to Leads from stages related to Opportunities, or to specify stages available for both types."),
    }

    _defaults = {
        'sequence': 1,
        'probability': 0.0,
        'on_change': True,
        'fold': False,
        'type': 'both',
        'case_default': True,
    }
class crm_case_categ(osv.osv):
    """ Category of Case """
    _name = "crm.case.categ"
    _description = "Category of Case"
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
        'object_id': fields.many2one('ir.model', 'Object Name'),
    }

    def _find_object_id(self, cr, uid, context=None):
        """Finds id for case object"""
        context = context or {}
        object_id = context.get('object_id', False)
        # Match either the explicit ir.model id or the model name, both
        # optionally provided through the context by the calling view/action.
        ids = self.pool.get('ir.model').search(cr, uid, ['|', ('id', '=', object_id), ('model', '=', context.get('object_name', False))])
        return ids and ids[0] or False

    _defaults = {
        'object_id': _find_object_id
    }
class crm_payment_mode(osv.osv):
    """ Payment Mode for Fund """
    # Minimal configuration model: a name plus an optional owning sales team.
    _name = "crm.payment.mode"
    _description = "CRM Payment Mode"
    _columns = {
        'name': fields.char('Name', required=True),
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MahjongRepository/tenhou-python-bot | project/utils/logger.py | 1 | 2471 | import datetime
import hashlib
import logging
import os
from logging.handlers import SysLogHandler
from utils.settings_handler import settings
LOG_FORMAT = "%(asctime)s %(levelname)s: %(message)s"
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
class ColoredFormatter(logging.Formatter):
    """
    Apply only to the console handler.

    Records whose message starts with "id=" are wrapped in green ANSI codes,
    "msg=" in cyan; everything else is emitted unstyled.  File/syslog
    handlers should use a plain Formatter so escape codes do not leak into
    stored logs.
    """

    green = "\u001b[32m"
    cyan = "\u001b[36m"
    reset = "\u001b[0m"

    def format(self, record):
        format_style = self._fmt
        message = record.getMessage()
        if message.startswith("id="):
            format_style = f"{ColoredFormatter.green}{format_style}{ColoredFormatter.reset}"
        if message.startswith("msg="):
            format_style = f"{ColoredFormatter.cyan}{format_style}{ColoredFormatter.reset}"
        # BUGFIX: pass datefmt through. The previous code created the inner
        # Formatter without it, so %(asctime)s silently ignored the
        # DATE_FORMAT this formatter was constructed with.
        formatter = logging.Formatter(format_style, datefmt=self.datefmt)
        return formatter.format(record)
def set_up_logging(save_to_file=True, print_to_console=True, logger_name="bot"):
    """
    Logger for tenhou communication and AI output

    :param save_to_file: also write DEBUG logs to logs/<prefix>_<timestamp>.log
    :param print_to_console: attach a colorized stream handler
    :param logger_name: name of the logger to configure
    :return: the configured logging.Logger
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)

    if print_to_console:
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        formatter = ColoredFormatter(LOG_FORMAT, datefmt=DATE_FORMAT)
        ch.setFormatter(formatter)
        logger.addHandler(ch)

    log_prefix = settings.LOG_PREFIX
    if not log_prefix:
        # Derive a stable 5-char prefix from the user id so concurrently
        # running bots get distinguishable but reproducible log names.
        log_prefix = hashlib.sha1(settings.USER_ID.encode("utf-8")).hexdigest()[:5]

    if save_to_file:
        logs_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "logs")
        if not os.path.exists(logs_directory):
            os.mkdir(logs_directory)
        formatter = logging.Formatter(LOG_FORMAT, datefmt=DATE_FORMAT)
        # we need it to distinguish different bots logs (if they were run in the same time)
        file_name = "{}_{}.log".format(log_prefix, datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S"))
        fh = logging.FileHandler(os.path.join(logs_directory, file_name), encoding="utf-8")
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        logger.addHandler(fh)

    if settings.PAPERTRAIL_HOST_AND_PORT:
        # Optional remote syslog (Papertrail); tag lines with the bot prefix.
        syslog = SysLogHandler(address=settings.PAPERTRAIL_HOST_AND_PORT)
        game_id = f"BOT_{log_prefix}"
        formatter = ColoredFormatter(f"%(asctime)s {game_id}: %(message)s", datefmt=DATE_FORMAT)
        syslog.setFormatter(formatter)
        logger.addHandler(syslog)

    return logger
| mit |
nirmeshk/oh-mainline | vendor/packages/html5lib/html5lib/filters/lint.py | 132 | 4033 | from gettext import gettext
_ = gettext
import _base
from html5lib.constants import cdataElements, rcdataElements, voidElements
from html5lib.constants import spaceCharacters
spaceCharacters = u"".join(spaceCharacters)
class LintError(Exception):
    """Raised when the token stream violates an HTML5 serialization invariant."""
    pass
class Filter(_base.Filter):
    """Token-stream sanity checker.

    Passes every token through unchanged while verifying that tag and
    attribute names/values are unicode strings, start/end tags balance,
    void elements are reported as EmptyTag, and the CDATA/RCDATA/PLAINTEXT
    content-model transitions are respected.  Raises LintError otherwise.
    """

    def __iter__(self):
        open_elements = []
        contentModelFlag = "PCDATA"
        for token in _base.Filter.__iter__(self):
            type = token["type"]
            if type in ("StartTag", "EmptyTag"):
                name = token["name"]
                if contentModelFlag != "PCDATA":
                    raise LintError(_("StartTag not in PCDATA content model flag: %s") % name)
                if not isinstance(name, unicode):
                    raise LintError(_(u"Tag name is not a string: %r") % name)
                if not name:
                    raise LintError(_(u"Empty tag name"))
                if type == "StartTag" and name in voidElements:
                    raise LintError(_(u"Void element reported as StartTag token: %s") % name)
                elif type == "EmptyTag" and name not in voidElements:
                    raise LintError(_(u"Non-void element reported as EmptyTag token: %s") % token["name"])
                if type == "StartTag":
                    open_elements.append(name)
                # BUGFIX: use a distinct loop variable -- the previous code
                # reused `name`, so the tag name was clobbered by the last
                # attribute name before the content-model checks below.
                for attr_name, value in token["data"]:
                    if not isinstance(attr_name, unicode):
                        raise LintError(_("Attribute name is not a string: %r") % attr_name)
                    if not attr_name:
                        raise LintError(_(u"Empty attribute name"))
                    if not isinstance(value, unicode):
                        raise LintError(_("Attribute value is not a string: %r") % value)
                # Content-model transition is decided by the *tag* name.
                if name in cdataElements:
                    contentModelFlag = "CDATA"
                elif name in rcdataElements:
                    contentModelFlag = "RCDATA"
                elif name == "plaintext":
                    contentModelFlag = "PLAINTEXT"

            elif type == "EndTag":
                name = token["name"]
                if not isinstance(name, unicode):
                    raise LintError(_(u"Tag name is not a string: %r") % name)
                if not name:
                    raise LintError(_(u"Empty tag name"))
                if name in voidElements:
                    raise LintError(_(u"Void element reported as EndTag token: %s") % name)
                start_name = open_elements.pop()
                if start_name != name:
                    raise LintError(_(u"EndTag (%s) does not match StartTag (%s)") % (name, start_name))
                contentModelFlag = "PCDATA"

            elif type == "Comment":
                if contentModelFlag != "PCDATA":
                    raise LintError(_("Comment not in PCDATA content model flag"))

            elif type in ("Characters", "SpaceCharacters"):
                data = token["data"]
                if not isinstance(data, unicode):
                    # BUGFIX: message previously said "Attribute name".
                    raise LintError(_("Character data is not a string: %r") % data)
                if not data:
                    raise LintError(_(u"%s token with empty data") % type)
                if type == "SpaceCharacters":
                    data = data.strip(spaceCharacters)
                    if data:
                        # BUGFIX: the format string was missing its %s
                        # placeholder, so this raise itself crashed with
                        # TypeError instead of reporting the lint error.
                        raise LintError(_(u"Non-space character(s) found in SpaceCharacters token: %s") % data)

            elif type == "Doctype":
                name = token["name"]
                if contentModelFlag != "PCDATA":
                    raise LintError(_("Doctype not in PCDATA content model flag: %s") % name)
                if not isinstance(name, unicode):
                    raise LintError(_(u"Tag name is not a string: %r") % name)
                # XXX: what to do with token["data"] ?

            elif type in ("ParseError", "SerializeError"):
                pass

            else:
                raise LintError(_(u"Unknown token type: %s") % type)

            yield token
| agpl-3.0 |
petewarden/tensorflow_makefile | tensorflow/contrib/metrics/python/ops/histogram_ops.py | 6 | 10226 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Metrics that use histograms.
Module documentation, including "@@" callouts, should be put in
third_party/tensorflow/contrib/metrics/__init__.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import histogram_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
def auc_using_histogram(boolean_labels,
                        scores,
                        score_range,
                        nbins=100,
                        collections=None,
                        check_shape=True,
                        name=None):
  """AUC computed by maintaining histograms.

  Rather than computing AUC directly, this Op maintains Variables containing
  histograms of the scores associated with `True` and `False` labels.  By
  comparing these the AUC is generated, with some discretization error.
  See: "Efficient AUC Learning Curve Calculation" by Bouckaert.

  This AUC Op updates in `O(batch_size + nbins)` time and works well even with
  large class imbalance.  The accuracy is limited by discretization error due
  to finite number of bins.  If scores are concentrated in a fewer bins,
  accuracy is lower.  If this is a concern, we recommend trying different
  numbers of bins and comparing results.

  Args:
    boolean_labels:  1-D boolean `Tensor`.  Entry is `True` if the corresponding
      record is in class.
    scores:  1-D numeric `Tensor`, same shape as boolean_labels.
    score_range:  `Tensor` of shape `[2]`, same dtype as `scores`.  The min/max
      values of score that we expect.  Scores outside range will be clipped.
    nbins:  Integer number of bins to use.  Accuracy strictly increases as the
      number of bins increases.
    collections: List of graph collections keys. Internal histogram Variables
      are added to these collections. Defaults to `[GraphKeys.LOCAL_VARIABLES]`.
    check_shape:  Boolean.  If `True`, do a runtime shape check on the scores
      and labels.
    name:  A name for this Op.  Defaults to "auc_using_histogram".

  Returns:
    auc:  `float32` scalar `Tensor`.  Fetching this converts internal histograms
      to auc value.
    update_op:  `Op`, when run, updates internal histograms.
  """
  if collections is None:
    collections = [ops.GraphKeys.LOCAL_VARIABLES]
  with variable_scope.variable_op_scope(
      [boolean_labels, scores, score_range], name, 'auc_using_histogram'):
    score_range = ops.convert_to_tensor(score_range, name='score_range')
    boolean_labels, scores = _check_labels_and_scores(
        boolean_labels, scores, check_shape)
    # Per-batch histograms of scores for True and False records ...
    hist_true, hist_false = _make_auc_histograms(boolean_labels, scores,
                                                 score_range, nbins)
    # ... accumulated into local Variables by update_op ...
    hist_true_acc, hist_false_acc, update_op = _auc_hist_accumulate(hist_true,
                                                                    hist_false,
                                                                    nbins,
                                                                    collections)
    # ... and converted to the AUC estimate on fetch.
    auc = _auc_convert_hist_to_auc(hist_true_acc, hist_false_acc, nbins)
    return auc, update_op
def _check_labels_and_scores(boolean_labels, scores, check_shape):
  """Check the rank/dtype of labels and scores, return tensor versions.

  Args:
    boolean_labels:  `Tensor` convertible object; must have dtype `bool`.
    scores:  `Tensor` convertible object holding the scores.
    check_shape:  Boolean.  If `True`, attach runtime rank-1 assertions as
      control dependencies on the returned tensors.

  Returns:
    Tuple `(boolean_labels, scores)` of converted `Tensor`s.

  Raises:
    ValueError:  If `boolean_labels` does not have dtype `bool`.
  """
  with ops.op_scope([boolean_labels, scores], '_check_labels_and_scores'):
    boolean_labels = ops.convert_to_tensor(boolean_labels,
                                           name='boolean_labels')
    scores = ops.convert_to_tensor(scores, name='scores')

    if boolean_labels.dtype != dtypes.bool:
      # BUGFIX: interpolate with '%'.  The dtype used to be passed as a
      # second exception argument, leaving the '%s' placeholder unfilled.
      raise ValueError(
          'Argument boolean_labels should have dtype bool. Found: %s'
          % boolean_labels.dtype)

    if check_shape:
      labels_rank_1 = logging_ops.Assert(
          math_ops.equal(1, array_ops.rank(boolean_labels)),
          ['Argument boolean_labels should have rank 1. Found: ',
           boolean_labels.name, array_ops.shape(boolean_labels)])

      scores_rank_1 = logging_ops.Assert(
          math_ops.equal(1, array_ops.rank(scores)),
          ['Argument scores should have rank 1. Found: ', scores.name,
           array_ops.shape(scores)])

      # Tie the assertions to the returned tensors so they actually run.
      with ops.control_dependencies([labels_rank_1, scores_rank_1]):
        return boolean_labels, scores
    else:
      return boolean_labels, scores
def _make_auc_histograms(boolean_labels, scores, score_range, nbins):
  """Create histogram tensors from one batch of labels/scores.

  Args:
    boolean_labels:  1-D `bool` `Tensor`.
    scores:  1-D numeric `Tensor`, same shape as `boolean_labels`.
    score_range:  `Tensor` of shape `[2]` holding the expected [min, max]
      score; scores outside are clipped by histogram_fixed_width.
    nbins:  Integer number of histogram bins.

  Returns:
    Tuple `(hist_true, hist_false)` of `int64` histograms of shape `[nbins]`.
  """
  with variable_scope.variable_op_scope(
      [boolean_labels, scores, nbins], None, 'make_auc_histograms'):
    # Histogram of scores for records in this batch with True label.
    hist_true = histogram_ops.histogram_fixed_width(
        array_ops.boolean_mask(scores, boolean_labels),
        score_range,
        nbins=nbins,
        dtype=dtypes.int64,
        name='hist_true')
    # Histogram of scores for records in this batch with False label.
    hist_false = histogram_ops.histogram_fixed_width(
        array_ops.boolean_mask(scores, math_ops.logical_not(boolean_labels)),
        score_range,
        nbins=nbins,
        dtype=dtypes.int64,
        name='hist_false')
    return hist_true, hist_false
def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
  """Accumulate histograms in new variables.

  Args:
    hist_true:  `int64` histogram `Tensor` of shape `[nbins]` for True records.
    hist_false:  `int64` histogram `Tensor` of shape `[nbins]` for False records.
    nbins:  Integer number of bins.
    collections:  Graph collection keys the accumulator Variables join.

  Returns:
    Tuple `(hist_true_acc, hist_false_acc, update_op)`; running `update_op`
    adds the current batch histograms into the accumulators.
  """
  with variable_scope.variable_op_scope(
      [hist_true, hist_false], None, 'hist_accumulate'):
    # Holds running total histogram of scores for records labeled True.
    hist_true_acc = variable_scope.get_variable(
        'hist_true_acc',
        initializer=array_ops.zeros_initializer(
            [nbins],
            dtype=hist_true.dtype),
        collections=collections,
        trainable=False)
    # Holds running total histogram of scores for records labeled False.
    hist_false_acc = variable_scope.get_variable(
        'hist_false_acc',
        initializer=array_ops.zeros_initializer(
            [nbins],
            dtype=hist_false.dtype),
        collections=collections,
        trainable=False)

    update_op = control_flow_ops.group(
        hist_true_acc.assign_add(hist_true),
        hist_false_acc.assign_add(hist_false),
        name='update_op')

    return hist_true_acc, hist_false_acc, update_op
def _auc_convert_hist_to_auc(hist_true_acc, hist_false_acc, nbins):
  """Convert histograms to auc.

  Args:
    hist_true_acc:  `Tensor` holding accumulated histogram of scores for records
      that were `True`.
    hist_false_acc:  `Tensor` holding accumulated histogram of scores for
      records that were `False`.
    nbins:  Integer number of bins in the histograms.

  Returns:
    Scalar `Tensor` estimating AUC.
  """
  # Note that this follows the "Approximating AUC" section in:
  # Efficient AUC learning curve calculation, R. R. Bouckaert,
  # AI'06 Proceedings of the 19th Australian joint conference on Artificial
  # Intelligence: advances in Artificial Intelligence
  # Pages 181-191.
  # Note that the above paper has an error, and we need to re-order our bins to
  # go from high to low score.

  # Normalize histogram so we get fraction in each bin.
  normed_hist_true = math_ops.truediv(hist_true_acc,
                                      math_ops.reduce_sum(hist_true_acc))
  normed_hist_false = math_ops.truediv(hist_false_acc,
                                       math_ops.reduce_sum(hist_false_acc))

  # These become delta x, delta y from the paper.
  # reverse([True]) flips along dimension 0, i.e. highest score bin first.
  delta_y_t = array_ops.reverse(normed_hist_true, [True], name='delta_y_t')
  delta_x_t = array_ops.reverse(normed_hist_false, [True], name='delta_x_t')

  # strict_1d_cumsum requires float32 args.
  delta_y_t = math_ops.cast(delta_y_t, dtypes.float32)
  delta_x_t = math_ops.cast(delta_x_t, dtypes.float32)

  # Trapezoidal integration, \int_0^1 0.5 * (y_t + y_{t-1}) dx_t
  y_t = _strict_1d_cumsum(delta_y_t, nbins)
  first_trap = delta_x_t[0] * y_t[0] / 2.0
  other_traps = delta_x_t[1:] * (y_t[1:] + y_t[:nbins - 1]) / 2.0
  return math_ops.add(first_trap, math_ops.reduce_sum(other_traps), name='auc')
# TODO(langmore) Remove once a faster cumsum (accumulate_sum) Op is available.
# Also see if cast to float32 above can be removed with new cumsum.
# See: https://github.com/tensorflow/tensorflow/issues/813
def _strict_1d_cumsum(tensor, len_tensor):
  """Cumulative sum of a 1D tensor whose static length is known.

  Implemented by left-padding the input with `len_tensor - 1` zeros and
  convolving with an all-ones kernel, then truncating back to the original
  length.  Assumes the tensor shape is fully defined.
  """
  with ops.op_scope([tensor], 'strict_1d_cumsum'):
    if not len_tensor:
      # Zero-length input: cumsum is the empty tensor.
      return constant_op.constant([])
    padded = array_ops.pad(tensor, [[len_tensor - 1, 0]])
    kernel = array_ops.ones_like(padded)
    return _strict_conv1d(padded, kernel)[:len_tensor]
# TODO(langmore) Remove once a faster cumsum (accumulate_sum) Op is available.
# See: https://github.com/tensorflow/tensorflow/issues/813
def _strict_conv1d(x, h):
  """Return the 1-D convolution x * h for rank 1 tensors x and h."""
  with ops.op_scope([x, h], 'strict_conv1d'):
    # Lift both operands to rank 4 so nn_ops.conv2d can do the work,
    # then flatten the result back to rank 1.
    x_4d = array_ops.reshape(x, (1, -1, 1, 1))
    h_4d = array_ops.reshape(h, (-1, 1, 1, 1))
    conv = nn_ops.conv2d(x_4d, h_4d, [1, 1, 1, 1], 'SAME')
    return array_ops.reshape(conv, [-1])
| apache-2.0 |
lalitkumarj/NEXT-psych | next/database/database_backup.py | 1 | 2053 | #!/usr/bin/python
"""
Every 30 minutes backs up database to S3. To recover the database, (i.e. reverse the process)
simply download the file from S3, un-tar it, and use the command:
(./)mongorestore --host {hostname} --port {port} path/to/dump/mongodump
where {hostname} and {port} are as they are below
"""
import sys
sys.path.append("/next_backend")
import time
import traceback
import next.utils as utils
import subprocess
import next.constants as constants
import os
NEXT_BACKEND_GLOBAL_HOST = os.environ.get('NEXT_BACKEND_GLOBAL_HOST', 'localhost')
AWS_BUCKET_NAME = os.environ.get('AWS_BUCKET_NAME','next-database-backups')
timestamp = utils.datetimeNow()
print "[ %s ] starting backup of MongoDB to S3..." % str(timestamp)
print "[ %s ] constants.AWS_ACCESS_ID = %s" % (str(timestamp),constants.AWS_ACCESS_ID)
subprocess.call('/usr/bin/mongodump -vvvvv --host {hostname}:{port} --out /dump/mongo_dump'.format( hostname=constants.MONGODB_HOST, port=constants.MONGODB_PORT ),shell=True)
try:
tar_file = sys.argv[1]
except:
tar_file = 'mongo_dump_{hostname}_{timestamp}.tar.gz'.format( hostname=NEXT_BACKEND_GLOBAL_HOST, timestamp= timestamp.strftime("%Y-%m-%d_%H:%M:%S") )
subprocess.call('tar czf {path}/{tar_file} /dump/mongo_dump'.format(path='/dump',tar_file=tar_file),shell=True)
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import boto
# boto.set_stream_logger('boto')
try:
conn = S3Connection(constants.AWS_ACCESS_ID,constants.AWS_SECRET_ACCESS_KEY)
b = conn.get_bucket(AWS_BUCKET_NAME)
k = Key(b)
k.key = tar_file
bytes_saved = k.set_contents_from_filename( '/dump/'+tar_file )
timestamp = utils.datetimeNow()
print "[ %s ] done with backup of MongoDB to S3... %d bytes saved" % (str(timestamp),bytes_saved)
except:
error = traceback.format_exc()
timestamp = utils.datetimeNow()
print "[ %s ] FAILED TO CONNECT TO S3... saving locally" % str(timestamp)
print error
subprocess.call('rm {path}/{tar_file} /dump/mongo_dump'.format(path='/dump',tar_file=tar_file),shell=True)
| apache-2.0 |
waynegm/OpendTect-5-plugins | bin/python/wmpy/Filtering/ex_gradient3.py | 3 | 1704 | # Compute Gradients
#
# Prewitt External Attribute
#
import sys,os
import numpy as np
sys.path.insert(0, os.path.join(sys.path[0], '..'))
import extattrib as xa
import extlib as xl
# Attribute parameter block consumed by the ExternalAttrib framework (xa).
xa.params = {
    'Inputs': ['Input'],
    'Output' : ['Average Gradient', 'In-line gradient', 'Cross-line gradient', 'Z gradient'],
    # One Z sample above/below and a 1x1 trace stepout are requested for the
    # derivative operators; both are fixed (hidden from the user).
    'ZSampMargin' : {'Value': [-1,1], 'Hidden': True},
    'StepOut' : {'Value': [1,1], 'Hidden': True},
    # User-selectable derivative operator; 'Selection' is an index into 'Values'.
    'Select': {'Name': 'Operator', 'Values': ['Scharr','Kroon'], 'Selection': 0},
    'Parallel' : False,
    'Help' : 'http://waynegm.github.io/OpendTect-Plugin-Docs/Attributes/ExternalAttrib/'
}
def doCompute():
    """Stream traces from OpendTect and emit gradient volumes.

    Loops until the host terminates the input stream: each iteration reads
    one input block, computes the in-line/cross-line/Z first derivatives
    using the operator selected in xa.params (Scharr or Kroon) plus their
    average, then writes the four outputs back.
    """
    # Centre trace position inside the stepout window.
    inlpos = xa.SI['nrinl']//2
    crlpos = xa.SI['nrcrl']//2
    filt = xa.params['Select']['Selection']
    while True:
        xa.doInput()
        indata = xa.Input['Input']
        if filt==0:
            # Scharr path: full=False presumably returns only the centre
            # trace, so no window indexing is needed — verify in extlib.
            xa.Output['In-line gradient'] = xl.scharr3_dx(indata, full=False)
            xa.Output['Cross-line gradient'] = xl.scharr3_dy(indata, full=False)
            xa.Output['Z gradient'] = xl.scharr3_dz(indata, full=False)
        else:
            # Kroon path returns the full window; select the centre trace.
            xa.Output['In-line gradient'] = xl.kroon3(indata, axis=0)[inlpos,crlpos,:]
            xa.Output['Cross-line gradient'] = xl.kroon3(indata, axis=1)[inlpos,crlpos,:]
            xa.Output['Z gradient'] = xl.kroon3(indata, axis=-1)[inlpos,crlpos,:]
        xa.Output['Average Gradient'] = ( xa.Output['In-line gradient']
                                        + xa.Output['Cross-line gradient']
                                        + xa.Output['Z gradient'] )/3
        xa.doOutput()

# Register the compute callback and start the attribute main loop.
xa.doCompute = doCompute
xa.run(sys.argv[1:])
| gpl-3.0 |
rschwager-mm/polymr | polymr/record.py | 1 | 2354 | import csv
from itertools import chain
from collections import namedtuple
from toolz import get
class Record(namedtuple("Record", ["fields", "pk", "data"])):
    """Indexing organizes records for easy lookup and searching finds the
    closest record to a query. This class defines what a record is.

    :param fields: The attributes used to find a record. Searchers
      will supply something like these to find records and the indexer
      will use these to organize the records for easy lookup
    :type fields: tuple of str

    :param pk: The primary key used to find this record in other
      databases.
    :type pk: str

    :param data: The attributes not used to find a record, but you
      want to store anyway.
    :type data: tuple of str
    """
    # Keep namedtuple's lightweight memory layout: without __slots__,
    # subclassing re-introduces a per-instance __dict__.
    __slots__ = ()
def _from_general(rows, searched_fields_idxs=None, pk_field_idx=None,
                  include_data=True):
    """Turn an iterator of row sequences into an iterator of Records.

    The first row fixes the column count.  By default the last column is
    the primary key and every other column is searched; any remaining
    columns become the record's extra data (dropped when include_data is
    False).  Missing cells default to the empty string.
    """
    first_row = next(rows)
    all_idxs = list(range(len(first_row)))
    if searched_fields_idxs is None:
        searched_fields_idxs = all_idxs[:-1]
    if pk_field_idx is None:
        pk_field_idx = all_idxs[-1]
    elif pk_field_idx < 0:
        # Normalize a negative index to its positive equivalent.
        pk_field_idx = all_idxs[pk_field_idx]
    claimed = set(chain([pk_field_idx], searched_fields_idxs))
    data_idxs = [i for i in all_idxs if i not in claimed]

    def _build(row):
        extra = tuple(get(data_idxs, row, "")) if include_data else tuple()
        return Record(tuple(get(searched_fields_idxs, row, "")),
                      get(pk_field_idx, row, ""),
                      extra)

    # Chain the already-consumed first row back onto the stream.
    return map(_build, chain([first_row], rows))
def from_csv(f, searched_fields_idxs=None, pk_field_idx=None,
             include_data=True):
    """Build Records from a CSV file object; see _from_general for arguments."""
    return _from_general(csv.reader(f), searched_fields_idxs,
                         pk_field_idx, include_data)
def from_psv(f, searched_fields_idxs=None, pk_field_idx=None,
             include_data=True):
    """Build Records from a pipe-separated file object.

    Blank lines are skipped; see _from_general for the argument semantics.
    """
    stripped = (line.strip() for line in f)
    rows = (text.split('|') for text in stripped if text)
    return _from_general(rows, searched_fields_idxs,
                         pk_field_idx, include_data)
# Dispatch table mapping a format name to its reader function.
readers = {
    'csv': from_csv,
    'psv': from_psv,
}
| apache-2.0 |
abought/osf.io | website/search/views.py | 10 | 10780 | # -*- coding: utf-8 -*-
import functools
import httplib as http
import json
import logging
import time
from urllib2 import unquote
import bleach
from flask import request
from modularodm import Q
from framework.auth.decorators import collect_auth
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from framework import sentry
from website import language
from website import settings
from website.models import Node, User
from website.project.views.contributor import get_node_contributors_abbrev
from website.search import exceptions
from website.search import share_search
from website.search import util
from website.search.exceptions import IndexNotFoundError, MalformedQueryError
import website.search.search as search
from website.search.util import build_query
from website.util import api_url_for
logger = logging.getLogger(__name__)
RESULTS_PER_PAGE = 250
def handle_search_errors(func):
    """Decorator converting search-layer exceptions into HTTPErrors.

    The except clauses are checked in order: a malformed query maps to 400
    with query help, an unavailable backend to 503, and any other
    SearchException to a logged 400.
    """
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except exceptions.MalformedQueryError:
            raise HTTPError(http.BAD_REQUEST, data={
                'message_short': 'Bad search query',
                'message_long': language.SEARCH_QUERY_HELP,
            })
        except exceptions.SearchUnavailableError:
            raise HTTPError(http.SERVICE_UNAVAILABLE, data={
                'message_short': 'Search unavailable',
                'message_long': ('Our search service is currently unavailable, if the issue persists, '
                                 'please report it to <a href="mailto:support@osf.io">support@osf.io</a>.'),
            })
        except exceptions.SearchException:
            # Interim fix for issue where ES fails with 500 in some settings- ensure exception is still logged until it can be better debugged. See OSF-4538
            sentry.log_exception()
            sentry.log_message('Elasticsearch returned an unexpected error response')
            # TODO: Add a test; may need to mock out the error response due to inability to reproduce error code locally
            raise HTTPError(http.BAD_REQUEST, data={
                'message_short': 'Could not perform search query',
                'message_long': language.SEARCH_QUERY_HELP,
            })
    return wrapped
@handle_search_errors
def search_search(**kwargs):
    """Run a search from either a POST JSON body or GET query parameters.

    Adds the elapsed time (seconds, 2 dp) under the 'time' key.
    """
    doc_type = kwargs.get('type', None)
    started = time.time()

    results = {}
    if request.method == 'POST':
        results = search.search(request.get_json(), doc_type=doc_type)
    elif request.method == 'GET':
        text = request.args.get('q', '*')
        # TODO Match javascript params?
        start = request.args.get('from', '0')
        size = request.args.get('size', '10')
        results = search.search(build_query(text, start, size), doc_type=doc_type)

    results['time'] = round(time.time() - started, 2)
    return results
def conditionally_add_query_item(query, item, condition):
    """Narrow a modular ODM query by a yes/no/either flag.

    :param query: the modular ODM query to extend
    :param item: the field to query on
    :param condition: 'yes', 'no' or 'either' (case-insensitive); 'either'
        leaves the query unchanged
    :return: the (possibly) narrowed query
    :raises HTTPError: 400 when condition is not one of the three values
    """
    normalized = condition.lower()
    if normalized == 'either':
        return query
    if normalized in ('yes', 'no'):
        return query & Q(item, 'eq', normalized == 'yes')
    raise HTTPError(http.BAD_REQUEST)
@must_be_logged_in
def search_projects_by_title(**kwargs):
    """ Search for nodes by title. Can pass in arguments from the URL to modify the search
    :arg term: The substring of the title.
    :arg category: Category of the node.
    :arg isDeleted: yes, no, or either. Either will not add a qualifier for that argument in the search.
    :arg isFolder: yes, no, or either. Either will not add a qualifier for that argument in the search.
    :arg isRegistration: yes, no, or either. Either will not add a qualifier for that argument in the search.
    :arg includePublic: yes or no. Whether the projects listed should include public projects.
    :arg includeContributed: yes or no. Whether the search should include projects the current user has
        contributed to.
    :arg ignoreNode: a list of nodes that should not be included in the search.
    :return: a list of dictionaries of projects
    """
    # TODO(fabianvf): At some point, it would be nice to do this with elastic search
    user = kwargs['auth'].user

    term = request.args.get('term', '')
    max_results = int(request.args.get('maxResults', '10'))
    category = request.args.get('category', 'project').lower()
    is_deleted = request.args.get('isDeleted', 'no').lower()
    is_collection = request.args.get('isFolder', 'no').lower()
    is_registration = request.args.get('isRegistration', 'no').lower()
    include_public = request.args.get('includePublic', 'yes').lower()
    include_contributed = request.args.get('includeContributed', 'yes').lower()
    # BUG FIX: getlist's second positional argument is `type`, not a default;
    # passing [] made werkzeug try to call it on each value. getlist already
    # returns [] when the key is absent.
    ignore_nodes = request.args.getlist('ignoreNode')

    matching_title = (
        Q('title', 'icontains', term) &  # search term (case insensitive)
        Q('category', 'eq', category)  # is a project
    )

    matching_title = conditionally_add_query_item(matching_title, 'is_deleted', is_deleted)
    matching_title = conditionally_add_query_item(matching_title, 'is_collection', is_collection)
    matching_title = conditionally_add_query_item(matching_title, 'is_registration', is_registration)

    if len(ignore_nodes) > 0:
        for node_id in ignore_nodes:
            matching_title = matching_title & Q('_id', 'ne', node_id)

    my_projects = []
    my_project_count = 0
    public_projects = []

    if include_contributed == 'yes':
        my_projects = list(Node.find(
            matching_title &
            Q('contributors', 'eq', user._id)  # user is a contributor
        ).limit(max_results))
        # BUG FIX: this previously read `my_project_count = my_project_count`,
        # leaving the count at 0, so the public query below did not account
        # for how many of the user's own projects were already returned.
        my_project_count = len(my_projects)

    if my_project_count < max_results and include_public == 'yes':
        public_projects = Node.find(
            matching_title &
            Q('is_public', 'eq', True)  # is public
        ).limit(max_results - my_project_count)

    results = list(my_projects) + list(public_projects)
    ret = process_project_search_results(results, **kwargs)
    return ret
@must_be_logged_in
def process_project_search_results(results, **kwargs):
    """Shape a list of found projects for the title-search autocomplete.

    :param results: list of projects from the modular ODM search
    :return: list of dicts, one per project, including an HTML snippet that
        links each contributor.
    """
    user = kwargs['auth'].user

    processed = []
    for project in results:
        contribs = get_node_contributors_abbrev(project=project, auth=kwargs['auth'])
        # Build the linked-author HTML incrementally, then join once.
        pieces = []
        for contributor in contribs['contributors']:
            loaded = User.load(contributor['user_id'])
            pieces.append('<a href="%s">%s</a>' % (loaded.url, loaded.fullname))
            pieces.append(contributor['separator'] + ' ')
        authors_html = ''.join(pieces) + ' ' + contribs['others_count']

        processed.append({
            'id': project._id,
            'label': project.title,
            'value': project.title,
            'category': 'My Projects' if user in project.contributors else 'Public Projects',
            'authors': authors_html,
        })

    return processed
@collect_auth
def search_contributor(auth):
    """Search for potential contributors, excluding a node's existing ones."""
    user = auth.user if auth else None

    nid = request.args.get('excludeNode')
    exclude = Node.load(nid).contributors if nid else []

    # TODO: Determine whether bleach is appropriate for ES payload. Also, inconsistent with website.sanitize.util.strip_html
    def _sanitized(name, default):
        # Strip all HTML from user-supplied query-string values.
        return bleach.clean(request.args.get(name, default), tags=[], strip=True)

    query = _sanitized('query', '')
    page = int(_sanitized('page', '0'))
    size = int(_sanitized('size', '5'))

    return search.search_contributor(query=query, page=page, size=size,
                                     exclude=exclude, current_user=user)
@handle_search_errors
def search_share():
    """Proxy a search (or count) against the SHARE Elasticsearch index.

    Adds the elapsed time (seconds, 2 dp) under the 'time' key.
    """
    started = time.time()

    want_count = request.args.get('count') is not None
    raw = request.args.get('raw') is not None
    version = request.args.get('v')
    # A 'v' parameter selects a specific versioned index.
    index = (settings.SHARE_ELASTIC_INDEX_TEMPLATE.format(version)
             if version
             else settings.SHARE_ELASTIC_INDEX)

    if request.method == 'POST':
        query = request.get_json()
    elif request.method == 'GET':
        query = build_query(
            request.args.get('q', '*'),
            request.args.get('from', 0),
            request.args.get('size', 10),
            sort=request.args.get('sort')
        )

    if want_count:
        results = search.count_share(query, index=index)
    else:
        results = search.search_share(query, raw, index=index)

    results['time'] = round(time.time() - started, 2)
    return results
@handle_search_errors
def search_share_stats():
    """Return SHARE statistics, optionally filtered by a 'q' query string."""
    raw_q = request.args.get('q')
    return search.share_stats(query=build_query(raw_q, 0, 0) if raw_q else {})
@handle_search_errors
def search_share_atom(**kwargs):
    """Serve SHARE search results as a paginated Atom feed.

    Accepts either a simple 'q'/'sort' pair or a full Elasticsearch query
    under 'jsonQuery'; paging is always forced to RESULTS_PER_PAGE.
    """
    json_query = request.args.get('jsonQuery')
    start = util.compute_start(request.args.get('page', 1), RESULTS_PER_PAGE)
    if not json_query:
        q = request.args.get('q', '*')
        sort = request.args.get('sort')

        # we want the results per page to be constant between pages
        # TODO - move this functionality into build_query in util
        query = build_query(q, size=RESULTS_PER_PAGE, start=start, sort=sort)
    else:
        # Caller supplied a full ES query; overwrite its paging fields.
        query = json.loads(unquote(json_query))
        query['from'] = start
        query['size'] = RESULTS_PER_PAGE

    # Aggregations are expensive, and we really don't want to
    # execute them if they won't be used
    for field in ['aggs', 'aggregations']:
        if query.get(field):
            del query[field]

    q = query  # Do we really want to display this?

    try:
        search_results = search.search_share(query)
    except MalformedQueryError:
        raise HTTPError(http.BAD_REQUEST)
    except IndexNotFoundError:
        # A missing index just means there are no results yet, not an error.
        search_results = {
            'count': 0,
            'results': []
        }

    atom_url = api_url_for('search_share_atom', _xml=True, _absolute=True)

    return util.create_atom_feed(
        name='SHARE',
        data=search_results['results'],
        query=q,
        size=RESULTS_PER_PAGE,
        start=start,
        url=atom_url,
        to_atom=share_search.to_atom
    )
def search_share_providers():
    """Return the list of SHARE content providers."""
    return search.share_providers()
| apache-2.0 |
AmrThabet/CouchPotatoServer | libs/tornado/platform/common.py | 285 | 3403 | """Lowest-common-denominator implementations of platform functionality."""
from __future__ import absolute_import, division, print_function, with_statement
import errno
import socket
from tornado.platform import interface
class Waker(interface.Waker):
    """Create an OS independent asynchronous pipe.

    For use on platforms that don't have os.pipe() (or where pipes cannot
    be passed to select()), but do have sockets.  This includes Windows
    and Jython.
    """
    def __init__(self):
        # Based on Zope select_trigger.py:
        # https://github.com/zopefoundation/Zope/blob/master/src/ZServer/medusa/thread/select_trigger.py

        self.writer = socket.socket()
        # Disable buffering -- pulling the trigger sends 1 byte,
        # and we want that sent immediately, to wake up ASAP.
        self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

        count = 0
        while 1:
            count += 1
            # Bind to a local port; for efficiency, let the OS pick
            # a free port for us.
            # Unfortunately, stress tests showed that we may not
            # be able to connect to that port ("Address already in
            # use") despite that the OS picked it.  This appears
            # to be a race bug in the Windows socket implementation.
            # So we loop until a connect() succeeds (almost always
            # on the first try).  See the long thread at
            # http://mail.zope.org/pipermail/zope/2005-July/160433.html
            # for hideous details.
            a = socket.socket()
            a.bind(("127.0.0.1", 0))
            a.listen(1)
            connect_address = a.getsockname()  # assigned (host, port) pair
            try:
                self.writer.connect(connect_address)
                break    # success
            except socket.error as detail:
                if (not hasattr(errno, 'WSAEADDRINUSE') or
                        detail[0] != errno.WSAEADDRINUSE):
                    # "Address already in use" is the only error
                    # I've seen on two WinXP Pro SP2 boxes, under
                    # Pythons 2.3.5 and 2.4.1.
                    raise
                # (10048, 'Address already in use')
                # assert count <= 2 # never triggered in Tim's tests
                if count >= 10:  # I've never seen it go above 2
                    a.close()
                    self.writer.close()
                    raise socket.error("Cannot bind trigger!")
                # Close `a` and try again.  Note:  I originally put a short
                # sleep() here, but it didn't appear to help or hurt.
                a.close()

        # The reader end is the accepted connection; it is non-blocking so
        # consume() can drain it without stalling the IOLoop.
        self.reader, addr = a.accept()
        self.reader.setblocking(0)
        self.writer.setblocking(0)
        a.close()
        self.reader_fd = self.reader.fileno()

    def fileno(self):
        # File descriptor the IOLoop should select() on for wake-ups.
        return self.reader.fileno()

    def write_fileno(self):
        # File descriptor of the writing (triggering) end.
        return self.writer.fileno()

    def wake(self):
        # Send a single byte to wake the selecting thread; errors are
        # ignored because a full buffer already guarantees a wake-up.
        try:
            self.writer.send(b"x")
        except (IOError, socket.error):
            pass

    def consume(self):
        # Drain all pending wake-up bytes so the fd stops reading ready.
        try:
            while True:
                result = self.reader.recv(1024)
                if not result:
                    break
        except (IOError, socket.error):
            pass

    def close(self):
        # Close both ends of the socket pair.
        self.reader.close()
        self.writer.close()
| gpl-3.0 |
LeZhang2016/openthread | tests/scripts/thread-cert/Cert_5_8_03_KeyIncrementRollOver.py | 3 | 3496 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import config
import node
# Node ids used by the simulated two-node topology.
LEADER = 1
ROUTER = 2
class Cert_5_8_3_KeyIncrementRollOver(unittest.TestCase):
    """Thread certification 5.8.3: connectivity must survive a key
    sequence counter increment (roll over past the initial value 127)."""

    def setUp(self):
        """Create a simulated leader + router pair with mutual whitelists."""
        self.simulator = config.create_default_simulator()

        self.nodes = {}
        for i in range(1,3):
            self.nodes[i] = node.Node(i, simulator=self.simulator)

        self.nodes[LEADER].set_panid(0xface)
        self.nodes[LEADER].set_mode('rsdn')
        self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
        self.nodes[LEADER].enable_whitelist()
        self.nodes[LEADER].set_key_switch_guardtime(0)
        self.nodes[LEADER].set_key_sequence_counter(127)

        self.nodes[ROUTER].set_panid(0xface)
        self.nodes[ROUTER].set_mode('rsdn')
        self.nodes[ROUTER].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[ROUTER].enable_whitelist()
        self.nodes[ROUTER].set_key_switch_guardtime(0)
        self.nodes[ROUTER].set_router_selection_jitter(1)

    def tearDown(self):
        # BUG FIX: the loop variable was previously named `node`, shadowing
        # the imported `node` module inside this method; renamed to `device`.
        for device in list(self.nodes.values()):
            device.stop()
            device.destroy()
        self.simulator.stop()

    def test(self):
        # Bring up the leader and verify it wins the leader role.
        self.nodes[LEADER].start()
        self.simulator.go(4)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')

        # Attach the router and verify baseline connectivity.
        self.nodes[ROUTER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER].get_state(), 'router')

        addrs = self.nodes[ROUTER].get_addrs()
        for addr in addrs:
            self.assertTrue(self.nodes[LEADER].ping(addr))

        # Increment the key sequence counter and verify pings still succeed.
        key_sequence_counter = self.nodes[LEADER].get_key_sequence_counter()
        self.nodes[LEADER].set_key_sequence_counter(key_sequence_counter + 1)

        addrs = self.nodes[ROUTER].get_addrs()
        for addr in addrs:
            self.assertTrue(self.nodes[LEADER].ping(addr))
# Run the certification test when executed directly.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
YetAnotherMinion/googletest | scripts/upload.py | 2511 | 51024 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import md5
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
try:
import readline
except ImportError:
pass
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.

  The last used email address is saved to a file and offered up as a suggestion
  to the user. If the user presses enter without typing in anything the last
  used email address is used. If the user enters a new address, it is saved
  for next time we prompt.

  """
  last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
  last_email = ""
  if os.path.exists(last_email_file_name):
    try:
      # FIX: use a context manager so the file is closed even if the read
      # raises; the cache file is strictly best-effort, so IOError is ignored.
      with open(last_email_file_name, "r") as last_email_file:
        last_email = last_email_file.readline().strip("\n")
      prompt += " [%s]" % last_email
    except IOError:
      pass
  email = raw_input(prompt + ": ").strip()
  if email:
    try:
      # Same pattern for the write path: close on all paths, ignore failures.
      with open(last_email_file_name, "w") as last_email_file:
        last_email_file.write(email)
    except IOError:
      pass
  else:
    email = last_email
  return email
def StatusUpdate(msg):
  """Print a status message to stdout.

  If 'verbosity' is greater than 0, print the message.

  Args:
    msg: The string to print.
  """
  # `verbosity` is the module-level log level (0 = errors only).
  if verbosity > 0:
    print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit."""
  print >>sys.stderr, msg
  # Exit status 1 signals failure to the calling shell.
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""

  def __init__(self, url, code, msg, headers, args):
    # The body file object passed to HTTPError is deliberately None; the
    # parsed ClientLogin response dict is kept in `args` instead.
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    self.args = args
    # ClientLogin reports the failure kind under the "Error" key
    # (e.g. "BadAuthentication", "CaptchaRequired").
    self.reason = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None, extra_headers={},
save_cookies=False):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
"""
self.host = host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers
self.save_cookies = save_cookies
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = "GOOGLE"
if self.host.endswith(".google.com"):
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("http://%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response and directs us to
authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.reason == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "http://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401:
self._Authenticate()
## elif e.code >= 500 and e.code < 600:
## # Server Error - try again.
## continue
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""

  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()

  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    Returns:
      A urllib2.OpenerDirector object.
    """
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          # Loaded cookies are assumed to still be valid; a 401 from the
          # server will trigger re-authentication in Send() if they are not.
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file so stored credentials stay private.
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Module-level command-line option definitions; parsed by RealMain().
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging options.
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server options.
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default="codereview.appspot.com",
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
# Issue options.
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
# Patch (upload) options.
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
def GetRpcServer(options):
  """Returns an instance of an AbstractRpcServer.

  Args:
    options: Parsed command-line options (uses email, server, host,
      save_cookies).

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  rpc_server_class = HttpRpcServer

  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = options.email
    if email is None:
      email = GetEmail("Email (login for uploading to %s)" % options.server)
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)

  # If this is the dev_appserver, use fake authentication.
  host = (options.host or options.server).lower()
  if host == "localhost" or host.startswith("localhost:"):
    email = options.email
    if email is None:
      email = "test@example.com"
      logging.info("Using debug user %s. Override with --email" % email)
    # The dev_appserver accepts a hand-crafted login cookie instead of a
    # real ClientLogin handshake.
    server = rpc_server_class(
        options.server,
        lambda: (email, "password"),
        host_override=options.host,
        extra_headers={"Cookie":
                       'dev_appserver_login="%s:False"' % email},
        save_cookies=options.save_cookies)
    # Don't try to talk to ClientLogin.
    server.authenticated = True
    return server

  return rpc_server_class(options.server, GetUserCredentials,
                          host_override=options.host,
                          save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
      uploaded as files.

  Returns:
    (content_type, body) ready for httplib.HTTP instance.

  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  boundary = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  parts = []
  # Plain form fields first.
  for (name, value) in fields:
    parts.extend([
        '--' + boundary,
        'Content-Disposition: form-data; name="%s"' % name,
        '',
        value,
    ])
  # Then the file payloads, each with a guessed Content-Type.
  for (name, fname, value) in files:
    parts.extend([
        '--' + boundary,
        'Content-Disposition: form-data; name="%s"; filename="%s"' %
        (name, fname),
        'Content-Type: %s' % GetContentType(fname),
        '',
        value,
    ])
  # Closing boundary, followed by a trailing CRLF.
  parts.append('--' + boundary + '--')
  parts.append('')
  body = '\r\n'.join(parts)
  content_type = 'multipart/form-data; boundary=%s' % boundary
  return content_type, body
def GetContentType(filename):
  """Guess the MIME content-type from *filename*.

  Falls back to the generic binary type when the extension is unknown.
  """
  guessed, _ = mimetypes.guess_type(filename)
  return guessed if guessed else 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# On POSIX we avoid the shell so command lists are passed to execvp directly.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns the output from stdout and the return code.

  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).

  Returns:
    Tuple (output, return code)
  """
  logging.info("Running %s", command)
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines)
  if print_output:
    # Echo stdout line by line while also collecting it.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  p.wait()
  # NOTE(review): stderr is only drained after wait(); a subcommand that
  # fills the stderr pipe buffer could deadlock here -- confirm acceptable
  # for the commands this script runs.
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False):
  """Run *command* and return its stdout, aborting the program on failure.

  Exits via ErrorExit when the command returns a non-zero status, or when
  it produces no output and silent_ok is False.
  """
  output, status = RunShellWithReturnCode(command, print_output,
                                          universal_newlines)
  if status:
    ErrorExit("Got error status from %s:\n%s" % (command, output))
  if not silent_ok and not output:
    ErrorExit("No output from %s" % command)
  return output
class VersionControlSystem(object):
  """Abstract base class providing an interface to the VCS."""

  def __init__(self, options):
    """Constructor.

    Args:
      options: Command line options.
    """
    self.options = options

  def GenerateDiff(self, args):
    """Return the current diff as a string.

    Args:
      args: Extra arguments to pass to the diff command.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def CheckForUnknownFiles(self):
    """Show an "are you sure?" prompt if there are unknown files."""
    unknown_files = self.GetUnknownFiles()
    if unknown_files:
      print "The following files are not added to version control:"
      for line in unknown_files:
        print line
      prompt = "Are you sure to continue?(y/N) "
      answer = raw_input(prompt).strip()
      if answer != "y":
        ErrorExit("User aborted")

  def GetBaseFile(self, filename):
    """Get the content of the upstream version of a file.

    Returns:
      A tuple (base_content, new_content, is_binary, status)
        base_content: The contents of the base file.
        new_content: For text files, this is empty.  For binary files, this is
          the contents of the new file, since the diff output won't contain
          information to reconstruct the current file.
        is_binary: True iff the file is binary.
        status: The status of the file.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetBaseFiles(self, diff):
    """Helper that calls GetBase file for each file in the patch.

    Returns:
      A dictionary that maps from filename to GetBaseFile's tuple.  Filenames
      are retrieved based on lines that start with "Index:" or
      "Property changes on:".
    """
    files = {}
    for line in diff.splitlines(True):
      if line.startswith('Index:') or line.startswith('Property changes on:'):
        unused, filename = line.split(':', 1)
        # On Windows if a file has property changes its filename uses '\'
        # instead of '/'.
        filename = filename.strip().replace('\\', '/')
        files[filename] = self.GetBaseFile(filename)
    return files

  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                      files):
    """Uploads the base files (and if necessary, the current ones as well)."""

    def UploadFile(filename, file_id, content, is_binary, status, is_base):
      """Uploads a file to the server."""
      file_too_large = False
      if is_base:
        type = "base"
      else:
        type = "current"
      if len(content) > MAX_UPLOAD_SIZE:
        # Oversized contents are replaced with an empty body plus a flag so
        # the server knows the file was skipped.
        print ("Not uploading the %s file for %s because it's too large." %
               (type, filename))
        file_too_large = True
        content = ""
      checksum = md5.new(content).hexdigest()
      if options.verbose > 0 and not file_too_large:
        print "Uploading %s file for %s" % (type, filename)
      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
      form_fields = [("filename", filename),
                     ("status", status),
                     ("checksum", checksum),
                     ("is_binary", str(is_binary)),
                     ("is_current", str(not is_base)),
                    ]
      if file_too_large:
        form_fields.append(("file_too_large", "1"))
      if options.email:
        form_fields.append(("user", options.email))
      ctype, body = EncodeMultipartFormData(form_fields,
                                            [("data", filename, content)])
      response_body = rpc_server.Send(url, body,
                                      content_type=ctype)
      if not response_body.startswith("OK"):
        StatusUpdate(" --> %s" % response_body)
        sys.exit(1)

    # Invert patch_list into filename -> patch-key form.
    patches = dict()
    [patches.setdefault(v, k) for k, v in patch_list]
    for filename in patches.keys():
      base_content, new_content, is_binary, status = files[filename]
      file_id_str = patches.get(filename)
      # A "nobase" marker in the key means the server doesn't want the base
      # file; strip the marker and keep only the numeric id.
      if file_id_str.find("nobase") != -1:
        base_content = None
        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
      file_id = int(file_id_str)
      if base_content != None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content != None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)

  def IsImage(self, filename):
    """Returns true if the filename has an image extension."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False
    return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Subversion."""

  def __init__(self, options):
    super(SubversionVCS, self).__init__(options)
    if self.options.revision:
      # Accept "REV" or "REV_START:REV_END".
      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
      if not match:
        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
      self.rev_start = match.group(1)
      self.rev_end = match.group(3)
    else:
      self.rev_start = self.rev_end = None
    # Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
    self.svnls_cache = {}
    # SVN base URL is required to fetch files deleted in an older revision.
    # Result is cached to not guess it over and over again in GetBaseFile().
    required = self.options.download_base or self.options.revision is not None
    self.svn_base = self._GuessBase(required)

  def GuessBase(self, required):
    """Wrapper for _GuessBase."""
    return self.svn_base

  def _GuessBase(self, required):
    """Returns the SVN base URL.

    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    info = RunShell(["svn", "info"])
    for line in info.splitlines():
      words = line.split()
      if len(words) == 2 and words[0] == "URL:":
        url = words[1]
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        username, netloc = urllib.splituser(netloc)
        if username:
          logging.info("Removed username from base URL")
        # Known hosting setups get special-cased viewvc-style base URLs.
        if netloc.endswith("svn.python.org"):
          if netloc == "svn.python.org":
            if path.startswith("/projects/"):
              path = path[9:]
          elif netloc != "pythondev@svn.python.org":
            ErrorExit("Unrecognized Python URL: %s" % url)
          base = "http://svn.python.org/view/*checkout*%s/" % path
          logging.info("Guessed Python base = %s", base)
        elif netloc.endswith("svn.collab.net"):
          if path.startswith("/repos/"):
            path = path[6:]
          base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
          logging.info("Guessed CollabNet base = %s", base)
        elif netloc.endswith(".googlecode.com"):
          path = path + "/"
          base = urlparse.urlunparse(("http", netloc, path, params,
                                      query, fragment))
          logging.info("Guessed Google Code base = %s", base)
        else:
          path = path + "/"
          base = urlparse.urlunparse((scheme, netloc, path, params,
                                      query, fragment))
          logging.info("Guessed base = %s", base)
        return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None

  def GenerateDiff(self, args):
    """Return "svn diff" output, exiting if it contains no valid patches."""
    cmd = ["svn", "diff"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(args)
    data = RunShell(cmd)
    count = 0
    for line in data.splitlines():
      if line.startswith("Index:") or line.startswith("Property changes on:"):
        count += 1
        logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from svn diff")
    return data

  def _CollapseKeywords(self, content, keyword_str):
    """Collapses SVN keywords."""
    # svn cat translates keywords but svn diff doesn't. As a result of this
    # behavior patching.PatchChunks() fails with a chunk mismatch error.
    # This part was originally written by the Review Board development team
    # who had the same problem (http://reviews.review-board.org/r/276/).
    # Mapping of keywords to known aliases
    svn_keywords = {
      # Standard keywords
      'Date':                ['Date', 'LastChangedDate'],
      'Revision':            ['Revision', 'LastChangedRevision', 'Rev'],
      'Author':              ['Author', 'LastChangedBy'],
      'HeadURL':             ['HeadURL', 'URL'],
      'Id':                  ['Id'],
      # Aliases
      'LastChangedDate':     ['LastChangedDate', 'Date'],
      'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
      'LastChangedBy':       ['LastChangedBy', 'Author'],
      'URL':                 ['URL', 'HeadURL'],
    }

    def repl(m):
      if m.group(2):
        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
      return "$%s$" % m.group(1)

    keywords = [keyword
                for name in keyword_str.split(" ")
                for keyword in svn_keywords.get(name, [])]
    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)

  def GetUnknownFiles(self):
    """Return status lines for files svn marks with '?' (untracked)."""
    status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files

  def ReadFile(self, filename):
    """Returns the contents of a file."""
    file = open(filename, 'rb')
    result = ""
    try:
      result = file.read()
    finally:
      file.close()
    return result

  def GetStatus(self, filename):
    """Returns the status of a file."""
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n".  See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      old_files, new_files = self.svnls_cache[dirname]
      # NOTE(review): GetBaseFile() indexes status[3]; these literals look
      # like they lost their svn-column padding (e.g. "D   ") when this file
      # was archived -- confirm against upstream upload.py.
      if relfilename in old_files and relfilename not in new_files:
        status = "D "
      elif relfilename in old_files and relfilename in new_files:
        status = "M "
      else:
        status = "A "
    return status

  def GetBaseFile(self, filename):
    """See VersionControlSystem.GetBaseFile for the return contract."""
    status = self.GetStatus(filename)
    base_content = None
    new_content = None

    # If a file is copied its status will be "A  +", which signifies
    # "addition-with-history".  See "svn st" for more information.  We need
    # to upload the original file or else diff parsing will fail if the file
    # was edited.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = mimetype and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      get_base = False
      is_binary = mimetype and not mimetype.startswith("text/")
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True, silent_ok=True)
        else:
          base_content = ""
      else:
        get_base = True
      if get_base:
        # Binary content must not have its newlines translated.
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content = RunShell(["svn", "cat", filename],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        if not is_binary:
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          if keywords and not returncode:
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""

  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> hash of base file.
    self.base_hashes = {}

  def GenerateDiff(self, extra_args):
    """Return a git diff converted into svn-style format.

    Also populates self.base_hashes as a side effect, for later use by
    GetBaseFile().
    """
    # This is more complicated than svn's GenerateDiff because we must convert
    # the diff output to include an svn-style "Index:" line as well as record
    # the hashes of the base files, so we can upload them along with our diff.
    if self.options.revision:
      extra_args = [self.options.revision] + extra_args
    gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args)
    svndiff = []
    filecount = 0
    filename = None
    for line in gitdiff.splitlines():
      match = re.match(r"diff --git a/(.*) b/.*$", line)
      if match:
        filecount += 1
        filename = match.group(1)
        svndiff.append("Index: %s\n" % filename)
      else:
        # The "index" line in a git diff looks like this (long hashes elided):
        # index 82c0d44..b2cee3f 100755
        # We want to save the left hash, as that identifies the base file.
        match = re.match(r"index (\w+)\.\.", line)
        if match:
          self.base_hashes[filename] = match.group(1)
      svndiff.append(line + "\n")
    if not filecount:
      ErrorExit("No valid patches found in output from git diff")
    return "".join(svndiff)

  def GetUnknownFiles(self):
    """Return files git reports as untracked (respecting ignore rules)."""
    status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                      silent_ok=True)
    return status.splitlines()

  def GetBaseFile(self, filename):
    """See VersionControlSystem.GetBaseFile for the return contract."""
    hash = self.base_hashes[filename]
    base_content = None
    new_content = None
    is_binary = False
    if hash == "0" * 40:  # All-zero hash indicates no base file.
      status = "A"
      base_content = ""
    else:
      status = "M"
      base_content, returncode = RunShellWithReturnCode(["git", "show", hash])
      if returncode:
        ErrorExit("Got error status from 'git show %s'" % hash)
    return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # Default to the working directory's parent revision.
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), filename
    return filename[len(self.subdir):].lstrip(r"\/")

  def GenerateDiff(self, extra_args):
    """Return "hg diff --git" output converted into svn-style format."""
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                      silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def GetBaseFile(self, filename):
    """See VersionControlSystem.GetBaseFile for the return contract."""
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    if len(out) > 1:
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    else:
      status, _ = out[0].split(' ', 1)
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True, universal_newlines=False)
    if not is_binary or not self.IsImage(relpath):
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
    pertaining to filename.
  """
  result = []
  current_name = None
  current_lines = []

  def flush():
    # Close out the diff collected so far; lines seen before the first
    # "Index:" header have no filename and are dropped.
    if current_name and current_lines:
      result.append((current_name, ''.join(current_lines)))

  for line in data.splitlines(True):
    started = None
    if line.startswith('Index:'):
      started = line.split(':', 1)[1].strip()
    elif line.startswith('Property changes on:'):
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows.  Normalize so
      # the same file doesn't show up twice.
      candidate = line.split(':', 1)[1].strip().replace('\\', '/')
      if candidate != current_name:
        # Property changes without content modifications start a new diff.
        started = candidate
    if started:
      flush()
      current_name = started
      current_lines = [line]
    else:
      current_lines.append(line)
  flush()
  return result
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.

  Returns a list of [patch_key, filename] for each file.
  """
  patches = SplitPatch(data)
  rv = []
  for patch in patches:
    # patch is a (filename, diff_text) tuple from SplitPatch().
    if len(patch[1]) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + patch[0] +
             " because the file is too large.")
      continue
    form_fields = [("filename", patch[0])]
    if not options.download_base:
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", patch[1])]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    print "Uploading patch for " + patch[0]
    response_body = rpc_server.Send(url, body, content_type=ctype)
    lines = response_body.splitlines()
    # On success the server replies "OK" followed by the patch key.
    if not lines or lines[0] != "OK":
      StatusUpdate(" --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], patch[0]])
  return rv
def GuessVCS(options):
  """Helper to guess the version control system.

  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns an instance of the appropriate class.  Exit with an
  error if we can't figure it out.

  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  try:
    out, returncode = RunShellWithReturnCode(["hg", "root"])
    if returncode == 0:
      return MercurialVCS(options, out.strip())
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have hg installed.
      raise
  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return SubversionVCS(options)
  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                              "--is-inside-work-tree"])
    if returncode == 0:
      return GitVCS(options)
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have git installed.
      raise
  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def RealMain(argv, data=None):
  """The real main function.

  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().

  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  # Force a predictable locale so VCS output parses consistently.
  os.environ['LC_ALL'] = 'C'
  options, args = parser.parse_args(argv[1:])
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)
  vcs = GuessVCS(options)
  if isinstance(vcs, SubversionVCS):
    # base field is only allowed for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    base = vcs.GuessBase(options.download_base)
  else:
    base = None
  # NOTE(review): this assignment is a no-op -- the condition already
  # requires options.download_base to be true.  Confirm against upstream
  # upload.py whether the intended behavior was to flip the flag.
  if not base and options.download_base:
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  files = vcs.GetBaseFiles(data)
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options)
  form_fields = [("subject", message)]
  if base:
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  # Sanity-check reviewer/CC addresses before sending anything.
  if options.reviewers:
    for reviewer in options.reviewers.split(','):
      if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      if "@" in cc and not cc.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % cc)
    form_fields.append(("cc", options.cc))
  description = options.description
  if options.description_file:
    if options.description:
      ErrorExit("Can't specify description and description_file")
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5.new(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  if len(data) > MAX_UPLOAD_SIZE:
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  if not options.download_base or not uploaded_diff_file:
    # Response format: message line, patchset id line, then one
    # "patch_key filename" pair per line.
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  StatusUpdate(msg)
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    sys.exit(0)
  # The issue id is the last path component of the URL in the message.
  issue = msg[msg.rfind("/")+1:]
  if not uploaded_diff_file:
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result
  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
  if options.send_mail:
    rpc_server.Send("/" + issue + "/mail", payload="")
  return issue, patchset
def main():
    """Entry point: run RealMain with the process arguments, turning a
    Ctrl-C (KeyboardInterrupt) into a clean status message and exit code 1.

    NOTE(review): this file is Python 2 code (bare ``print`` statement below).
    """
    try:
        RealMain(sys.argv)
    except KeyboardInterrupt:
        # Emit a newline so the status message starts on its own line.
        print
        StatusUpdate("Interrupted.")
        sys.exit(1)


if __name__ == "__main__":
    main()
| bsd-3-clause |
yanboliang/spark | examples/src/main/python/parquet_inputformat.py | 51 | 2390 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Read data file users.parquet in local Spark distro:
$ cd $SPARK_HOME
$ export AVRO_PARQUET_JARS=/path/to/parquet-avro-1.5.0.jar
$ ./bin/spark-submit --driver-class-path /path/to/example/jar \\
--jars $AVRO_PARQUET_JARS \\
./examples/src/main/python/parquet_inputformat.py \\
examples/src/main/resources/users.parquet
<...lots of log output...>
{u'favorite_color': None, u'name': u'Alyssa', u'favorite_numbers': [3, 9, 15, 20]}
{u'favorite_color': u'red', u'name': u'Ben', u'favorite_numbers': []}
<...more log output...>
"""
from __future__ import print_function
import sys
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # Exactly one positional argument (the Parquet data file) is required.
    if len(sys.argv) != 2:
        print("""
        Usage: parquet_inputformat.py <data_file>
        Run with example jar:
        ./bin/spark-submit --driver-class-path /path/to/example/jar \\
        /path/to/examples/parquet_inputformat.py <data_file>
        Assumes you have Parquet data stored in <data_file>.
        """, file=sys.stderr)
        sys.exit(-1)

    path = sys.argv[1]

    spark = SparkSession\
        .builder\
        .appName("ParquetInputFormat")\
        .getOrCreate()
    sc = spark.sparkContext

    # Read the file through the Hadoop InputFormat API; the valueConverter
    # turns Avro IndexedRecord values into Java objects that Py4J can ship
    # back to Python (keys are Void and discarded below).
    parquet_rdd = sc.newAPIHadoopFile(
        path,
        'org.apache.parquet.avro.AvroParquetInputFormat',
        'java.lang.Void',
        'org.apache.avro.generic.IndexedRecord',
        valueConverter='org.apache.spark.examples.pythonconverters.IndexedRecordToJavaConverter')
    # Keep only the record values and print each one.
    output = parquet_rdd.map(lambda x: x[1]).collect()
    for k in output:
        print(k)
    spark.stop()
| apache-2.0 |
vipul-sharma20/oh-mainline | vendor/packages/python-openid/openid/test/test_urinorm.py | 77 | 1298 | import os
import unittest
import openid.urinorm
class UrinormTest(unittest.TestCase):
    """One data-driven URI-normalization test case.

    Each case holds a description, an input URI, and the expected normalized
    form; the literal expected value 'fail' means normalization must raise
    ValueError. NOTE(review): Python 2 only (``except ..., why`` syntax and
    ``unicode`` below).
    """

    def __init__(self, desc, case, expected):
        unittest.TestCase.__init__(self)
        self.desc = desc
        self.case = case
        self.expected = expected

    def shortDescription(self):
        # Shown by unittest instead of the (absent) docstring of runTest.
        return self.desc

    def runTest(self):
        try:
            actual = openid.urinorm.urinorm(self.case)
        except ValueError, why:
            # A ValueError is only acceptable when the case expects failure.
            self.assertEqual(self.expected, 'fail', why)
        else:
            self.assertEqual(actual, self.expected)

    def parse(cls, full_case):
        """Build a test from a three-line block: description, input, expected."""
        desc, case, expected = full_case.split('\n')
        case = unicode(case, 'utf-8')
        return cls(desc, case, expected)
    parse = classmethod(parse)
def parseTests(test_data):
    """Split *test_data* on blank-line separators and build a UrinormTest
    from every non-empty block, preserving file order."""
    blocks = (chunk.strip() for chunk in test_data.split('\n\n'))
    return [UrinormTest.parse(chunk) for chunk in blocks if chunk]
def pyUnitTests():
    """Load urinorm.txt from this test's directory and return a TestSuite
    of the cases it contains.

    NOTE(review): Python 2 only — uses the ``file()`` builtin.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    test_data_file_name = os.path.join(here, 'urinorm.txt')
    test_data_file = file(test_data_file_name)
    test_data = test_data_file.read()
    test_data_file.close()

    tests = parseTests(test_data)
    return unittest.TestSuite(tests)
| agpl-3.0 |
Dhivyap/ansible | lib/ansible/modules/network/f5/bigip_smtp.py | 38 | 17741 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_smtp
short_description: Manages SMTP settings on the BIG-IP
description:
- Allows configuring of the BIG-IP to send mail via an SMTP server by
configuring the parameters of an SMTP server.
version_added: 2.6
options:
name:
description:
- Specifies the name of the SMTP server configuration.
type: str
required: True
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
smtp_server:
description:
- SMTP server host name in the format of a fully qualified domain name.
- This value is required when create a new SMTP configuration.
type: str
smtp_server_port:
description:
- Specifies the SMTP port number.
- When creating a new SMTP configuration, the default is C(25) when
C(encryption) is C(none) or C(tls). The default is C(465) when C(ssl) is selected.
type: int
local_host_name:
description:
- Host name used in SMTP headers in the format of a fully qualified
domain name. This setting does not refer to the BIG-IP system's hostname.
type: str
from_address:
description:
- Email address that the email is being sent from. This is the "Reply-to"
address that the recipient sees.
type: str
encryption:
description:
- Specifies whether the SMTP server requires an encrypted connection in
order to send mail.
type: str
choices:
- none
- ssl
- tls
authentication:
description:
- Credentials can be set on an SMTP server's configuration even if that
authentication is not used (think staging configs or emergency changes).
This parameter acts as a switch to make the specified C(smtp_server_username)
and C(smtp_server_password) parameters active or not.
- When C(yes), the authentication parameters will be active.
- When C(no), the authentication parameters will be inactive.
type: bool
smtp_server_username:
description:
- User name that the SMTP server requires when validating a user.
type: str
smtp_server_password:
description:
- Password that the SMTP server requires when validating a user.
type: str
state:
description:
- When C(present), ensures that the SMTP configuration exists.
- When C(absent), ensures that the SMTP configuration does not exist.
type: str
choices:
- present
- absent
default: present
update_password:
description:
- Passwords are stored encrypted, so the module cannot know if the supplied
C(smtp_server_password) is the same or different than the existing password.
This parameter controls the updating of the C(smtp_server_password)
credential.
- When C(always), will always update the password.
- When C(on_create), will only set the password for newly created SMTP server
configurations.
type: str
choices:
- always
- on_create
default: always
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a base SMTP server configuration
bigip_smtp:
name: my-smtp
smtp_server: 1.1.1.1
smtp_server_username: mail-admin
smtp_server_password: mail-secret
local_host_name: smtp.mydomain.com
from_address: no-reply@mydomain.com
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
smtp_server:
description: The new C(smtp_server) value of the SMTP configuration.
returned: changed
type: str
sample: mail.mydomain.com
smtp_server_port:
description: The new C(smtp_server_port) value of the SMTP configuration.
returned: changed
type: int
sample: 25
local_host_name:
description: The new C(local_host_name) value of the SMTP configuration.
returned: changed
type: str
sample: smtp.mydomain.com
from_address:
description: The new C(from_address) value of the SMTP configuration.
returned: changed
type: str
sample: no-reply@mydomain.com
encryption:
description: The new C(encryption) value of the SMTP configuration.
returned: changed
type: str
sample: tls
authentication:
description: Whether the authentication parameters are active or not.
returned: changed
type: bool
sample: yes
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import is_valid_hostname
from library.module_utils.network.f5.ipaddress import is_valid_ip
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import is_valid_hostname
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
class Parameters(AnsibleF5Parameters):
    """Shared parameter definitions for the BIG-IP SMTP module.

    Maps between the iControl REST attribute names (camelCase) and the
    Ansible module option names (snake_case), and declares which fields are
    sent to the API, returned to the user, and compared for updates.
    """

    # REST API attribute name -> module option name.
    api_map = {
        'username': 'smtp_server_username',
        'passwordEncrypted': 'smtp_server_password',
        'localHostName': 'local_host_name',
        'smtpServerHostName': 'smtp_server',
        'smtpServerPort': 'smtp_server_port',
        'encryptedConnection': 'encryption',
        'authenticationEnabled': 'authentication_enabled',
        'authenticationDisabled': 'authentication_disabled',
        'fromAddress': 'from_address',
    }

    # Attributes serialized into API create/update payloads.
    api_attributes = [
        'username',
        'passwordEncrypted',
        'localHostName',
        'smtpServerHostName',
        'smtpServerPort',
        'encryptedConnection',
        'authenticationEnabled',
        'authenticationDisabled',
        'fromAddress',
    ]

    # Fields reported back in the module result.
    returnables = [
        'smtp_server_username',
        'smtp_server_password',
        'local_host_name',
        'smtp_server',
        'smtp_server_port',
        'encryption',
        'authentication',
        'from_address',
    ]

    # Fields compared between want/have to decide whether an update is needed.
    updatables = [
        'smtp_server_username',
        'smtp_server_password',
        'local_host_name',
        'smtp_server',
        'smtp_server_port',
        'encryption',
        'authentication',
        'from_address',
    ]
class ApiParameters(Parameters):
    """Parameters as read from the device's REST API (no extra coercion)."""
    pass
class ModuleParameters(Parameters):
    """Parameters as supplied by the user, with validation/coercion."""

    @property
    def local_host_name(self):
        """Validated local host name: accepts an IP address or a
        reasonably well-formed hostname; raises F5ModuleError otherwise."""
        if self._values['local_host_name'] is None:
            return None
        if is_valid_ip(self._values['local_host_name']):
            return self._values['local_host_name']
        elif is_valid_hostname(self._values['local_host_name']):
            # else fallback to checking reasonably well formatted hostnames
            return str(self._values['local_host_name'])
        raise F5ModuleError(
            "The provided 'local_host_name' value {0} is not a valid IP or hostname".format(
                str(self._values['local_host_name'])
            )
        )

    @property
    def authentication_enabled(self):
        # True only when the user explicitly enabled authentication;
        # None (unset) and False both fall through to an implicit None.
        if self._values['authentication'] is None:
            return None
        if self._values['authentication']:
            return True

    @property
    def authentication_disabled(self):
        # Mirror of authentication_enabled: True only when explicitly disabled.
        if self._values['authentication'] is None:
            return None
        if not self._values['authentication']:
            return True

    @property
    def smtp_server_port(self):
        # Coerce the user-supplied port to int for comparison with API data.
        if self._values['smtp_server_port'] is None:
            return None
        return int(self._values['smtp_server_port'])
class Changes(Parameters):
    """Holds the computed change set and renders it for the module result."""

    def to_return(self):
        """Return a dict of all returnable fields that have values.

        NOTE(review): the broad ``except Exception: pass`` mirrors the
        pattern used throughout the F5 modules — a property that raises is
        silently dropped from the result rather than failing the run.
        """
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            pass
        return result
class UsableChanges(Changes):
    """Change set in the form sent to the device API."""
    pass
class ReportableChanges(Changes):
    """Change set in the form reported to the user.

    Credentials are masked: both password and username are forced to None so
    they never appear in the module's return data or logs.
    """

    @property
    def smtp_server_password(self):
        return None

    @property
    def smtp_server_username(self):
        return None
class Difference(object):
    """Computes the difference between desired (want) and current (have)
    parameters.

    ``compare(param)`` first looks for a specialized property on this class;
    if none exists, a generic inequality check is used. Specialized handling
    exists for the password (policy-driven, since stored passwords are
    encrypted and unreadable) and the authentication enable/disable pair.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the change for *param*, or None when nothing changed."""
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Generic diff: report the wanted value when it differs from the
        # current one, or when the current side lacks the attribute entirely.
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            return attr1

    @property
    def smtp_server_password(self):
        # Device-stored passwords are encrypted, so equality cannot be
        # checked; honor the 'update_password' policy instead.
        if self.want.update_password == 'on_create':
            return None
        return self.want.smtp_server_password

    @property
    def authentication(self):
        if self.want.authentication_enabled:
            if self.want.authentication_enabled != self.have.authentication_enabled:
                return dict(
                    authentication_enabled=self.want.authentication_enabled
                )
        if self.want.authentication_disabled:
            if self.want.authentication_disabled != self.have.authentication_disabled:
                # Bug fix: key was misspelled 'authentication_disable', so
                # disabling authentication produced a key that matches neither
                # the api_map nor the updatables list and was silently dropped.
                return dict(
                    authentication_disabled=self.want.authentication_disabled
                )
class ModuleManager(object):
    """Drives the present/absent state machine against the BIG-IP REST API.

    Holds the desired state (want), the device state (have), and the
    computed change set (changes); all HTTP calls target the
    /mgmt/tm/sys/smtp-server collection.
    """

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        """On create: every user-supplied returnable becomes a change."""
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        """On update: diff want vs have; returns True if anything changed."""
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # dict-valued diffs (e.g. authentication) carry their own keys.
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Run the requested state transition and build the module result."""
        changed = False
        result = dict()
        state = self.want.state

        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()

        # Report changes with credentials masked (ReportableChanges).
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Forward any deprecation warnings queued under '__warnings'.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()

    def absent(self):
        if self.exists():
            return self.remove()
        return False

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True

    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Verify the delete actually took effect.
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def exists(self):
        """Return True if the named SMTP configuration exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/sys/smtp-server/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            # Non-JSON response is treated as "does not exist".
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True

    def create_on_device(self):
        """POST the new SMTP server configuration; raise F5ModuleError on 4xx."""
        params = self.want.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/sys/smtp-server/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def update_on_device(self):
        """PATCH the changed attributes onto the existing configuration."""
        params = self.want.api_params()
        uri = "https://{0}:{1}/mgmt/tm/sys/smtp-server/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def remove_from_device(self):
        """DELETE the configuration; returns True on HTTP 200."""
        uri = "https://{0}:{1}/mgmt/tm/sys/smtp-server/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.delete(uri)
        if resp.status == 200:
            return True

    def read_current_from_device(self):
        """GET the current configuration and wrap it in ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/sys/smtp-server/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Builds the AnsibleModule argument spec for this module.

    Merges the common F5 connection options (f5_argument_spec) with the
    module-specific options declared here.
    """

    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            name=dict(required=True),
            smtp_server=dict(),
            smtp_server_port=dict(type='int'),
            smtp_server_username=dict(no_log=True),
            smtp_server_password=dict(no_log=True),
            local_host_name=dict(),
            encryption=dict(choices=['none', 'ssl', 'tls']),
            update_password=dict(
                default='always',
                choices=['always', 'on_create']
            ),
            from_address=dict(),
            authentication=dict(type='bool'),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            ),
            state=dict(
                default='present',
                choices=['present', 'absent']
            )
        )
        # Common options first so module-specific ones take precedence.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the module, run the manager, and translate
    F5ModuleError into a module failure."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    try:
        mm = ModuleManager(module=module)
        results = mm.exec_module()
        module.exit_json(**results)
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))


if __name__ == '__main__':
    main()
| gpl-3.0 |
tjcsl/cslbot | cslbot/commands/urbit.py | 1 | 1294 | # -*- coding: utf-8 -*-
# Copyright (C) 2013-2018 Samuel Damashek, Peter Foley, James Forcier, Srijay Kasturi, Reed Koser, Christopher Reffett, and Tris Wilson
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import random
from ..helpers.command import Command
# Vocabulary of Urbit jargon the command samples from.
_BITS = (
    'loom',
    'arvo',
    'hoon',
    'pier',
    'ames',
    'zod',
    'pill',
    'clay',
    'herb',
    'dojo',
    'jets',
    'urbit',
)


@Command('urbit')
def cmd(send, msg, args):
    """An operating function.
    Syntax: {command}
    """
    # Emit three independently-sampled jargon words, space-separated.
    send(' '.join(random.choice(_BITS) for _ in range(3)))
| gpl-2.0 |
dronefly/dronefly.github.io | flask/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/pulldom.py | 1729 | 2302 | from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
    """Tree walker over an xml.dom.pulldom event stream.

    Events are processed one step behind the stream (``previous``) so that
    when a void element is emitted as an EmptyTag, the matching END_ELEMENT
    event can be recognized and suppressed (``ignore_until``).
    """

    def __iter__(self):
        ignore_until = None
        previous = None
        for event in self.tree:
            if previous is not None and \
                    (ignore_until is None or previous[1] is ignore_until):
                if previous[1] is ignore_until:
                    # Reached the node whose END_ELEMENT we were skipping.
                    ignore_until = None
                for token in self.tokens(previous, event):
                    yield token
                    if token["type"] == "EmptyTag":
                        # Void element: suppress events until its END_ELEMENT.
                        ignore_until = previous[1]
            previous = event
        # Flush the final buffered event.
        if ignore_until is None or previous[1] is ignore_until:
            for token in self.tokens(previous, None):
                yield token
        elif ignore_until is not None:
            raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")

    def tokens(self, event, next):
        """Translate one pulldom event into walker tokens.

        *next* is the following event (or None at end of stream); it is used
        to decide whether a void element has children, which is an error.
        """
        type, node = event
        if type == START_ELEMENT:
            name = node.nodeName
            namespace = node.namespaceURI
            attrs = {}
            for attr in list(node.attributes.keys()):
                attr = node.getAttributeNode(attr)
                attrs[(attr.namespaceURI, attr.localName)] = attr.value
            if name in voidElements:
                # hasChildren flag: true unless the very next event belongs
                # to this same node (i.e. its own END_ELEMENT).
                for token in self.emptyTag(namespace,
                                           name,
                                           attrs,
                                           not next or next[1] is not node):
                    yield token
            else:
                yield self.startTag(namespace, name, attrs)

        elif type == END_ELEMENT:
            name = node.nodeName
            namespace = node.namespaceURI
            if name not in voidElements:
                yield self.endTag(namespace, name)

        elif type == COMMENT:
            yield self.comment(node.nodeValue)

        elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
            for token in self.text(node.nodeValue):
                yield token

        else:
            yield self.unknown(type)
| apache-2.0 |
timabell/gpodder | src/gpodder/gtkui/interface/configeditor.py | 3 | 4660 | # -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2010 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import gtk
from xml.sax import saxutils
import gpodder
_ = gpodder.gettext
from gpodder.gtkui.config import ConfigModel
from gpodder.gtkui.interface.common import BuilderWidget
class gPodderConfigEditor(BuilderWidget):
    """Advanced configuration editor dialog (gtk TreeView over ConfigModel).

    Model columns referenced below (by index): 0=setting name, 1=type name,
    2=value text, 3=Python type, 4=text-editable flag, 5=style, 6=bool flag,
    7=toggle state — presumably defined by ConfigModel; verify there.
    """

    finger_friendly_widgets = ['btnShowAll', 'btnClose', 'configeditor']

    def new(self):
        """Build the two-column view, hook the renderers, and attach the
        filtered config model."""
        name_column = gtk.TreeViewColumn(_('Setting'))
        name_renderer = gtk.CellRendererText()
        name_column.pack_start(name_renderer)
        name_column.add_attribute(name_renderer, 'text', 0)
        name_column.add_attribute(name_renderer, 'style', 5)
        self.configeditor.append_column(name_column)

        # Value column shows a checkbox for booleans and editable text otherwise.
        value_column = gtk.TreeViewColumn(_('Set to'))
        value_check_renderer = gtk.CellRendererToggle()
        value_column.pack_start(value_check_renderer, expand=False)
        value_column.add_attribute(value_check_renderer, 'active', 7)
        value_column.add_attribute(value_check_renderer, 'visible', 6)
        value_column.add_attribute(value_check_renderer, 'activatable', 6)
        value_check_renderer.connect('toggled', self.value_toggled)
        value_renderer = gtk.CellRendererText()
        value_column.pack_start(value_renderer)
        value_column.add_attribute(value_renderer, 'text', 2)
        value_column.add_attribute(value_renderer, 'visible', 4)
        value_column.add_attribute(value_renderer, 'editable', 4)
        value_column.add_attribute(value_renderer, 'style', 5)
        value_renderer.connect('edited', self.value_edited)
        self.configeditor.append_column(value_column)

        self.model = ConfigModel(self._config)
        self.filter = self.model.filter_new()
        self.filter.set_visible_func(self.visible_func)
        self.configeditor.set_model(self.filter)
        self.configeditor.set_rules_hint(True)
        self.configeditor.get_selection().connect( 'changed',
                self.on_configeditor_row_changed )

    def visible_func(self, model, iter, user_data=None):
        """Row filter: match the search text against name or value."""
        text = self.entryFilter.get_text().lower()
        if text == '':
            return True
        else:
            # either the variable name or its value
            return (text in model.get_value(iter, 0).lower() or
                    text in model.get_value(iter, 2).lower())

    def value_edited(self, renderer, path, new_text):
        """Apply an edited text value; show a typed error on rejection."""
        model = self.configeditor.get_model()
        iter = model.get_iter(path)
        name = model.get_value(iter, 0)
        type_cute = model.get_value(iter, 1)
        if not self._config.update_field(name, new_text):
            message = _('Cannot set %(field)s to %(value)s. Needed data type: %(datatype)s')
            d = {'field': saxutils.escape(name),
                 'value': saxutils.escape(new_text),
                 'datatype': saxutils.escape(type_cute)}
            self.notification(message % d, _('Error setting option'))

    def value_toggled(self, renderer, path):
        """Checkbox handler: flip boolean options only."""
        model = self.configeditor.get_model()
        iter = model.get_iter(path)
        field_name = model.get_value(iter, 0)
        field_type = model.get_value(iter, 3)
        # Flip the boolean config flag
        if field_type == bool:
            self._config.toggle_flag(field_name)

    def on_entryFilter_changed(self, widget):
        self.filter.refilter()

    def on_btnShowAll_clicked(self, widget):
        # Clear the filter and refocus the search entry.
        self.entryFilter.set_text('')
        self.entryFilter.grab_focus()

    def on_btnClose_clicked(self, widget):
        self.gPodderConfigEditor.destroy()

    def on_gPodderConfigEditor_destroy(self, widget):
        # Detach the model's config observer so it doesn't outlive the dialog.
        self.model.stop_observing()

    def on_configeditor_row_changed(self, treeselection):
        """Show the selected option's description below the list."""
        model, iter = treeselection.get_selected()
        if iter is not None:
            option_name = self._config.get_description( model.get(iter, 0)[0] )
            self.config_option_description_label.set_text(option_name)
| gpl-3.0 |
promptworks/horizon | openstack_dashboard/dashboards/admin/images/forms.py | 52 | 2166 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images.images \
import forms as images_forms
class AdminCreateImageForm(images_forms.CreateImageForm):
    """Admin-panel alias of the project image creation form (no changes)."""
    pass
class AdminUpdateImageForm(images_forms.UpdateImageForm):
    """Admin-panel alias of the project image update form (no changes)."""
    pass
class UpdateMetadataForm(forms.SelfHandlingForm):
    """Self-handling form that replaces an image's custom properties.

    The new metadata arrives in ``self.data['metadata']`` as a JSON list of
    ``{"key": ..., "value": ...}`` items; any existing property missing from
    the new set is removed from the image.
    """

    def handle(self, request, data):
        # Renamed from 'id', which shadowed the builtin of the same name.
        image_id = self.initial['id']
        old_metadata = self.initial['metadata']
        try:
            new_metadata = json.loads(self.data['metadata'])
            metadata = dict(
                (item['key'], str(item['value']))
                for item in new_metadata
            )
            # Properties present before but absent from the new set are deleted.
            remove_props = [key for key in old_metadata if key not in metadata]
            api.glance.image_update_properties(request,
                                               image_id,
                                               remove_props,
                                               **metadata)
            message = _('Metadata successfully updated.')
            messages.success(request, message)
        except Exception:
            exceptions.handle(request,
                              _('Unable to update the image metadata.'))
            return False
        return True
| apache-2.0 |
chouseknecht/ansible | lib/ansible/modules/cloud/vmware/vmware_export_ovf.py | 10 | 15676 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Diane Wang <dianew@vmware.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vmware_export_ovf
short_description: Exports a VMware virtual machine to an OVF file, device files and a manifest file
description: >
This module can be used to export a VMware virtual machine to OVF template from vCenter server or ESXi host.
version_added: '2.8'
author:
- Diane Wang (@Tomorrow9) <dianew@vmware.com>
requirements:
- python >= 2.6
- PyVmomi
notes: []
options:
name:
description:
- Name of the virtual machine to export.
- This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
type: str
uuid:
description:
- Uuid of the virtual machine to export.
- This is a required parameter, if parameter C(name) or C(moid) is not supplied.
type: str
moid:
description:
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
- This is required if C(name) or C(uuid) is not supplied.
version_added: '2.9'
type: str
datacenter:
default: ha-datacenter
description:
- Datacenter name of the virtual machine to export.
- This parameter is case sensitive.
type: str
folder:
description:
- Destination folder, absolute path to find the specified guest.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter.
- This parameter is case sensitive.
- 'If multiple machines are found with same name, this parameter is used to identify
uniqueness of the virtual machine. version_added 2.5'
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
type: str
export_dir:
description:
- Absolute path to place the exported files on the server running this task, must have write permission.
- If folder not exist will create it, also create a folder under this path named with VM name.
required: yes
type: path
export_with_images:
default: false
description:
- Export an ISO image of the media mounted on the CD/DVD Drive within the virtual machine.
type: bool
download_timeout:
description:
- The user defined timeout in minute of exporting file.
- If the vmdk file is too large to export in 10 minutes, specify the value larger than 10, the maximum value is 60.
default: 10
type: int
version_added: '2.9'
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- vmware_export_ovf:
validate_certs: false
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
name: '{{ vm_name }}'
export_with_images: true
export_dir: /path/to/ovf_template/
delegate_to: localhost
'''
RETURN = r'''
instance:
description: list of the exported files, if exported from vCenter server, device file is not named with vm name
returned: always
type: dict
sample: None
'''
import os
import hashlib
from time import sleep
from threading import Thread
from ansible.module_utils.urls import open_url
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
try:
from pyVmomi import vim
from pyVim import connect
except ImportError:
pass
class LeaseProgressUpdater(Thread):
    """Background thread that keeps the vSphere HTTP NFC lease alive.

    The lease is aborted by vSphere if progress is not reported; this thread
    calls HttpNfcLeaseProgress every ``update_interval`` seconds with the
    percentage set by the downloader via set_progress_percent().
    """

    def __init__(self, http_nfc_lease, update_interval):
        Thread.__init__(self)
        self._running = True
        self.httpNfcLease = http_nfc_lease
        self.updateInterval = update_interval
        self.progressPercent = 0

    def set_progress_percent(self, progress_percent):
        self.progressPercent = progress_percent

    def stop(self):
        # Cooperative shutdown; checked at the top of each run() iteration.
        self._running = False

    def run(self):
        while self._running:
            try:
                if self.httpNfcLease.state == vim.HttpNfcLease.State.done:
                    return
                self.httpNfcLease.HttpNfcLeaseProgress(self.progressPercent)
                # Sleep in 1-second slices so lease completion/error is
                # noticed promptly instead of after a full interval.
                sleep_sec = 0
                while True:
                    if self.httpNfcLease.state == vim.HttpNfcLease.State.done or self.httpNfcLease.state == vim.HttpNfcLease.State.error:
                        return
                    sleep_sec += 1
                    sleep(1)
                    if sleep_sec == self.updateInterval:
                        break
            except Exception:
                # Any vSphere API failure ends the updater quietly; the
                # download path reports its own errors.
                return
class VMwareExportVmOvf(PyVmomi):
    def __init__(self, module):
        """Initialize exporter state and download tunables."""
        super(VMwareExportVmOvf, self).__init__(module)
        self.mf_file = ''
        self.ovf_dir = ''
        # set read device content chunk size to 2 MB
        self.chunk_size = 2 * 2 ** 20
        # set lease progress update interval to 15 seconds
        self.lease_interval = 15
        self.facts = {'device_files': []}
        self.download_timeout = 10
    def create_export_dir(self, vm_obj):
        """Create <export_dir>/<vm name>/ and record the manifest file path.

        Fails the module if the directory cannot be created.
        """
        self.ovf_dir = os.path.join(self.params['export_dir'], vm_obj.name)
        if not os.path.exists(self.ovf_dir):
            try:
                os.makedirs(self.ovf_dir)
            except OSError as err:
                self.module.fail_json(msg='Exception caught when create folder %s, with error %s'
                                          % (self.ovf_dir, to_text(err)))
        # Manifest (.mf) collects SHA256 checksums of exported device files.
        self.mf_file = os.path.join(self.ovf_dir, vm_obj.name + '.mf')
def download_device_files(self, headers, temp_target_disk, device_url, lease_updater, total_bytes_written,
total_bytes_to_write):
mf_content = 'SHA256(' + os.path.basename(temp_target_disk) + ')= '
sha256_hash = hashlib.sha256()
with open(self.mf_file, 'a') as mf_handle:
with open(temp_target_disk, 'wb') as handle:
try:
response = open_url(device_url, headers=headers, validate_certs=False, timeout=self.download_timeout)
except Exception as err:
lease_updater.httpNfcLease.HttpNfcLeaseAbort()
lease_updater.stop()
self.module.fail_json(msg='Exception caught when getting %s, %s' % (device_url, to_text(err)))
if not response:
lease_updater.httpNfcLease.HttpNfcLeaseAbort()
lease_updater.stop()
self.module.fail_json(msg='Getting %s failed' % device_url)
if response.getcode() >= 400:
lease_updater.httpNfcLease.HttpNfcLeaseAbort()
lease_updater.stop()
self.module.fail_json(msg='Getting %s return code %d' % (device_url, response.getcode()))
current_bytes_written = 0
block = response.read(self.chunk_size)
while block:
handle.write(block)
sha256_hash.update(block)
handle.flush()
os.fsync(handle.fileno())
current_bytes_written += len(block)
block = response.read(self.chunk_size)
written_percent = ((current_bytes_written + total_bytes_written) * 100) / total_bytes_to_write
lease_updater.progressPercent = int(written_percent)
mf_handle.write(mf_content + sha256_hash.hexdigest() + '\n')
self.facts['device_files'].append(temp_target_disk)
return current_bytes_written
def export_to_ovf_files(self, vm_obj):
self.create_export_dir(vm_obj=vm_obj)
export_with_iso = False
if 'export_with_images' in self.params and self.params['export_with_images']:
export_with_iso = True
if 60 > self.params['download_timeout'] > 10:
self.download_timeout = self.params['download_timeout']
ovf_files = []
# get http nfc lease firstly
http_nfc_lease = vm_obj.ExportVm()
# create a thread to track file download progress
lease_updater = LeaseProgressUpdater(http_nfc_lease, self.lease_interval)
total_bytes_written = 0
# total storage space occupied by the virtual machine across all datastores
total_bytes_to_write = vm_obj.summary.storage.unshared
# new deployed VM with no OS installed
if total_bytes_to_write == 0:
total_bytes_to_write = vm_obj.summary.storage.committed
if total_bytes_to_write == 0:
http_nfc_lease.HttpNfcLeaseAbort()
self.module.fail_json(msg='Total storage space occupied by the VM is 0.')
headers = {'Accept': 'application/x-vnd.vmware-streamVmdk'}
cookies = connect.GetStub().cookie
if cookies:
headers['Cookie'] = cookies
lease_updater.start()
try:
while True:
if http_nfc_lease.state == vim.HttpNfcLease.State.ready:
for deviceUrl in http_nfc_lease.info.deviceUrl:
file_download = False
if deviceUrl.targetId and deviceUrl.disk:
file_download = True
elif deviceUrl.url.split('/')[-1].split('.')[-1] == 'iso':
if export_with_iso:
file_download = True
elif deviceUrl.url.split('/')[-1].split('.')[-1] == 'nvram':
if self.host_version_at_least(version=(6, 7, 0), vm_obj=vm_obj):
file_download = True
else:
continue
device_file_name = deviceUrl.url.split('/')[-1]
# device file named disk-0.iso, disk-1.vmdk, disk-2.vmdk, replace 'disk' with vm name
if device_file_name.split('.')[0][0:5] == "disk-":
device_file_name = device_file_name.replace('disk', vm_obj.name)
temp_target_disk = os.path.join(self.ovf_dir, device_file_name)
device_url = deviceUrl.url
# if export from ESXi host, replace * with hostname in url
# e.g., https://*/ha-nfc/5289bf27-da99-7c0e-3978-8853555deb8c/disk-1.vmdk
if '*' in device_url:
device_url = device_url.replace('*', self.params['hostname'])
if file_download:
current_bytes_written = self.download_device_files(headers=headers,
temp_target_disk=temp_target_disk,
device_url=device_url,
lease_updater=lease_updater,
total_bytes_written=total_bytes_written,
total_bytes_to_write=total_bytes_to_write)
total_bytes_written += current_bytes_written
ovf_file = vim.OvfManager.OvfFile()
ovf_file.deviceId = deviceUrl.key
ovf_file.path = device_file_name
ovf_file.size = current_bytes_written
ovf_files.append(ovf_file)
break
elif http_nfc_lease.state == vim.HttpNfcLease.State.initializing:
sleep(2)
continue
elif http_nfc_lease.state == vim.HttpNfcLease.State.error:
lease_updater.stop()
self.module.fail_json(msg='Get HTTP NFC lease error %s.' % http_nfc_lease.state.error[0].fault)
# generate ovf file
ovf_manager = self.content.ovfManager
ovf_descriptor_name = vm_obj.name
ovf_parameters = vim.OvfManager.CreateDescriptorParams()
ovf_parameters.name = ovf_descriptor_name
ovf_parameters.ovfFiles = ovf_files
vm_descriptor_result = ovf_manager.CreateDescriptor(obj=vm_obj, cdp=ovf_parameters)
if vm_descriptor_result.error:
http_nfc_lease.HttpNfcLeaseAbort()
lease_updater.stop()
self.module.fail_json(msg='Create VM descriptor file error %s.' % vm_descriptor_result.error)
else:
vm_descriptor = vm_descriptor_result.ovfDescriptor
ovf_descriptor_path = os.path.join(self.ovf_dir, ovf_descriptor_name + '.ovf')
sha256_hash = hashlib.sha256()
with open(self.mf_file, 'a') as mf_handle:
with open(ovf_descriptor_path, 'wb') as handle:
handle.write(vm_descriptor)
sha256_hash.update(vm_descriptor)
mf_handle.write('SHA256(' + os.path.basename(ovf_descriptor_path) + ')= ' + sha256_hash.hexdigest() + '\n')
http_nfc_lease.HttpNfcLeaseProgress(100)
# self.facts = http_nfc_lease.HttpNfcLeaseGetManifest()
http_nfc_lease.HttpNfcLeaseComplete()
lease_updater.stop()
self.facts.update({'manifest': self.mf_file, 'ovf_file': ovf_descriptor_path})
except Exception as err:
kwargs = {
'changed': False,
'failed': True,
'msg': to_text(err),
}
http_nfc_lease.HttpNfcLeaseAbort()
lease_updater.stop()
return kwargs
return {'changed': True, 'failed': False, 'instance': self.facts}
def main():
    """Ansible module entry point: parse arguments, locate the VM (by name,
    uuid or moid), refuse to export a running VM, and export it to OVF."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        folder=dict(type='str'),
        datacenter=dict(type='str', default='ha-datacenter'),
        export_dir=dict(type='path', required=True),
        export_with_images=dict(type='bool', default=False),
        download_timeout=dict(type='int', default=10),
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_one_of=[
                               ['name', 'uuid', 'moid'],
                           ],
                           )
    pyv = VMwareExportVmOvf(module)
    vm = pyv.get_vm()
    if vm:
        vm_facts = pyv.gather_facts(vm)
        vm_power_state = vm_facts['hw_power_status'].lower()
        # Exporting a running VM is not supported.
        if vm_power_state != 'poweredoff':
            module.fail_json(msg='VM state should be poweredoff to export')
        results = pyv.export_to_ovf_files(vm_obj=vm)
    else:
        module.fail_json(msg='The specified virtual machine not found')
    # fail_json() exits the process, so `results` is always bound here.
    module.exit_json(**results)


if __name__ == '__main__':
    main()
| gpl-3.0 |
henrytao-me/openerp.positionq | openerp/addons/subscription/subscription.py | 45 | 9019 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# TODO:
# Error treatment: exception, request, ... -> send request to user_id
import time
from openerp.osv import fields,osv
from openerp.tools.translate import _
class subscription_document(osv.osv):
    """Template describing which model a subscription can clone, and which
    fields must be reset to defaults on each generated copy."""
    _name = "subscription.document"
    _description = "Subscription Document"
    _columns = {
        'name': fields.char('Name', size=60, required=True),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the subscription document without removing it."),
        # The ir.model whose records get duplicated by the subscription.
        'model': fields.many2one('ir.model', 'Object', required=True),
        # Fields to override with a default value on every generated copy.
        'field_ids': fields.one2many('subscription.document.fields', 'document_id', 'Fields')
    }
    _defaults = {
        'active' : lambda *a: True,
    }
subscription_document()
class subscription_document_fields(osv.osv):
    """One field of a subscription document, with the default value to apply
    when a new document is generated (False, or the current date)."""
    _name = "subscription.document.fields"
    _description = "Subscription Document Fields"
    _rec_name = 'field'
    _columns = {
        'field': fields.many2one('ir.model.fields', 'Field', domain="[('model_id', '=', parent.model)]", required=True),
        'value': fields.selection([('false','False'),('date','Current Date')], 'Default Value', size=40, help="Default value is considered for field when new document is generated."),
        'document_id': fields.many2one('subscription.document', 'Subscription Document', ondelete='cascade'),
    }
    _defaults = {}
subscription_document_fields()
def _get_document_types(self, cr, uid, context=None):
    """Return (model, name) pairs for all configured subscription documents,
    used as the selection list of the `doc_source` reference fields."""
    cr.execute('select m.model, s.name from subscription_document s, ir_model m WHERE s.model = m.id order by s.name')
    return cr.fetchall()
class subscription_subscription(osv.osv):
    """Recurring-document generator: periodically clones a source document,
    driven by a dedicated ir.cron job per subscription."""
    _name = "subscription.subscription"
    _description = "Subscription"
    _columns = {
        'name': fields.char('Name', size=60, required=True),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the subscription without removing it."),
        'partner_id': fields.many2one('res.partner', 'Partner'),
        'notes': fields.text('Notes'),
        'user_id': fields.many2one('res.users', 'User', required=True),
        'interval_number': fields.integer('Interval Qty'),
        'interval_type': fields.selection([('days', 'Days'), ('weeks', 'Weeks'), ('months', 'Months')], 'Interval Unit'),
        'exec_init': fields.integer('Number of documents'),
        'date_init': fields.datetime('First Date'),
        'state': fields.selection([('draft','Draft'),('running','Running'),('done','Done')], 'Status'),
        'doc_source': fields.reference('Source Document', required=True, selection=_get_document_types, size=128, help="User can choose the source document on which he wants to create documents"),
        'doc_lines': fields.one2many('subscription.subscription.history', 'subscription_id', 'Documents created', readonly=True),
        'cron_id': fields.many2one('ir.cron', 'Cron Job', help="Scheduler which runs on subscription", states={'running':[('readonly',True)], 'done':[('readonly',True)]}),
        'note': fields.text('Notes', help="Description or Summary of Subscription"),
    }
    _defaults = {
        'date_init': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
        'user_id': lambda obj,cr,uid,context: uid,
        'active': lambda *a: True,
        'interval_number': lambda *a: 1,
        'interval_type': lambda *a: 'months',
        'doc_source': lambda *a: False,
        'state': lambda *a: 'draft'
    }
    def _auto_end(self, cr, context=None):
        """After table setup, drop the FK to ir.cron (see comment below)."""
        super(subscription_subscription, self)._auto_end(cr, context=context)
        # drop the FK from subscription to ir.cron, as it would cause deadlocks
        # during cron job execution. When model_copy() tries to write() on the subscription,
        # it has to wait for an ExclusiveLock on the cron job record, but the latter
        # is locked by the cron system for the duration of the job!
        # FIXME: the subscription module should be reviewed to simplify the scheduling process
        # and to use a unique cron job for all subscriptions, so that it never needs to
        # be updated during its execution.
        cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (self._table, '%s_cron_id_fkey' % self._table))
    def set_process(self, cr, uid, ids, context=None):
        """Create one ir.cron job per subscription and mark it running."""
        for row in self.read(cr, uid, ids, context=context):
            # Map subscription fields onto the corresponding ir.cron fields.
            mapping = {'name':'name','interval_number':'interval_number','interval_type':'interval_type','exec_init':'numbercall','date_init':'nextcall'}
            res = {'model':'subscription.subscription', 'args': repr([[row['id']]]), 'function':'model_copy', 'priority':6, 'user_id':row['user_id'] and row['user_id'][0]}
            for key, value in mapping.items():
                res[value] = row[key]
            id = self.pool.get('ir.cron').create(cr, uid, res)
            self.write(cr, uid, [row['id']], {'cron_id': id, 'state': 'running'})
        return True
    def model_copy(self, cr, uid, ids, context=None):
        """Cron callback: duplicate the source document once, record it in the
        history, and mark the subscription done when it was the last run."""
        for row in self.read(cr, uid, ids, context=context):
            if not row.get('cron_id', False):
                continue
            cron_ids = [row['cron_id'][0]]
            remaining = self.pool.get('ir.cron').read(cr, uid, cron_ids, ['numbercall'])[0]['numbercall']
            try:
                (model_name, id) = row['doc_source'].split(',')
                id = int(id)
                model = self.pool.get(model_name)
            # Fix: was a bare ``except:`` which also swallowed SystemExit and
            # KeyboardInterrupt; only genuine errors should be translated into
            # the user-facing message.
            except Exception:
                raise osv.except_osv(_('Wrong Source Document!'), _('Please provide another source document.\nThis one does not exist!'))
            default = {'state': 'draft'}
            doc_obj = self.pool.get('subscription.document')
            document_ids = doc_obj.search(cr, uid, [('model.model','=',model_name)])
            # NOTE(review): raises IndexError when no subscription.document is
            # configured for this model -- confirm whether that is acceptable.
            doc = doc_obj.browse(cr, uid, document_ids)[0]
            for f in doc.field_ids:
                if f.value == 'date':
                    value = time.strftime('%Y-%m-%d')
                else:
                    value = False
                default[f.field.name] = value
            state = 'running'
            # if there was only one remaining document to generate
            # the subscription is over and we mark it as being done
            if remaining == 1:
                state = 'done'
            id = self.pool.get(model_name).copy(cr, uid, id, default, context)
            self.pool.get('subscription.subscription.history').create(cr, uid, {'subscription_id': row['id'], 'date':time.strftime('%Y-%m-%d %H:%M:%S'), 'document_id': model_name+','+str(id)})
            self.write(cr, uid, [row['id']], {'state': state})
        return True
    def unlink(self, cr, uid, ids, context=None):
        """Forbid deleting subscriptions that are still running."""
        for record in self.browse(cr, uid, ids, context or {}):
            if record.state == "running":
                raise osv.except_osv(_('Error!'),_('You cannot delete an active subscription!'))
        return super(subscription_subscription, self).unlink(cr, uid, ids, context)
    def set_done(self, cr, uid, ids, context=None):
        """Deactivate the cron jobs and mark the subscriptions as done."""
        res = self.read(cr, uid, ids, ['cron_id'])
        # Fix: the filter previously tested x['id'], which read() always
        # returns truthy, so a subscription whose cron_id is False crashed
        # on False[0]; skip records without a cron job instead.
        ids2 = [x['cron_id'][0] for x in res if x['cron_id']]
        self.pool.get('ir.cron').write(cr, uid, ids2, {'active': False})
        self.write(cr, uid, ids, {'state': 'done'})
        return True
    def set_draft(self, cr, uid, ids, context=None):
        """Reset subscriptions to the draft state."""
        self.write(cr, uid, ids, {'state': 'draft'})
        return True
subscription_subscription()
class subscription_subscription_history(osv.osv):
    """Log line created by model_copy(): records when a document was
    generated and a reference to the generated record."""
    _name = "subscription.subscription.history"
    _description = "Subscription history"
    _rec_name = 'date'
    _columns = {
        'date': fields.datetime('Date'),
        'subscription_id': fields.many2one('subscription.subscription', 'Subscription', ondelete='cascade'),
        'document_id': fields.reference('Source Document', required=True, selection=_get_document_types, size=128),
    }
subscription_subscription_history()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
theundefined/scripts | uke.py | 1 | 2325 | #!/usr/bin/python
from xml.dom import minidom
import re
import sys
import time
import pickle
# Require a phone number (or '-' to read numbers from stdin) as the first
# command-line argument.
if len(sys.argv) < 2:
    # Fix: the usage line printed the literal text "sys.argv[0]" instead of
    # interpolating the actual program name.
    sys.stderr.write('Usage: %s numer\n' % sys.argv[0])
    sys.exit(1)
numer = sys.argv[1]
def loaddata():
    """Parse the UKE numbering-plan XML file (T1-PSTN.xml) into a dict keyed
    by a running index; each entry holds the area code, operator name, and a
    compiled regex matching the operator's number ranges."""
    # open the file in the parser
    DOMTree = minidom.parse('T1-PSTN.xml')
    cNodes = DOMTree.childNodes
    uke = {}
    idn = 0
    for i in cNodes[0].getElementsByTagName("pstn"):
        # Shape of one record:
        # <pstn>
        # <numer>SP=19</numer>
        # <strefa>
        # <nazwa>SN Krakow</nazwa>
        # <ab>12</ab>
        # </strefa>
        # <operator>Rezerwa techniczna</operator>
        # <obszar>CALA SN</obszar>
        # <blokada>false</blokada>
        # </pstn>
        uke[idn] = {}
        # Area code (strefa/ab), e.g. "12" for Krakow.
        uke[idn]["ab"] = i.getElementsByTagName("ab")[0].childNodes[0].wholeText.encode('ascii', 'ignore')
        uke[idn]["operator"] = i.getElementsByTagName("operator")[0].childNodes[0].wholeText.encode('ascii', 'ignore')
        # <numer> looks like "SP=19": the part after '=' encodes number ranges.
        t = i.getElementsByTagName("numer")[0].childNodes[0].wholeText.encode('ascii', 'ignore').split("=")
        uke[idn]["dopasowanie"] = t[0]
        #print "^" + uke[idn]["ab"] + t[1].replace("(","[").replace(")","]").replace(",","|"),uke[idn]["operator"]
        # Build a regex anchored on the area code; the UKE range notation is
        # rewritten into bracket/alternation syntax. The "-0" -> "-90"
        # rewrite presumably patches a range quirk in the source data --
        # TODO confirm against the UKE file format.
        uke[idn]["reg"] = "^" + uke[idn]["ab"] + "(" + t[1].replace("(","[").replace(")","]").replace(",","|").replace("-0","-90") + ")"
        uke[idn]["re"] = re.compile(uke[idn]["reg"])
        # print idn,ab, numer, operator
        idn += 1
    # accessing a node attribute would look like:
    #print i.getElementsByTagName("imie")[0].getAttribute("foo")
    return uke
epoch=time.time()
saveddata={}
try:
f=open('/tmp/uke.cache','rb')
saveddata=pickle.load(f)
except:
saveddata['data']=loaddata()
saveddata['epoch']=epoch
pickle.dump(saveddata, open('/tmp/uke.cache', 'wb'))
if int(saveddata['epoch']) + 3600 < epoch:
# print "refresh"
saveddata['data']=loaddata()
saveddata['epoch']=epoch
pickle.dump(saveddata, open('/tmp/uke.cache', 'wb'))
uke=saveddata['data']
if numer=='-':
for numer in sys.stdin:
op=None
for i in uke:
if uke[i]["re"].match(numer):
op=uke[i]["operator"]
if op==None:
print numer.strip() + ',nieznany'
else:
print numer.strip() + ',' + op
else:
for i in uke:
if uke[i]["re"].match(numer):
print uke[i]["operator"]
| gpl-2.0 |
Just-D/chromium-1 | testing/scripts/telemetry_unittests.py | 20 | 1354 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import common
def main_run(args):
  """Run the telemetry unit tests via runtest.py and report JSON results.

  Reads an optional test filter from args.filter_file, collects the full
  results file written by the test runner, and dumps a summary dict
  ({'valid': ..., 'failures': [...]}) to args.output. Returns the runner's
  exit code.
  """
  filter_tests = []
  if args.filter_file:
    filter_tests = json.load(args.filter_file)
  with common.temporary_file() as tempfile_path:
    rc = common.run_runtest(args, [
        '--test-type', 'telemetry_unittests',
        '--run-python-script',
        os.path.join(common.SRC_DIR, 'tools', 'telemetry', 'run_tests'),
        '--browser', args.build_config_fs.lower(),
        '--retry-limit', '3',
        '--write-full-results-to', tempfile_path,
    ] + filter_tests)
    with open(tempfile_path) as f:
      results = json.load(f)
  parsed_results = common.parse_common_test_results(results, test_separator='.')
  failures = parsed_results['unexpected_failures']
  # The run is considered valid when the exit code is within the range the
  # runner uses to signal test failures, and either everything passed or the
  # failures were actually reported.
  json.dump({
      'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                    ((rc == 0) or failures)),
      'failures': failures.keys(),
  }, args.output)
  return rc
def main_compile_targets(args):
  """Write the build targets these tests need, as JSON, to args.output."""
  targets = ['chrome']
  args.output.write(json.dumps(targets))
if __name__ == '__main__':
funcs = {
'run': main_run,
'compile_targets': main_compile_targets,
}
sys.exit(common.run_script(sys.argv[1:], funcs))
| bsd-3-clause |
yanchen036/tensorflow | tensorflow/python/kernel_tests/as_string_op_test.py | 11 | 9058 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for as_string_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class AsStringOpTest(test.TestCase):
  """Tests tf.as_string for float, int, bool and complex inputs, checking the
  shortest/scientific/width/precision/fill options against the equivalent
  printf-style format specifiers."""

  def testFloat(self):
    """Float formatting matches %g / %e / %f with width/precision/fill."""
    float_inputs_ = [
        0, 1, -1, 0.5, 0.25, 0.125, float("INF"), float("NAN"), float("-INF")
    ]
    with self.test_session():
      for dtype in (dtypes.float32, dtypes.float64):
        input_ = array_ops.placeholder(dtype)

        output = string_ops.as_string(input_, shortest=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        # Decode the byte strings produced by the op for comparison.
        s = lambda strs: [x.decode("ascii") for x in strs]
        self.assertAllEqual(s(result), ["%g" % x for x in float_inputs_])

        output = string_ops.as_string(input_, scientific=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%e" % x for x in float_inputs_])

        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%f" % x for x in float_inputs_])

        output = string_ops.as_string(input_, width=3)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%3f" % x for x in float_inputs_])

        output = string_ops.as_string(input_, width=3, fill="0")
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03f" % x for x in float_inputs_])

        output = string_ops.as_string(input_, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03g" % x for x in float_inputs_])

        output = string_ops.as_string(input_, precision=10, width=3)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03.10f" % x for x in float_inputs_])

        output = string_ops.as_string(
            input_, precision=10, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03.10g" % x for x in float_inputs_])

        # Invalid option combinations must raise.
        with self.assertRaisesOpError("Cannot select both"):
          output = string_ops.as_string(input_, scientific=True, shortest=True)
          output.eval(feed_dict={input_: float_inputs_})

        with self.assertRaisesOpError("Fill string must be one or fewer"):
          output = string_ops.as_string(input_, fill="ab")
          output.eval(feed_dict={input_: float_inputs_})

  def testInt(self):
    """Integer formatting matches %d, and float-only options raise."""
    # Cannot use values outside -128..127 for test, because we're also
    # testing int8
    int_inputs_ = [0, -1, 1, -128, 127, -101, 101, -0]
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      for dtype in (dtypes.int32, dtypes.int64, dtypes.int8):
        input_ = array_ops.placeholder(dtype)

        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])

        output = string_ops.as_string(input_, width=3)
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%3d" % x for x in int_inputs_])

        output = string_ops.as_string(input_, width=3, fill="0")
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%03d" % x for x in int_inputs_])

        with self.assertRaisesOpError("scientific and shortest"):
          output = string_ops.as_string(input_, scientific=True)
          output.eval(feed_dict={input_: int_inputs_})

        with self.assertRaisesOpError("scientific and shortest"):
          output = string_ops.as_string(input_, shortest=True)
          output.eval(feed_dict={input_: int_inputs_})

        with self.assertRaisesOpError("precision not supported"):
          output = string_ops.as_string(input_, precision=0)
          output.eval(feed_dict={input_: int_inputs_})

  def testLargeInt(self):
    """int32/int64 extremes round-trip through %d."""
    # Cannot use values outside -128..127 for test, because we're also
    # testing int8
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      input_ = array_ops.placeholder(dtypes.int32)
      int_inputs_ = [np.iinfo(np.int32).min, np.iinfo(np.int32).max]
      output = string_ops.as_string(input_)
      result = output.eval(feed_dict={input_: int_inputs_})
      self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])

      input_ = array_ops.placeholder(dtypes.int64)
      int_inputs_ = [np.iinfo(np.int64).min, np.iinfo(np.int64).max]
      output = string_ops.as_string(input_)
      result = output.eval(feed_dict={input_: int_inputs_})
      self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])

  def testHalfInt(self):
    """int16 extremes round-trip through %d."""
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      input_ = array_ops.placeholder(dtypes.int16)
      int_inputs_ = [np.iinfo(np.int16).min, np.iinfo(np.int16).max]
      output = string_ops.as_string(input_)
      result = output.eval(feed_dict={input_: int_inputs_})
      self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])

  def testBool(self):
    """Booleans render as lowercase "false"/"true"."""
    bool_inputs_ = [False, True]
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      for dtype in (dtypes.bool,):
        input_ = array_ops.placeholder(dtype)

        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: bool_inputs_})
        self.assertAllEqual(s(result), ["false", "true"])

  def testComplex(self):
    """complex64 renders as "(real,imag)" with the same format options."""
    float_inputs_ = [
        0, 1, -1, 0.5, 0.25, 0.125, complex("INF"), complex("NAN"),
        complex("-INF")
    ]
    complex_inputs_ = [(x + (x + 1) * 1j) for x in float_inputs_]

    with self.test_session():
      for dtype in (dtypes.complex64,):
        input_ = array_ops.placeholder(dtype)

        def clean_nans(s_l):
          # Normalize "-nan" to "nan" so the comparison is platform-neutral.
          return [s.decode("ascii").replace("-nan", "nan") for s in s_l]

        output = string_ops.as_string(input_, shortest=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%g,%g)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(input_, scientific=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%e,%e)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%f,%f)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(input_, width=3)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%03f,%03f)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(input_, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%03g,%03g)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(input_, precision=10, width=3)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%03.10f,%03.10f)" % (x.real, x.imag) for x in complex_inputs_])

        output = string_ops.as_string(
            input_, precision=10, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(
            clean_nans(result),
            ["(%03.10g,%03.10g)" % (x.real, x.imag) for x in complex_inputs_])

        with self.assertRaisesOpError("Cannot select both"):
          output = string_ops.as_string(input_, scientific=True, shortest=True)
          output.eval(feed_dict={input_: complex_inputs_})
output.eval(feed_dict={input_: complex_inputs_})
# Run the test suite when executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
chand3040/cloud_that | common/djangoapps/terrain/stubs/tests/test_lti_stub.py | 172 | 4269 | """
Unit tests for stub LTI implementation.
"""
from mock import Mock, patch
import unittest
import urllib2
import requests
from terrain.stubs.lti import StubLtiService
class StubLtiServiceTest(unittest.TestCase):
    """
    A stub of the LTI provider that listens on a local
    port and responds with pre-defined grade messages.
    Used for lettuce BDD tests in lms/courseware/features/lti.feature
    """
    def setUp(self):
        """Start a stub LTI server on a free local port and prepare a
        canonical LTI 1.0 launch payload pointed at it."""
        super(StubLtiServiceTest, self).setUp()
        self.server = StubLtiService()
        self.uri = 'http://127.0.0.1:{}/'.format(self.server.port)
        self.launch_uri = self.uri + 'correct_lti_endpoint'
        self.addCleanup(self.server.shutdown)
        self.payload = {
            'user_id': 'default_user_id',
            'roles': 'Student',
            'oauth_nonce': '',
            'oauth_timestamp': '',
            'oauth_consumer_key': 'test_client_key',
            'lti_version': 'LTI-1p0',
            'oauth_signature_method': 'HMAC-SHA1',
            'oauth_version': '1.0',
            'oauth_signature': '',
            'lti_message_type': 'basic-lti-launch-request',
            'oauth_callback': 'about:blank',
            'launch_presentation_return_url': '',
            'lis_outcome_service_url': 'http://localhost:8001/test_callback',
            'lis_result_sourcedid': '',
            'resource_link_id': '',
        }

    def test_invalid_request_url(self):
        """
        Tests that LTI server processes request with right program path but with wrong header.
        """
        self.launch_uri = self.uri + 'wrong_lti_endpoint'
        response = requests.post(self.launch_uri, data=self.payload)
        self.assertIn('Invalid request URL', response.content)

    def test_wrong_signature(self):
        """
        Tests that LTI server processes request with right program
        path and responses with incorrect signature.
        """
        response = requests.post(self.launch_uri, data=self.payload)
        self.assertIn('Wrong LTI signature', response.content)

    @patch('terrain.stubs.lti.signature.verify_hmac_sha1', return_value=True)
    def test_success_response_launch_lti(self, check_oauth):
        """
        Success lti launch.
        """
        response = requests.post(self.launch_uri, data=self.payload)
        self.assertIn('This is LTI tool. Success.', response.content)

    @patch('terrain.stubs.lti.signature.verify_hmac_sha1', return_value=True)
    def test_send_graded_result(self, verify_hmac):  # pylint: disable=unused-argument
        """Posting to /grade forwards a grade to the outcome callback; the
        stub relays the (mocked) consumer response body."""
        response = requests.post(self.launch_uri, data=self.payload)
        self.assertIn('This is LTI tool. Success.', response.content)
        grade_uri = self.uri + 'grade'
        with patch('terrain.stubs.lti.requests.post') as mocked_post:
            mocked_post.return_value = Mock(content='Test response', status_code=200)
            response = urllib2.urlopen(grade_uri, data='')
            self.assertIn('Test response', response.read())

    @patch('terrain.stubs.lti.signature.verify_hmac_sha1', return_value=True)
    def test_lti20_outcomes_put(self, verify_hmac):  # pylint: disable=unused-argument
        """LTI 2.0 outcome PUT is relayed and the consumer status reported."""
        response = requests.post(self.launch_uri, data=self.payload)
        self.assertIn('This is LTI tool. Success.', response.content)
        grade_uri = self.uri + 'lti2_outcome'
        with patch('terrain.stubs.lti.requests.put') as mocked_put:
            mocked_put.return_value = Mock(status_code=200)
            response = urllib2.urlopen(grade_uri, data='')
            self.assertIn('LTI consumer (edX) responded with HTTP 200', response.read())

    @patch('terrain.stubs.lti.signature.verify_hmac_sha1', return_value=True)
    def test_lti20_outcomes_put_like_delete(self, verify_hmac):  # pylint: disable=unused-argument
        """LTI 2.0 delete-style PUT is relayed and the consumer status reported."""
        response = requests.post(self.launch_uri, data=self.payload)
        self.assertIn('This is LTI tool. Success.', response.content)
        grade_uri = self.uri + 'lti2_delete'
        with patch('terrain.stubs.lti.requests.put') as mocked_put:
            mocked_put.return_value = Mock(status_code=200)
            response = urllib2.urlopen(grade_uri, data='')
            self.assertIn('LTI consumer (edX) responded with HTTP 200', response.read())
| agpl-3.0 |
tashaband/RYU295 | ryu/Gui/home.py | 1 | 4266 | import bottle
from bottle import route, run, request, abort, debug, template , static_file
import MySQLdb as mdb
@route('/packets', method='GET')
def packets_list():
print "list all received packets and their protocols"
dbcon = mdb.connect("localhost","testuser","test123","attackdb" )
cursor = dbcon.cursor()
cursor.execute("SELECT * FROM packets")
result = cursor.fetchall()
if not result:
return template('emptyTable', back_url='/home')
return template('packets', rows=result)
@route('/home', method='GET')
@route('/', method='GET')
def display_home():
    """Render the landing page."""
    print "home page"
    return template('home')
@route('/packets_filter', method='POST')
def packets_list_filtered():
print "list all received packets and their protocols-filtered"
filter_name = request.forms.get('filter_name')
filter_param = request.forms.get('filter_param')
dbcon = mdb.connect("localhost","testuser","test123","attackdb" )
cursor = dbcon.cursor()
query= "SELECT * FROM packets where %s = '%s'"%(filter_name, filter_param)
print query
cursor.execute(query)
result = cursor.fetchall()
if not result:
return template('emptyTable', back_url='/packets')
return template('packets', rows=result)
@route('/packets-ip/:ipaddr', method='GET')
def packets_list_filtered(ipaddr):
print "list all received packets and their protocols-filtered for given ip address"
filter_name = request.forms.get('filter_name')
filter_param = request.forms.get('filter_param')
dbcon = mdb.connect("localhost","testuser","test123","attackdb" )
cursor = dbcon.cursor()
query= "SELECT * FROM packets where sourceip = '%s' or destip = '%s'"%(ipaddr, ipaddr)
print query
cursor.execute(query)
result = cursor.fetchall()
if not result:
return template('emptyTable', back_url='/home')
return template('packets', rows=result)
@route('/attacks_filter', method='POST')
def attacks_list_filtered():
print "list all attacks-filtered"
filter_name = request.forms.get('filter_name')
filter_param = request.forms.get('filter_param')
dbcon = mdb.connect("localhost","testuser","test123","attackdb" )
cursor = dbcon.cursor()
query= "SELECT * FROM attacks where %s = '%s'"%(filter_name, filter_param)
print query
cursor.execute(query)
result = cursor.fetchall()
if not result:
return template('emptyTable', back_url='/attacks')
return template('packets', rows=result)
@route('/attacks', method='GET')
def attacks_list():
print "list all attacks caught"
dbcon = mdb.connect("localhost","testuser","test123","attackdb" )
cursor = dbcon.cursor()
cursor.execute("SELECT * FROM attacks")
result = cursor.fetchall()
if not result:
return template('emptyTable', back_url='/home')
return template('attacks', rows=result)
@route('/rules', method='GET')
def rules_list():
print "list all attacks rules"
fname = '/home/ubuntu/RYU295/ryu/lib/ids/rules.txt'
with open(fname) as f:
rules = f.readlines()
return template('rules', rows=rules)
@route('/editRules', method='GET')
def edit_rules():
print "Edit attacks rules"
fname = '/home/ubuntu/RYU295/ryu/lib/ids/rules.txt'
with open(fname) as f:
rules = f.read()
print rules
return template('editRules', rows=rules)
@route('/rules', method='POST')
def change_rules():
print "change attacks rules"
post_rules = request.forms.get('rule_data')
print "new rules : ", post_rules
fname = '/home/ubuntu/RYU295/ryu/lib/ids/rules.txt'
open(fname,'w').close()
f = open(fname, 'w')
f.write(post_rules)
f.close()
with open(fname) as f:
rules = f.readlines()
return template('rules', rows=rules)
@route('/<filename:re:.*\.css>')
@route('/packets-ip/<filename:re:.*\.css>')
def stylesheets(filename):
    # Serve CSS assets; the second route covers pages under /packets-ip/.
    return static_file(filename, root='static/css')
@route('/<filename:re:.*\.png>')
@route('/packets-ip/<filename:re:.*\.png>')
def images(filename):
    # Serve PNG assets; the second route covers pages under /packets-ip/.
    return static_file(filename, root='static/img')
@route('/<filename:re:.*\.js>')
def javascriptFiles(filename):
    # Serve JavaScript assets from the static/js directory.
    return static_file(filename, root='static/js')
# Enable bottle's debug pages and start the development server; the
# reloader restarts the process when source files change.
debug(True)
run(reloader=True)
| apache-2.0 |
catapult-project/catapult | common/py_vulcanize/third_party/rjsmin/setup.py | 20 | 1181 | #!/usr/bin/env python
# -*- coding: ascii -*-
#
# Copyright 2006 - 2013
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys as _sys
from _setup import run
def setup(args=None, _manifest=0):
    """ Main setup function """
    from _setup.ext import Extension
    # Jython cannot build the C accelerator, so skip the extension there.
    if 'java' in _sys.platform.lower():
        ext = None
    else:
        ext = [Extension('_rjsmin', sources=['rjsmin.c'])]
    return run(script_args=args, ext=ext, manifest_only=_manifest)
def manifest():
    """ Create List of packaged files """
    # Delegates to setup() in manifest-only mode; no build is performed.
    return setup((), _manifest=1)
if __name__ == '__main__':
    # Run the full distutils setup when executed as a script.
    setup()
| bsd-3-clause |
renmengye/imageqa-public | src/nn/lstm_test.py | 1 | 4523 | from sequential import *
from lstm_old import *
from dropout import *
from reshape import *
from lut import *
from lstm import *
import unittest
class LSTM_Recurrent_Real_Tests(unittest.TestCase):
    """Compares the new LSTM implementation against the reference
    LSTM_Old on real data: forward outputs, gradients and updated
    weights must agree between two otherwise identical pipelines.
    """
    def test_all(self):
        # Inputs: (N, T, 1) word-id sequences; targets: (N, 1) labels.
        trainInput = np.loadtxt('lstm_test_input.csv', delimiter=',')
        trainInput = trainInput.reshape(trainInput.shape[0], trainInput.shape[1], 1)
        trainTarget = np.loadtxt('lstm_test_target.csv', delimiter=',')
        trainTarget = trainTarget.reshape(trainTarget.shape[0], 1)
        wordEmbed = np.loadtxt('lstm_test_word.csv', delimiter=',')
        # D: embedding width (LUT output); D2: LSTM hidden width.
        D = 300
        D2 = 10
        N = trainInput.shape[0]
        Time = trainInput.shape[1]
        multiOutput = False
        time_unfold = TimeUnfold()
        # Embedding lookup initialized from the pre-trained word table.
        lut = LUT(
            inputDim=np.max(trainInput)+1,
            outputDim=D,
            inputNames=None,
            needInit=False,
            initWeights=wordEmbed
        )
        time_fold = TimeFold(
            timespan=Time,
            inputNames=None
        )
        # Two dropout layers with the same seed so both pipelines drop
        # identical units and stay comparable.
        dropout = Dropout(
            name='d1',
            dropoutRate=0.2,
            initSeed=2,
            inputNames=None,
            outputDim=D2
        )
        dropout2 = Dropout(
            name='d2',
            dropoutRate=0.2,
            initSeed=2,
            inputNames=None,
            outputDim=D2
        )
        # New implementation under test.
        lstm = LSTM(
            name='lstm',
            timespan=Time,
            inputDim=D,
            outputDim=D2,
            inputNames=None,
            multiOutput=multiOutput,
            cutOffZeroEnd=True,
            learningRate=0.8,
            momentum=0.9,
            outputdEdX=True)
        # Reference implementation; weights are copied from `lstm` below.
        lstm2 = LSTM_Old(
            name='lstm',
            inputDim=D,
            outputDim=D2,
            needInit=False,
            initRange=0.1,
            initSeed=0,
            cutOffZeroEnd=True,
            multiErr=multiOutput,
            learningRate=0.8,
            momentum=0.9,
            outputdEdX=True)
        sig = Map(
            name='sig',
            outputDim=1,
            activeFn=SigmoidActiveFn(),
            initRange=0.1,
            initSeed=5,
            learningRate=0.01,
            momentum=0.9,
            weightClip=10.0,
            gradientClip=0.1,
            weightRegConst=0.00005
        )
        # Same seed as `sig` so both output layers start identical.
        sig2 = Map(
            name='sig',
            outputDim=1,
            activeFn=SigmoidActiveFn(),
            initRange=0.1,
            initSeed=5,
            learningRate=0.01,
            momentum=0.9,
            weightClip=10.0,
            gradientClip=0.1,
            weightRegConst=0.00005
        )
        costFn = crossEntOne
        model1 = Sequential(
            stages=[
                time_unfold,
                lut,
                time_fold,
                dropout,
                lstm,
                sig
            ]
        )
        model2 = Sequential(
            stages=[
                time_unfold,
                lut,
                time_fold,
                dropout2,
                lstm2,
                sig2
            ]
        )
        input_ = trainInput[0:N, 0:Time]
        target_ = trainTarget[0:N]
        Y1 = model1.forward(input_)
        # Copy weights so both LSTMs compute with identical parameters;
        # the two implementations store W transposed relative to each
        # other.
        W = lstm.getWeights()
        lstm2.W = W.transpose()
        Y2 = model2.forward(input_)
        self.chkEqual(Y1, Y2)
        E, dEdY1 = costFn(Y1, target_)
        E, dEdY2 = costFn(Y2, target_)
        model1.backward(dEdY1)
        model2.backward(dEdY2)
        # Gradients and post-update weights must also match (transposed).
        dEdW = lstm.getGradient()
        self.chkEqual(dEdW.transpose(), lstm2.dEdW)
        lstm.updateWeights()
        lstm2.updateWeights()
        W = lstm.getWeights()
        self.chkEqual(W.transpose(), lstm2.W)

    def chkEqual(self, a, b):
        # Element-wise relative comparison with a loose 10% tolerance;
        # values with magnitude below 1e-7 are treated as equal to zero.
        tolerance = 1e-1
        a = a.reshape(a.size)
        b = b.reshape(b.size)
        for i in range(a.size):
            if not ((a[i] == 0 and b[i] == 0) or
                    (np.abs(a[i]) < 1e-7 and np.abs(b[i]) < 1e-7) or
                    (np.abs(a[i] / b[i] - 1) < tolerance)):
                print a[i], b[i], a[i]/b[i]
            self.assertTrue(
                (a[i] == 0 and b[i] == 0) or
                (np.abs(a[i]) < 1e-7 and np.abs(b[i]) < 1e-7) or
                (np.abs(a[i] / b[i] - 1) < tolerance))
if __name__ == '__main__':
    # Run the comparison suite when executed directly.
    unittest.main()
dou800/php-buildpack-legacy | builds/runtimes/python-2.7.6/lib/python2.7/ctypes/test/test_find.py | 81 | 2468 | import unittest
import sys
from ctypes import *
from ctypes.util import find_library
from ctypes.test import is_resource_enabled
# Locate the platform's OpenGL libraries at import time; any of these
# may be None when the library is not installed.
if sys.platform == "win32":
    lib_gl = find_library("OpenGL32")
    lib_glu = find_library("Glu32")
    lib_gle = None
elif sys.platform == "darwin":
    # On macOS one framework provides both GL and GLU.
    lib_gl = lib_glu = find_library("OpenGL")
    lib_gle = None
else:
    lib_gl = find_library("GL")
    lib_glu = find_library("GLU")
    lib_gle = find_library("gle")

## print, for debugging
if is_resource_enabled("printing"):
    if lib_gl or lib_glu or lib_gle:
        print "OpenGL libraries:"
        for item in (("GL", lib_gl),
                     ("GLU", lib_glu),
                     ("gle", lib_gle)):
            print "\t", item
# On some systems, loading the OpenGL libraries needs the RTLD_GLOBAL mode.
class Test_OpenGL_libs(unittest.TestCase):
    """Loads whichever OpenGL libraries were found at module import and
    checks a known symbol resolves in each.  Test methods are only
    defined when the matching library was located, so missing libraries
    are skipped rather than failed.
    """
    def setUp(self):
        self.gl = self.glu = self.gle = None
        if lib_gl:
            self.gl = CDLL(lib_gl, mode=RTLD_GLOBAL)
        if lib_glu:
            # GLU references GL symbols, hence RTLD_GLOBAL here as well.
            self.glu = CDLL(lib_glu, RTLD_GLOBAL)
        if lib_gle:
            try:
                self.gle = CDLL(lib_gle)
            except OSError:
                # find_library may report a name that still fails to load.
                pass

    if lib_gl:
        def test_gl(self):
            if self.gl:
                self.gl.glClearIndex

    if lib_glu:
        def test_glu(self):
            if self.glu:
                self.glu.gluBeginCurve

    if lib_gle:
        def test_gle(self):
            if self.gle:
                self.gle.gleGetJoinStyle
##if os.name == "posix" and sys.platform != "darwin":
## # On platforms where the default shared library suffix is '.so',
## # at least some libraries can be loaded as attributes of the cdll
## # object, since ctypes now tries loading the lib again
## # with '.so' appended of the first try fails.
## #
## # Won't work for libc, unfortunately. OTOH, it isn't
## # needed for libc since this is already mapped into the current
## # process (?)
## #
## # On MAC OSX, it won't work either, because dlopen() needs a full path,
## # and the default suffix is either none or '.dylib'.
## class LoadLibs(unittest.TestCase):
## def test_libm(self):
## import math
## libm = cdll.libm
## sqrt = libm.sqrt
## sqrt.argtypes = (c_double,)
## sqrt.restype = c_double
## self.assertEqual(sqrt(2), math.sqrt(2))
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
| mit |
gdsfactory/gdsfactory | pp/components/mzi_lattice.py | 1 | 3010 | from typing import Dict, Tuple, Union
from pp.cell import cell
from pp.component import Component
from pp.tech import FACTORY, Factory
from pp.types import Number
@cell
def mzi_lattice(
    coupler_lengths: Tuple[Number, ...] = (10.0, 20.0),
    coupler_gaps: Tuple[Number, ...] = (0.2, 0.3),
    delta_lengths: Tuple[Number, ...] = (10.0,),
    mzi: Union[str, Dict] = "mzi",
    splitter: str = "coupler",
    straight: Union[str, Dict] = "straight",
    factory: Factory = FACTORY,
    **kwargs
) -> Component:
    r"""Mzi lattice filter.

    Args:
        coupler_lengths: length of each coupler; must have exactly one
            more entry than ``delta_lengths``.
        coupler_gaps: gap of each coupler; same length as ``coupler_lengths``.
        delta_lengths: arm length difference of each MZI stage.
        mzi: name (or settings dict) of the MZI stage component.
        splitter: name of the coupler component.
        straight: name (or settings dict) of the straight component.
        factory: component factory used to build sub-components.
        kwargs: forwarded to every MZI stage.

    .. code::

               ______             ______
              |      |           |      |
              |      |           |      |
        cp1==|      |===cp2=====|      |=== .... ===cp_last===
              |      |           |      |
              |      |           |      |
             DL1     |          DL2     |
              |      |           |      |
              |______|           |      |
                                 |______|
    """
    # One coupler more than there are stages.
    assert len(coupler_lengths) == len(coupler_gaps)
    assert len(coupler_lengths) == len(delta_lengths) + 1

    c = Component()

    get = factory.get_component

    # First stage: splitter uses coupler 0, combiner uses coupler 1.
    splitter_settings = dict(
        component=splitter, gap=coupler_gaps[0], length=coupler_lengths[0]
    )
    combiner_settings = dict(
        component=splitter, gap=coupler_gaps[1], length=coupler_lengths[1]
    )

    # cp1 is only used below to copy the west-side ports onto the lattice.
    cp1 = get(**splitter_settings)

    sprevious = c << get(
        component=mzi,
        with_splitter=True,
        delta_length=delta_lengths[0],
        straight=straight,
        combiner=combiner_settings,
        splitter=splitter_settings,
        **kwargs
    )

    stages = []

    for length, gap, delta_length in zip(
        coupler_lengths[2:], coupler_gaps[2:], delta_lengths[1:]
    ):
        # NOTE(review): splitter_settings is rebuilt from the *second*
        # coupler (index 1) on every iteration, which makes the
        # `splitter_settings = combiner_settings` assignment at the end
        # of this loop body dead code.  Possibly each stage's splitter
        # was meant to reuse the previous stage's combiner -- confirm
        # intent before changing.
        splitter_settings = dict(
            component=splitter, gap=coupler_gaps[1], length=coupler_lengths[1]
        )
        combiner_settings = dict(component=splitter, length=length, gap=gap)

        stage = c << get(
            component=mzi,
            with_splitter=False,
            delta_length=delta_length,
            straight=straight,
            splitter=splitter_settings,
            combiner=combiner_settings,
            **kwargs
        )
        splitter_settings = combiner_settings

        stages.append(stage)

    # Chain the stages: each stage's west ports connect to the previous
    # stage's east ports.
    for stage in stages:
        stage.connect("W0", sprevious.ports["E0"])
        stage.connect("W1", sprevious.ports["E1"])
        sprevious = stage

    # Expose the lattice's input (west) and output (east) ports.
    for port in cp1.get_ports_list(prefix="W"):
        c.add_port(port.name, port=port)

    for port in sprevious.get_ports_list(prefix="E"):
        c.add_port(port.name, port=port)

    return c
if __name__ == "__main__":
cpl = [10, 20, 30]
cpg = [0.1, 0.2, 0.3]
dl0 = [100, 200]
# cpl = [10, 20, 30, 40]
# cpg = [0.2, 0.3, 0.5, 0.5]
# dl0 = [0, 50, 100]
c = mzi_lattice(
coupler_lengths=cpl, coupler_gaps=cpg, delta_lengths=dl0, length_x=10
)
c.show()
| mit |
tempbottle/Nuitka | tests/basics/MainPrograms.py | 2 | 1458 | # Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
print("Module name is", __name__)
class SomeClass:
pass
print("Class inside main module names its module as", repr(SomeClass.__module__))
if __name__ == "__main__":
print("Executed as __main__:")
import sys, os
# The sys.argv[0] might contain ".exe", ".py" or no suffix at all.
# Remove it, so the "diff" output is more acceptable.
args = sys.argv[:]
args[0] = os.path.basename(args[0]).replace(".exe", ".py").replace(".py", "")
print("Arguments were (stripped argv[0] suffix):", repr(args))
# Output the flags, so we can test if we are compatible with these too.
print("The sys.flags are:", sys.flags)
| apache-2.0 |
bonitadecker77/python-for-android | python3-alpha/extra_modules/pyxmpp2/xmppstringprep.py | 46 | 8370 | #
# (C) Copyright 2003-2011 Jacek Konieczny <jajcus@jajcus.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# pylint treats "import stringprep" like depreciated "import string"
# pylint: disable-msg=W0402
"""Nodeprep and resourceprep stringprep profiles.
Normative reference:
- `RFC 6122 <http://xmpp.org/rfcs/rfc6122.html>`__
- `RFC 3454 <http://tools.ietf.org/html/rfc3454>`__
"""
__docformat__ = "restructuredtext en"
import stringprep
import unicodedata
from .exceptions import StringprepError
def b1_mapping(char):
    """Map `char` according to RFC 3454 table B.1 ("map to nothing").

    :Parameters:
        - `char`: Unicode character to map.

    :returns: u"" if there is `char` code in the table, `None` otherwise.
    """
    return "" if stringprep.in_table_b1(char) else None
def nfkc(data):
    """Return the NFKC normalization of the given Unicode data.

    :Parameters:
        - `data`: list of Unicode characters or Unicode string.

    :return: normalized Unicode string."""
    text = "".join(data) if isinstance(data, list) else data
    return unicodedata.normalize("NFKC", text)
class Profile(object):
    """Base class for stringprep profiles.

    A profile chains the RFC 3454 steps -- map, normalize, check for
    prohibited output, check bidirectional properties -- and caches the
    results of `prepare` in a bounded, class-wide cache.
    """
    # Shared eviction queue of (profile, key) pairs; oldest entries are
    # removed from the per-instance caches once the module-level
    # _stringprep_cache_size is exceeded.
    cache_items = []
    def __init__(self, unassigned, mapping, normalization, prohibited,
                bidi = True):
        """Initialize Profile object.

        :Parameters:
            - `unassigned`: the lookup table with unassigned codes
            - `mapping`: the lookup table with character mappings
            - `normalization`: the normalization function
            - `prohibited`: the lookup table with prohibited characters
            - `bidi`: if True then bidirectional checks should be done
        :Types:
            - `unassigned`: tuple of functions
            - `mapping`: tuple of functions
            - `normalization`: tuple of functions
            - `prohibited`: tuple of functions
            - `bidi`: `bool`
        """
        # pylint: disable-msg=R0913
        self.unassigned = unassigned
        self.mapping = mapping
        self.normalization = normalization
        self.prohibited = prohibited
        self.bidi = bidi
        self.cache = {}

    def prepare(self, data):
        """Complete string preparation procedure for 'stored' strings.
        (includes checks for unassigned codes)

        :Parameters:
            - `data`: Unicode string to prepare.

        :return: prepared string

        :raise StringprepError: if the preparation fails
        """
        ret = self.cache.get(data)
        if ret is not None:
            return ret
        result = self.map(data)
        if self.normalization:
            result = self.normalization(result)
        result = self.prohibit(result)
        result = self.check_unassigned(result)
        if self.bidi:
            result = self.check_bidi(result)
        if isinstance(result, list):
            # Bug fix: join the prepared characters into a string; the
            # original called "".join() with no argument, which raised
            # TypeError whenever this branch was reached.
            result = "".join(result)
        # Evict the older half of the shared cache once it is full.
        if len(self.cache_items) >= _stringprep_cache_size:
            remove = self.cache_items[: -_stringprep_cache_size // 2]
            for profile, key in remove:
                try:
                    del profile.cache[key]
                except KeyError:
                    pass
            self.cache_items[:] = self.cache_items[
                                            -_stringprep_cache_size // 2 :]
        self.cache_items.append((self, data))
        self.cache[data] = result
        return result

    def prepare_query(self, data):
        """Complete string preparation procedure for 'query' strings.
        (without checks for unassigned codes)

        :Parameters:
            - `data`: Unicode string to prepare.

        :return: prepared string

        :raise StringprepError: if the preparation fails
        """
        data = self.map(data)
        if self.normalization:
            data = self.normalization(data)
        data = self.prohibit(data)
        if self.bidi:
            data = self.check_bidi(data)
        if isinstance(data, list):
            data = "".join(data)
        return data

    def map(self, data):
        """Mapping part of string preparation.

        Each character is passed through the mapping tables in order;
        the first table that returns a replacement wins, otherwise the
        character is kept unchanged.
        """
        result = []
        for char in data:
            ret = None
            for lookup in self.mapping:
                ret = lookup(char)
                if ret is not None:
                    break
            if ret is not None:
                result.append(ret)
            else:
                result.append(char)
        return result

    def prohibit(self, data):
        """Checks for prohibited characters."""
        for char in data:
            for lookup in self.prohibited:
                if lookup(char):
                    raise StringprepError("Prohibited character: {0!r}"
                                                            .format(char))
        return data

    def check_unassigned(self, data):
        """Checks for unassigned character codes."""
        for char in data:
            for lookup in self.unassigned:
                if lookup(char):
                    raise StringprepError("Unassigned character: {0!r}"
                                                            .format(char))
        return data

    @staticmethod
    def check_bidi(data):
        """Checks if sting is valid for bidirectional printing.

        RFC 3454 section 6: RandALCat and LCat characters must not mix,
        and a string containing RandALCat characters must start and end
        with one.
        """
        has_l = False
        has_ral = False
        for char in data:
            if stringprep.in_table_d1(char):
                has_ral = True
            elif stringprep.in_table_d2(char):
                has_l = True
        if has_l and has_ral:
            raise StringprepError("Both RandALCat and LCat characters present")
        if has_ral and (not stringprep.in_table_d1(data[0])
                                    or not stringprep.in_table_d1(data[-1])):
            raise StringprepError("The first and the last character must"
                                                            " be RandALCat")
        return data
# Characters additionally prohibited in the node ("user") part of a JID
# (RFC 6122 appendix A).
NODEPREP_PROHIBITED = set(['"', '&', "'", "/", ":", "<", ">", "@"])

# Nodeprep profile (RFC 6122 appendix A), applied to JID node parts.
NODEPREP = Profile(
    unassigned = (stringprep.in_table_a1,),
    mapping = (b1_mapping, stringprep.map_table_b2),
    normalization = nfkc,
    prohibited = ( stringprep.in_table_c11, stringprep.in_table_c12,
                        stringprep.in_table_c21, stringprep.in_table_c22,
                        stringprep.in_table_c3, stringprep.in_table_c4,
                        stringprep.in_table_c5, stringprep.in_table_c6,
                        stringprep.in_table_c7, stringprep.in_table_c8,
                        stringprep.in_table_c9,
                        lambda x: x in NODEPREP_PROHIBITED ),
    bidi = True)

# Resourceprep profile (RFC 6122 appendix B), applied to JID resource
# parts; note it does no case mapping and prohibits fewer tables.
RESOURCEPREP = Profile(
    unassigned = (stringprep.in_table_a1,),
    mapping = (b1_mapping,),
    normalization = nfkc,
    prohibited = ( stringprep.in_table_c12, stringprep.in_table_c21,
                        stringprep.in_table_c22, stringprep.in_table_c3,
                        stringprep.in_table_c4, stringprep.in_table_c5,
                        stringprep.in_table_c6, stringprep.in_table_c7,
                        stringprep.in_table_c8, stringprep.in_table_c9 ),
    bidi = True)
_stringprep_cache_size = 1000 # pylint: disable-msg=C0103
def set_stringprep_cache_size(size):
    """Modify stringprep cache size.

    :Parameters:
        - `size`: new cache size
    """
    # pylint: disable-msg=W0603
    global _stringprep_cache_size
    _stringprep_cache_size = size
    if len(Profile.cache_items) <= size:
        return
    # Drop the oldest entries so only `size` remain cached.
    evicted = Profile.cache_items[:-size]
    Profile.cache_items = Profile.cache_items[-size:]
    for profile, key in evicted:
        try:
            del profile.cache[key]
        except KeyError:
            pass
# vi: sts=4 et sw=4
| apache-2.0 |
kiwifb/numpy | numpy/core/code_generators/generate_umath.py | 3 | 31173 | from __future__ import division, print_function
import os
import re
import struct
import sys
import textwrap
sys.path.insert(0, os.path.dirname(__file__))
import ufunc_docstrings as docstrings
sys.path.pop(0)
Zero = "PyUFunc_Zero"
One = "PyUFunc_One"
None_ = "PyUFunc_None"
AllOnes = "PyUFunc_MinusOne"
ReorderableNone = "PyUFunc_ReorderableNone"
# Sentinel value to specify using the full type description in the
# function name
class FullTypeDescr(object):
    # Marker class only; instances carry no state.
    pass
class FuncNameSuffix(object):
    """Stores the suffix to append when generating functions names.
    """
    def __init__(self, suffix):
        # Plain string suffix, e.g. an instruction-set or type tag.
        self.suffix = suffix
class TypeDescription(object):
    """Type signature for a ufunc.

    Attributes
    ----------
    type : str
        Character representing the nominal type.
    func_data : str or None or FullTypeDescr or FuncNameSuffix, optional
        The string representing the expression to insert into the data
        array, if any.
    in_ : str or None, optional
        The typecode(s) of the inputs.
    out : str or None, optional
        The typecode(s) of the outputs.
    astype : dict or None, optional
        If astype['x'] is 'y', uses PyUFunc_x_x_As_y_y/PyUFunc_xx_x_As_yy_y
        instead of PyUFunc_x_x/PyUFunc_xx_x.
    """
    def __init__(self, type, f=None, in_=None, out=None, astype=None):
        self.type = type
        self.func_data = f
        self.astype_dict = {} if astype is None else astype
        # 'P' in a signature is a placeholder for the nominal type.
        self.in_ = None if in_ is None else in_.replace('P', type)
        self.out = None if out is None else out.replace('P', type)

    def finish_signature(self, nin, nout):
        # Fill in default signatures (the nominal type repeated) and
        # resolve the astype substitution for this type.
        if self.in_ is None:
            self.in_ = self.type * nin
        assert len(self.in_) == nin
        if self.out is None:
            self.out = self.type * nout
        assert len(self.out) == nout
        self.astype = self.astype_dict.get(self.type, None)
# Maps a type-char code to the printf-style template used to build the
# C function name for that type; unlisted codes use the name verbatim.
_fdata_map = dict(e='npy_%sf', f='npy_%sf', d='npy_%s', g='npy_%sl',
                  F='nc_%sf', D='nc_%s', G='nc_%sl')

def build_func_data(types, f):
    """Return the C function name of `f` specialized to each type code."""
    return [_fdata_map.get(code, '%s') % (f,) for code in types]
def TD(types, f=None, astype=None, in_=None, out=None):
    """Expand a string of type codes into a list of TypeDescription
    objects, broadcasting scalar `f`, `in_` and `out` over all codes.
    """
    n = len(types)
    if f is None:
        func_data = (None,) * n
    elif isinstance(f, str):
        func_data = build_func_data(types, f)
    else:
        assert len(f) == n
        func_data = f
    # A single signature string applies to every type code.
    if isinstance(in_, str):
        in_ = (in_,) * n
    elif in_ is None:
        in_ = (None,) * n
    if isinstance(out, str):
        out = (out,) * n
    elif out is None:
        out = (None,) * n
    return [TypeDescription(t, f=fd, in_=i, out=o, astype=astype)
            for t, fd, i, o in zip(types, func_data, in_, out)]
class Ufunc(object):
    """Description of a ufunc.

    Attributes
    ----------
    nin : number of input arguments
    nout : number of output arguments
    identity : identity element for a two-argument function
    docstring : docstring for the ufunc
    type_descriptions : list of TypeDescription objects
    """
    def __init__(self, nin, nout, identity, docstring, typereso,
                 *type_descriptions):
        self.nin = nin
        self.nout = nout
        # A missing identity defaults to the PyUFunc_None sentinel.
        self.identity = None_ if identity is None else identity
        self.docstring = docstring
        self.typereso = typereso
        # Flatten the variadic groups into one list, then let each
        # entry fill in its default input/output signatures.
        self.type_descriptions = []
        for group in type_descriptions:
            self.type_descriptions.extend(group)
        for td in self.type_descriptions:
            td.finish_signature(self.nin, self.nout)
# String-handling utilities to avoid locale-dependence.
import string

# Translation table mapping ASCII lowercase to uppercase; built
# differently on Python 2 (str.maketrans lives in the string module)
# and Python 3 (bytes table, indexable by ordinal in str.translate).
if sys.version_info[0] < 3:
    UPPER_TABLE = string.maketrans(string.ascii_lowercase,
                                   string.ascii_uppercase)
else:
    UPPER_TABLE = bytes.maketrans(bytes(string.ascii_lowercase, "ascii"),
                                  bytes(string.ascii_uppercase, "ascii"))

def english_upper(s):
    """ Apply English case rules to convert ASCII strings to all upper case.

    This is an internal utility function to replace calls to str.upper() such
    that we can avoid changing behavior with changing locales. In particular,
    Turkish has distinct dotted and dotless variants of the Latin letter "I" in
    both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.

    Parameters
    ----------
    s : str

    Returns
    -------
    uppered : str

    Examples
    --------
    >>> from numpy.lib.utils import english_upper
    >>> s = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_'
    >>> english_upper(s)
    'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
    >>> english_upper('')
    ''
    """
    return s.translate(UPPER_TABLE)
#each entry in defdict is a Ufunc object.

#name: [string of chars for which it is defined,
#       string of characters using func interface,
#       tuple of strings giving funcs for data,
#       (in, out), or (instr, outstr) giving the signature as character codes,
#       identity,
#       docstring,
#       output specification (optional)
#       ]

# Maps a NumPy type-char code to the type name used in generated C
# identifiers.
chartoname = {'?': 'bool',
              'b': 'byte',
              'B': 'ubyte',
              'h': 'short',
              'H': 'ushort',
              'i': 'int',
              'I': 'uint',
              'l': 'long',
              'L': 'ulong',
              'q': 'longlong',
              'Q': 'ulonglong',
              'e': 'half',
              'f': 'float',
              'd': 'double',
              'g': 'longdouble',
              'F': 'cfloat',
              'D': 'cdouble',
              'G': 'clongdouble',
              'M': 'datetime',
              'm': 'timedelta',
              'O': 'OBJECT',
              # '.' is like 'O', but calls a method of the object instead
              # of a function
              'P': 'OBJECT',
              }
# Shorthand groups of type-char codes used when declaring the ufuncs
# below.  NOTE: `all` deliberately shadows the builtin within this
# generator script.
all = '?bBhHiIlLqQefdgFDGOMm'
O = 'O'
P = 'P'
ints = 'bBhHiIlLqQ'
times = 'Mm'
timedeltaonly = 'm'
intsO = ints + O
bints = '?' + ints
bintsO = bints + O
flts = 'efdg'
fltsO = flts + O
fltsP = flts + P
cmplx = 'FDG'
cmplxO = cmplx + O
cmplxP = cmplx + P
inexact = flts + cmplx
inexactvec = 'fd'
noint = inexact+O
nointP = inexact+P
allP = bints+times+flts+cmplxP
nobool = all[1:]
noobj = all[:-3]+all[-2:]
nobool_or_obj = all[1:-3]+all[-2:]
nobool_or_datetime = all[1:-2]+all[-1:]
intflt = ints+flts
intfltcmplx = ints+flts+cmplx
nocmplx = bints+times+flts
nocmplxO = nocmplx+O
nocmplxP = nocmplx+P
notimes_or_obj = bints + inexact
nodatetime_or_obj = bints + inexact

# Find which code corresponds to int64.
int64 = ''
uint64 = ''
for code in 'bhilq':
    # struct.calcsize gives the C size of the code on this platform.
    if struct.calcsize(code) == 8:
        int64 = code
        uint64 = english_upper(code)
        break
# This dictionary describes all the ufunc implementations, generating
# all the function names and their corresponding ufunc signatures. TD is
# an object which expands a list of character codes into an array of
# TypeDescriptions.
defdict = {
'add':
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.add'),
'PyUFunc_AdditionTypeResolver',
TD(notimes_or_obj),
[TypeDescription('M', FullTypeDescr, 'Mm', 'M'),
TypeDescription('m', FullTypeDescr, 'mm', 'm'),
TypeDescription('M', FullTypeDescr, 'mM', 'M'),
],
TD(O, f='PyNumber_Add'),
),
'subtract':
Ufunc(2, 1, None, # Zero is only a unit to the right, not the left
docstrings.get('numpy.core.umath.subtract'),
'PyUFunc_SubtractionTypeResolver',
TD(notimes_or_obj),
[TypeDescription('M', FullTypeDescr, 'Mm', 'M'),
TypeDescription('m', FullTypeDescr, 'mm', 'm'),
TypeDescription('M', FullTypeDescr, 'MM', 'm'),
],
TD(O, f='PyNumber_Subtract'),
),
'multiply':
Ufunc(2, 1, One,
docstrings.get('numpy.core.umath.multiply'),
'PyUFunc_MultiplicationTypeResolver',
TD(notimes_or_obj),
[TypeDescription('m', FullTypeDescr, 'mq', 'm'),
TypeDescription('m', FullTypeDescr, 'qm', 'm'),
TypeDescription('m', FullTypeDescr, 'md', 'm'),
TypeDescription('m', FullTypeDescr, 'dm', 'm'),
],
TD(O, f='PyNumber_Multiply'),
),
'divide':
Ufunc(2, 1, None, # One is only a unit to the right, not the left
docstrings.get('numpy.core.umath.divide'),
'PyUFunc_DivisionTypeResolver',
TD(intfltcmplx),
[TypeDescription('m', FullTypeDescr, 'mq', 'm'),
TypeDescription('m', FullTypeDescr, 'md', 'm'),
TypeDescription('m', FullTypeDescr, 'mm', 'd'),
],
TD(O, f='PyNumber_Divide'),
),
'floor_divide':
Ufunc(2, 1, None, # One is only a unit to the right, not the left
docstrings.get('numpy.core.umath.floor_divide'),
'PyUFunc_DivisionTypeResolver',
TD(intfltcmplx),
[TypeDescription('m', FullTypeDescr, 'mq', 'm'),
TypeDescription('m', FullTypeDescr, 'md', 'm'),
#TypeDescription('m', FullTypeDescr, 'mm', 'd'),
],
TD(O, f='PyNumber_FloorDivide'),
),
'true_divide':
Ufunc(2, 1, None, # One is only a unit to the right, not the left
docstrings.get('numpy.core.umath.true_divide'),
'PyUFunc_DivisionTypeResolver',
TD('bBhH', out='d'),
TD('iIlLqQ', out='d'),
TD(flts+cmplx),
[TypeDescription('m', FullTypeDescr, 'mq', 'm'),
TypeDescription('m', FullTypeDescr, 'md', 'm'),
TypeDescription('m', FullTypeDescr, 'mm', 'd'),
],
TD(O, f='PyNumber_TrueDivide'),
),
'conjugate':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.conjugate'),
None,
TD(ints+flts+cmplx),
TD(P, f='conjugate'),
),
'fmod':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.fmod'),
None,
TD(ints),
TD(flts, f='fmod', astype={'e':'f'}),
TD(P, f='fmod'),
),
'square':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.square'),
None,
TD(ints+inexact),
TD(O, f='Py_square'),
),
'reciprocal':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.reciprocal'),
None,
TD(ints+inexact),
TD(O, f='Py_reciprocal'),
),
# This is no longer used as numpy.ones_like, however it is
# still used by some internal calls.
'_ones_like':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath._ones_like'),
'PyUFunc_OnesLikeTypeResolver',
TD(noobj),
TD(O, f='Py_get_one'),
),
'power':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.power'),
None,
TD(ints),
TD(inexact, f='pow', astype={'e':'f'}),
TD(O, f='npy_ObjectPower'),
),
'absolute':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.absolute'),
'PyUFunc_AbsoluteTypeResolver',
TD(bints+flts+timedeltaonly),
TD(cmplx, out=('f', 'd', 'g')),
TD(O, f='PyNumber_Absolute'),
),
'_arg':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath._arg'),
None,
TD(cmplx, out=('f', 'd', 'g')),
),
'negative':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.negative'),
'PyUFunc_NegativeTypeResolver',
TD(bints+flts+timedeltaonly),
TD(cmplx, f='neg'),
TD(O, f='PyNumber_Negative'),
),
'sign':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sign'),
'PyUFunc_SimpleUnaryOperationTypeResolver',
TD(nobool_or_datetime),
),
'greater':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.greater'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?'),
),
'greater_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.greater_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?'),
),
'less':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.less'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?'),
),
'less_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.less_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?'),
),
'equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?'),
),
'not_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.not_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?'),
),
'logical_and':
Ufunc(2, 1, One,
docstrings.get('numpy.core.umath.logical_and'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?'),
TD(O, f='npy_ObjectLogicalAnd'),
),
'logical_not':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.logical_not'),
None,
TD(nodatetime_or_obj, out='?'),
TD(O, f='npy_ObjectLogicalNot'),
),
'logical_or':
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.logical_or'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?'),
TD(O, f='npy_ObjectLogicalOr'),
),
'logical_xor':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.logical_xor'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?'),
TD(P, f='logical_xor'),
),
'maximum':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.maximum'),
'PyUFunc_SimpleBinaryOperationTypeResolver',
TD(noobj),
TD(O, f='npy_ObjectMax')
),
'minimum':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.minimum'),
'PyUFunc_SimpleBinaryOperationTypeResolver',
TD(noobj),
TD(O, f='npy_ObjectMin')
),
'fmax':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.fmax'),
'PyUFunc_SimpleBinaryOperationTypeResolver',
TD(noobj),
TD(O, f='npy_ObjectMax')
),
'fmin':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.fmin'),
'PyUFunc_SimpleBinaryOperationTypeResolver',
TD(noobj),
TD(O, f='npy_ObjectMin')
),
'logaddexp':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.logaddexp'),
None,
TD(flts, f="logaddexp", astype={'e':'f'})
),
'logaddexp2':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.logaddexp2'),
None,
TD(flts, f="logaddexp2", astype={'e':'f'})
),
'bitwise_and':
Ufunc(2, 1, AllOnes,
docstrings.get('numpy.core.umath.bitwise_and'),
None,
TD(bints),
TD(O, f='PyNumber_And'),
),
'bitwise_or':
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.bitwise_or'),
None,
TD(bints),
TD(O, f='PyNumber_Or'),
),
'bitwise_xor':
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.bitwise_xor'),
None,
TD(bints),
TD(O, f='PyNumber_Xor'),
),
'invert':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.invert'),
None,
TD(bints),
TD(O, f='PyNumber_Invert'),
),
'left_shift':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.left_shift'),
None,
TD(ints),
TD(O, f='PyNumber_Lshift'),
),
'right_shift':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.right_shift'),
None,
TD(ints),
TD(O, f='PyNumber_Rshift'),
),
'degrees':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.degrees'),
None,
TD(fltsP, f='degrees', astype={'e':'f'}),
),
'rad2deg':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.rad2deg'),
None,
TD(fltsP, f='rad2deg', astype={'e':'f'}),
),
'radians':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.radians'),
None,
TD(fltsP, f='radians', astype={'e':'f'}),
),
'deg2rad':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.deg2rad'),
None,
TD(fltsP, f='deg2rad', astype={'e':'f'}),
),
'arccos':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arccos'),
None,
TD(inexact, f='acos', astype={'e':'f'}),
TD(P, f='arccos'),
),
'arccosh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arccosh'),
None,
TD(inexact, f='acosh', astype={'e':'f'}),
TD(P, f='arccosh'),
),
'arcsin':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arcsin'),
None,
TD(inexact, f='asin', astype={'e':'f'}),
TD(P, f='arcsin'),
),
'arcsinh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arcsinh'),
None,
TD(inexact, f='asinh', astype={'e':'f'}),
TD(P, f='arcsinh'),
),
'arctan':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arctan'),
None,
TD(inexact, f='atan', astype={'e':'f'}),
TD(P, f='arctan'),
),
'arctanh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arctanh'),
None,
TD(inexact, f='atanh', astype={'e':'f'}),
TD(P, f='arctanh'),
),
'cos':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.cos'),
None,
TD(inexact, f='cos', astype={'e':'f'}),
TD(P, f='cos'),
),
'sin':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sin'),
None,
TD(inexact, f='sin', astype={'e':'f'}),
TD(P, f='sin'),
),
'tan':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.tan'),
None,
TD(inexact, f='tan', astype={'e':'f'}),
TD(P, f='tan'),
),
'cosh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.cosh'),
None,
TD(inexact, f='cosh', astype={'e':'f'}),
TD(P, f='cosh'),
),
'sinh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sinh'),
None,
TD(inexact, f='sinh', astype={'e':'f'}),
TD(P, f='sinh'),
),
'tanh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.tanh'),
None,
TD(inexact, f='tanh', astype={'e':'f'}),
TD(P, f='tanh'),
),
'exp':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.exp'),
None,
TD(inexact, f='exp', astype={'e':'f'}),
TD(P, f='exp'),
),
'exp2':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.exp2'),
None,
TD(inexact, f='exp2', astype={'e':'f'}),
TD(P, f='exp2'),
),
'expm1':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.expm1'),
None,
TD(inexact, f='expm1', astype={'e':'f'}),
TD(P, f='expm1'),
),
'log':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log'),
None,
TD(inexact, f='log', astype={'e':'f'}),
TD(P, f='log'),
),
'log2':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log2'),
None,
TD(inexact, f='log2', astype={'e':'f'}),
TD(P, f='log2'),
),
'log10':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log10'),
None,
TD(inexact, f='log10', astype={'e':'f'}),
TD(P, f='log10'),
),
'log1p':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log1p'),
None,
TD(inexact, f='log1p', astype={'e':'f'}),
TD(P, f='log1p'),
),
'sqrt':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sqrt'),
None,
TD('e', f='sqrt', astype={'e':'f'}),
TD(inexactvec),
TD(inexact, f='sqrt', astype={'e':'f'}),
TD(P, f='sqrt'),
),
'cbrt':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.cbrt'),
None,
TD(flts, f='cbrt', astype={'e':'f'}),
TD(P, f='cbrt'),
),
'ceil':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.ceil'),
None,
TD(flts, f='ceil', astype={'e':'f'}),
TD(P, f='ceil'),
),
'trunc':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.trunc'),
None,
TD(flts, f='trunc', astype={'e':'f'}),
TD(P, f='trunc'),
),
'fabs':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.fabs'),
None,
TD(flts, f='fabs', astype={'e':'f'}),
TD(P, f='fabs'),
),
'floor':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.floor'),
None,
TD(flts, f='floor', astype={'e':'f'}),
TD(P, f='floor'),
),
'rint':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.rint'),
None,
TD(inexact, f='rint', astype={'e':'f'}),
TD(P, f='rint'),
),
'arctan2':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.arctan2'),
None,
TD(flts, f='atan2', astype={'e':'f'}),
TD(P, f='arctan2'),
),
'remainder':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.remainder'),
None,
TD(intflt),
TD(O, f='PyNumber_Remainder'),
),
'hypot':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.hypot'),
None,
TD(flts, f='hypot', astype={'e':'f'}),
TD(P, f='hypot'),
),
'isnan':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isnan'),
None,
TD(inexact, out='?'),
),
'isinf':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isinf'),
None,
TD(inexact, out='?'),
),
'isfinite':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isfinite'),
None,
TD(inexact, out='?'),
),
'signbit':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.signbit'),
None,
TD(flts, out='?'),
),
'copysign':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.copysign'),
None,
TD(flts),
),
'nextafter':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.nextafter'),
None,
TD(flts),
),
'spacing':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.spacing'),
None,
TD(flts),
),
'modf':
Ufunc(1, 2, None,
docstrings.get('numpy.core.umath.modf'),
None,
TD(flts),
),
'ldexp' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.ldexp'),
None,
[TypeDescription('e', None, 'ei', 'e'),
TypeDescription('f', None, 'fi', 'f'),
TypeDescription('e', FuncNameSuffix('long'), 'el', 'e'),
TypeDescription('f', FuncNameSuffix('long'), 'fl', 'f'),
TypeDescription('d', None, 'di', 'd'),
TypeDescription('d', FuncNameSuffix('long'), 'dl', 'd'),
TypeDescription('g', None, 'gi', 'g'),
TypeDescription('g', FuncNameSuffix('long'), 'gl', 'g'),
],
),
'frexp' :
Ufunc(1, 2, None,
docstrings.get('numpy.core.umath.frexp'),
None,
[TypeDescription('e', None, 'e', 'ei'),
TypeDescription('f', None, 'f', 'fi'),
TypeDescription('d', None, 'd', 'di'),
TypeDescription('g', None, 'g', 'gi'),
],
)
}
# On Python 3 the '/' operator always means true division, so the separate
# 'divide' ufunc entry is dropped here; the C init code aliases the name
# 'divide' to true_divide instead.
if sys.version_info[0] >= 3:
    # Will be aliased to true_divide in umathmodule.c.src:InitOtherOperators
    del defdict['divide']
def indent(st, spaces):
    """Return *st* with every line prefixed by *spaces* spaces.

    Lines that would end up consisting solely of the added indentation
    (e.g. blank lines in *st*) have their trailing spaces stripped so the
    generated C source contains no trailing whitespace.
    """
    indention = ' '*spaces
    indented = indention + st.replace('\n', '\n'+indention)
    # trim off any trailing spaces on *every* line; the original call
    # lacked re.MULTILINE, so only the very end of the string was cleaned
    indented = re.sub(r' +$', r'', indented, flags=re.MULTILINE)
    return indented
# Map a type character to the suffix of the generic one-input/one-output
# inner-loop helper (e.g. 'f' -> PyUFunc_f_f) used when no specialized
# loop is provided for that type.
chartotype1 = {'e': 'e_e',
               'f': 'f_f',
               'd': 'd_d',
               'g': 'g_g',
               'F': 'F_F',
               'D': 'D_D',
               'G': 'G_G',
               'O': 'O_O',
               'P': 'O_O_method'}
# Same mapping for two-input/one-output loops (e.g. 'f' -> PyUFunc_ff_f).
chartotype2 = {'e': 'ee_e',
               'f': 'ff_f',
               'd': 'dd_d',
               'g': 'gg_g',
               'F': 'FF_F',
               'D': 'DD_D',
               'G': 'GG_G',
               'O': 'OO_O',
               'P': 'OO_O_method'}
#for each name
# 1) create functions, data, and signature
# 2) fill in functions and data in InitOperators
# 3) add function.
def make_arrays(funcdict):
    """Generate the C declarations of each ufunc's loop/data/signature arrays.

    Returns a pair of strings: the first holds the static array
    declarations (functions, data, signatures per ufunc); the second holds
    the statements, run inside InitOperators, that patch generic
    PyUFunc_* loops and object-method data pointers into those arrays.
    """
    # functions array contains an entry for every type implemented NULL
    # should be placed where PyUfunc_ style function will be filled in
    # later
    code1list = []
    code2list = []
    names = sorted(funcdict.keys())
    for name in names:
        uf = funcdict[name]
        funclist = []
        datalist = []
        siglist = []
        k = 0      # index into the per-ufunc functions/data arrays
        sub = 0    # count of entries using generic (patched-in) loops
        if uf.nin > 1:
            assert uf.nin == 2
            thedict = chartotype2  # two inputs and one output
        else:
            thedict = chartotype1  # one input and one output
        for t in uf.type_descriptions:
            if (t.func_data not in (None, FullTypeDescr) and
                    not isinstance(t.func_data, FuncNameSuffix)):
                # Generic loop: declare NULL now and patch the actual
                # PyUFunc_* helper (and its data pointer) in at init time.
                funclist.append('NULL')
                astype = ''
                if not t.astype is None:
                    astype = '_As_%s' % thedict[t.astype]
                astr = ('%s_functions[%d] = PyUFunc_%s%s;' %
                           (name, k, thedict[t.type], astype))
                code2list.append(astr)
                if t.type == 'O':
                    astr = ('%s_data[%d] = (void *) %s;' %
                               (name, k, t.func_data))
                    code2list.append(astr)
                    datalist.append('(void *)NULL')
                elif t.type == 'P':
                    # 'P' loops dispatch to a Python method by name.
                    datalist.append('(void *)"%s"' % t.func_data)
                else:
                    astr = ('%s_data[%d] = (void *) %s;' %
                               (name, k, t.func_data))
                    code2list.append(astr)
                    datalist.append('(void *)NULL')
                    #datalist.append('(void *)%s' % t.func_data)
                sub += 1
            elif t.func_data is FullTypeDescr:
                # Specialized loop named after its exact in/out signature.
                tname = english_upper(chartoname[t.type])
                datalist.append('(void *)NULL')
                funclist.append(
                    '%s_%s_%s_%s' % (tname, t.in_, t.out, name))
            elif isinstance(t.func_data, FuncNameSuffix):
                datalist.append('(void *)NULL')
                tname = english_upper(chartoname[t.type])
                funclist.append(
                    '%s_%s_%s' % (tname, name, t.func_data.suffix))
            else:
                # Default: a hand-written loop named TYPE_ufuncname.
                datalist.append('(void *)NULL')
                tname = english_upper(chartoname[t.type])
                funclist.append('%s_%s' % (tname, name))
            for x in t.in_ + t.out:
                siglist.append('NPY_%s' % (english_upper(chartoname[x]),))
            k += 1
        funcnames = ', '.join(funclist)
        signames = ', '.join(siglist)
        datanames = ', '.join(datalist)
        code1list.append("static PyUFuncGenericFunction %s_functions[] = {%s};"
                         % (name, funcnames))
        code1list.append("static void * %s_data[] = {%s};"
                         % (name, datanames))
        code1list.append("static char %s_signatures[] = {%s};"
                         % (name, signames))
    return "\n".join(code1list), "\n".join(code2list)
def make_ufuncs(funcdict):
    """Generate the InitOperators statements that create and register ufuncs.

    For every entry in *funcdict* this emits a PyUFunc_FromFuncAndData
    call (with the C-escaped docstring), an optional type-resolver
    assignment, and the PyDict_SetItemString registration.
    """
    code3list = []
    names = sorted(funcdict.keys())
    for name in names:
        uf = funcdict[name]
        mlist = []
        # Escape the Python docstring so it can be embedded as a C string
        # literal; escaping differs between Python 2 and 3.
        docstring = textwrap.dedent(uf.docstring).strip()
        if sys.version_info[0] < 3:
            docstring = docstring.encode('string-escape')
            docstring = docstring.replace(r'"', r'\"')
        else:
            docstring = docstring.encode('unicode-escape').decode('ascii')
            docstring = docstring.replace(r'"', r'\"')
            # XXX: I don't understand why the following replace is not
            # necessary in the python 2 case.
            docstring = docstring.replace(r"'", r"\'")
        # Split the docstring because some compilers (like MS) do not like big
        # string literal in C code. We split at endlines because textwrap.wrap
        # do not play well with \n
        docstring = '\\n\"\"'.join(docstring.split(r"\n"))
        mlist.append(\
r"""f = PyUFunc_FromFuncAndData(%s_functions, %s_data, %s_signatures, %d,
                                %d, %d, %s, "%s",
                                "%s", 0);""" % (name, name, name,
                                len(uf.type_descriptions),
                                uf.nin, uf.nout,
                                uf.identity,
                                name, docstring))
        if uf.typereso is not None:
            # Install a custom type-resolution function for this ufunc.
            mlist.append(
                r"((PyUFuncObject *)f)->type_resolver = &%s;" % uf.typereso)
        mlist.append(r"""PyDict_SetItemString(dictionary, "%s", f);""" % name)
        mlist.append(r"""Py_DECREF(f);""")
        code3list.append('\n'.join(mlist))
    return '\n'.join(code3list)
def make_code(funcdict, filename):
    """Assemble the complete generated C source for the umath module.

    *filename* is embedded in the warning banner so readers know which
    generator script to edit instead of the generated file.
    """
    code1, code2 = make_arrays(funcdict)
    code3 = make_ufuncs(funcdict)
    # The patch-up and registration statements run inside InitOperators,
    # hence the 4-space indent.
    code2 = indent(code2, 4)
    code3 = indent(code3, 4)
    code = r"""
/** Warning this file is autogenerated!!!
    Please make changes to the code generator program (%s)
**/
%s
static void
InitOperators(PyObject *dictionary) {
    PyObject *f;
%s
%s
}
""" % (filename, code1, code2, code3)
    return code
if __name__ == "__main__":
    filename = __file__
    # Generate the code first, then write it with a context manager so the
    # output file is always closed (the original used open/close by hand and
    # would leak the handle if make_code raised).
    code = make_code(defdict, filename)
    with open('__umath_generated.c', 'w') as fid:
        fid.write(code)
| bsd-3-clause |
nazeehshoura/crawler | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/chardetect.py | 743 | 1141 | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from io import open
from sys import argv, stdin
from chardet.universaldetector import UniversalDetector
def description_of(file, name='stdin'):
    """Return a human-readable guess at the probable encoding of *file*.

    *file* is any iterable of byte chunks (e.g. an open binary file);
    *name* is the label used in the returned message.
    """
    detector = UniversalDetector()
    for chunk in file:
        detector.feed(chunk)
    detector.close()
    encoding = detector.result['encoding']
    confidence = detector.result['confidence']
    if not encoding:
        return '%s: no result' % name
    return '%s: %s with confidence %s' % (name, encoding, confidence)
def main():
    """Report the detected encoding of each path given on the command line.

    With no arguments, detect the encoding of standard input instead.
    """
    paths = argv[1:]
    if not paths:
        print(description_of(stdin))
        return
    for path in paths:
        with open(path, 'rb') as handle:
            print(description_of(handle, path))
# Allow the module to be run directly as a command-line tool.
if __name__ == '__main__':
    main()
| mit |
itaiin/arrow | python/pyarrow/tests/test_jvm.py | 5 | 13848 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import pyarrow as pa
import pyarrow.jvm as pa_jvm
import pytest
import six
import sys
import xml.etree.ElementTree as ET
jpype = pytest.importorskip("jpype")
@pytest.fixture(scope="session")
def root_allocator():
    """Start a JVM with the Arrow tools jar and return a RootAllocator.

    The jar is located via the Maven version read from java/pom.xml
    relative to this file; the ARROW_TOOLS_JAR environment variable
    overrides the computed path. Session-scoped because a JVM can only be
    started once per process.
    """
    # This test requires Arrow Java to be built in the same source tree
    pom_path = os.path.join(
        os.path.dirname(__file__), '..', '..', '..',
        'java', 'pom.xml')
    tree = ET.parse(pom_path)
    version = tree.getroot().find(
        'POM:version',
        namespaces={
            'POM': 'http://maven.apache.org/POM/4.0.0'
        }).text
    jar_path = os.path.join(
        os.path.dirname(__file__), '..', '..', '..',
        'java', 'tools', 'target',
        'arrow-tools-{}-jar-with-dependencies.jar'.format(version))
    # Allow an explicit override, e.g. in CI environments.
    jar_path = os.getenv("ARROW_TOOLS_JAR", jar_path)
    jpype.startJVM(jpype.getDefaultJVMPath(), "-Djava.class.path=" + jar_path)
    return jpype.JPackage("org").apache.arrow.memory.RootAllocator(sys.maxsize)
def test_jvm_buffer(root_allocator):
    """Round-trip an 8-byte JVM buffer into a pyarrow buffer."""
    size = 8
    jvm_buffer = root_allocator.buffer(size)
    # Fill with the descending values 8..1.
    for index, value in enumerate(range(size, 0, -1)):
        jvm_buffer.setByte(index, value)
    converted = pa_jvm.jvm_buffer(jvm_buffer)
    assert converted.to_pybytes() == b'\x08\x07\x06\x05\x04\x03\x02\x01'
def _jvm_field(jvm_spec):
    """Deserialize a JSON field spec into an Arrow JVM pojo.Field.

    Uses Jackson's ObjectMapper on the Java side so the spec is parsed
    exactly as Arrow Java itself would parse it.
    """
    om = jpype.JClass('com.fasterxml.jackson.databind.ObjectMapper')()
    pojo_Field = jpype.JClass('org.apache.arrow.vector.types.pojo.Field')
    return om.readValue(jvm_spec, pojo_Field)
def _jvm_schema(jvm_spec, metadata=None):
    """Build a JVM Schema containing the single field described by *jvm_spec*.

    When *metadata* is truthy, its entries are copied into a
    java.util.HashMap and attached to the schema.
    """
    java_fields = jpype.JClass('java.util.ArrayList')()
    java_fields.add(_jvm_field(jvm_spec))
    schema_cls = jpype.JClass('org.apache.arrow.vector.types.pojo.Schema')
    if not metadata:
        return schema_cls(java_fields)
    java_meta = jpype.JClass('java.util.HashMap')()
    for key, value in six.iteritems(metadata):
        java_meta.put(key, value)
    return schema_cls(java_fields, java_meta)
# In the following, we use the JSON serialization of the Field objects in Java.
# This ensures that we neither rely on the exact mechanics on how to construct
# them using Java code as well as enables us to define them as parameters
# without to invoke the JVM.
#
# The specifications were created using:
#
# om = jpype.JClass('com.fasterxml.jackson.databind.ObjectMapper')()
# field = … # Code to instantiate the field
# jvm_spec = om.writeValueAsString(field)
@pytest.mark.parametrize('pa_type,jvm_spec', [
    (pa.null(), '{"name":"null"}'),
    (pa.bool_(), '{"name":"bool"}'),
    (pa.int8(), '{"name":"int","bitWidth":8,"isSigned":true}'),
    (pa.int16(), '{"name":"int","bitWidth":16,"isSigned":true}'),
    (pa.int32(), '{"name":"int","bitWidth":32,"isSigned":true}'),
    (pa.int64(), '{"name":"int","bitWidth":64,"isSigned":true}'),
    (pa.uint8(), '{"name":"int","bitWidth":8,"isSigned":false}'),
    (pa.uint16(), '{"name":"int","bitWidth":16,"isSigned":false}'),
    (pa.uint32(), '{"name":"int","bitWidth":32,"isSigned":false}'),
    (pa.uint64(), '{"name":"int","bitWidth":64,"isSigned":false}'),
    (pa.float16(), '{"name":"floatingpoint","precision":"HALF"}'),
    (pa.float32(), '{"name":"floatingpoint","precision":"SINGLE"}'),
    (pa.float64(), '{"name":"floatingpoint","precision":"DOUBLE"}'),
    (pa.time32('s'), '{"name":"time","unit":"SECOND","bitWidth":32}'),
    (pa.time32('ms'), '{"name":"time","unit":"MILLISECOND","bitWidth":32}'),
    (pa.time64('us'), '{"name":"time","unit":"MICROSECOND","bitWidth":64}'),
    (pa.time64('ns'), '{"name":"time","unit":"NANOSECOND","bitWidth":64}'),
    (pa.timestamp('s'), '{"name":"timestamp","unit":"SECOND",'
        '"timezone":null}'),
    (pa.timestamp('ms'), '{"name":"timestamp","unit":"MILLISECOND",'
        '"timezone":null}'),
    (pa.timestamp('us'), '{"name":"timestamp","unit":"MICROSECOND",'
        '"timezone":null}'),
    (pa.timestamp('ns'), '{"name":"timestamp","unit":"NANOSECOND",'
        '"timezone":null}'),
    (pa.timestamp('ns', tz='UTC'), '{"name":"timestamp","unit":"NANOSECOND"'
        ',"timezone":"UTC"}'),
    (pa.timestamp('ns', tz='Europe/Paris'), '{"name":"timestamp",'
        '"unit":"NANOSECOND","timezone":"Europe/Paris"}'),
    (pa.date32(), '{"name":"date","unit":"DAY"}'),
    (pa.date64(), '{"name":"date","unit":"MILLISECOND"}'),
    (pa.decimal128(19, 4), '{"name":"decimal","precision":19,"scale":4}'),
    (pa.string(), '{"name":"utf8"}'),
    (pa.binary(), '{"name":"binary"}'),
    (pa.binary(10), '{"name":"fixedsizebinary","byteWidth":10}'),
    # TODO(ARROW-2609): complex types that have children
    # pa.list_(pa.int32()),
    # pa.struct([pa.field('a', pa.int32()),
    #            pa.field('b', pa.int8()),
    #            pa.field('c', pa.string())]),
    # pa.union([pa.field('a', pa.binary(10)),
    #           pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE),
    # pa.union([pa.field('a', pa.binary(10)),
    #           pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE),
    # TODO: DictionaryType requires a vector in the type
    # pa.dictionary(pa.int32(), pa.array(['a', 'b', 'c'])),
])
@pytest.mark.parametrize('nullable', [True, False])
def test_jvm_types(root_allocator, pa_type, jvm_spec, nullable):
    """Check that JVM Field/Schema objects convert to matching pyarrow ones.

    The JVM field is built from its Jackson JSON representation so no
    Java-side construction code is needed (see comment block above).
    """
    spec = {
        'name': 'field_name',
        'nullable': nullable,
        'type': json.loads(jvm_spec),
        # TODO: This needs to be set for complex types
        'children': []
    }
    jvm_field = _jvm_field(json.dumps(spec))
    result = pa_jvm.field(jvm_field)
    expected_field = pa.field('field_name', pa_type, nullable=nullable)
    assert result == expected_field
    jvm_schema = _jvm_schema(json.dumps(spec))
    result = pa_jvm.schema(jvm_schema)
    assert result == pa.schema([expected_field])
    # Schema with custom metadata
    jvm_schema = _jvm_schema(json.dumps(spec), {'meta': 'data'})
    result = pa_jvm.schema(jvm_schema)
    assert result == pa.schema([expected_field], {'meta': 'data'})
# These test parameters mostly use an integer range as an input as this is
# often the only type that is understood by both Python and Java
# implementations of Arrow.
@pytest.mark.parametrize('pa_type,py_data,jvm_type', [
    (pa.bool_(), [True, False, True, True], 'BitVector'),
    (pa.uint8(), list(range(128)), 'UInt1Vector'),
    (pa.uint16(), list(range(128)), 'UInt2Vector'),
    (pa.int32(), list(range(128)), 'IntVector'),
    (pa.int64(), list(range(128)), 'BigIntVector'),
    (pa.float32(), list(range(128)), 'Float4Vector'),
    (pa.float64(), list(range(128)), 'Float8Vector'),
    (pa.timestamp('s'), list(range(128)), 'TimeStampSecVector'),
    (pa.timestamp('ms'), list(range(128)), 'TimeStampMilliVector'),
    (pa.timestamp('us'), list(range(128)), 'TimeStampMicroVector'),
    (pa.timestamp('ns'), list(range(128)), 'TimeStampNanoVector'),
    # TODO(ARROW-2605): These types miss a conversion from pure Python objects
    # * pa.time32('s')
    # * pa.time32('ms')
    # * pa.time64('us')
    # * pa.time64('ns')
    (pa.date32(), list(range(128)), 'DateDayVector'),
    (pa.date64(), list(range(128)), 'DateMilliVector'),
    # TODO(ARROW-2606): pa.decimal128(19, 4)
])
def test_jvm_array(root_allocator, pa_type, py_data, jvm_type):
    """Fill a JVM vector with *py_data* and check conversion to a pyarrow array."""
    # Create vector
    cls = "org.apache.arrow.vector.{}".format(jvm_type)
    jvm_vector = jpype.JClass(cls)("vector", root_allocator)
    jvm_vector.allocateNew(len(py_data))
    for i, val in enumerate(py_data):
        jvm_vector.setSafe(i, val)
    jvm_vector.setValueCount(len(py_data))
    py_array = pa.array(py_data, type=pa_type)
    jvm_array = pa_jvm.array(jvm_vector)
    assert py_array.equals(jvm_array)
# These test parameters mostly use an integer range as an input as this is
# often the only type that is understood by both Python and Java
# implementations of Arrow.
@pytest.mark.parametrize('pa_type,py_data,jvm_type,jvm_spec', [
    # TODO: null
    (pa.bool_(), [True, False, True, True], 'BitVector', '{"name":"bool"}'),
    (
        pa.uint8(),
        list(range(128)),
        'UInt1Vector',
        '{"name":"int","bitWidth":8,"isSigned":false}'
    ),
    (
        pa.uint16(),
        list(range(128)),
        'UInt2Vector',
        '{"name":"int","bitWidth":16,"isSigned":false}'
    ),
    (
        pa.uint32(),
        list(range(128)),
        'UInt4Vector',
        '{"name":"int","bitWidth":32,"isSigned":false}'
    ),
    (
        pa.uint64(),
        list(range(128)),
        'UInt8Vector',
        '{"name":"int","bitWidth":64,"isSigned":false}'
    ),
    (
        pa.int8(),
        list(range(128)),
        'TinyIntVector',
        '{"name":"int","bitWidth":8,"isSigned":true}'
    ),
    (
        pa.int16(),
        list(range(128)),
        'SmallIntVector',
        '{"name":"int","bitWidth":16,"isSigned":true}'
    ),
    (
        pa.int32(),
        list(range(128)),
        'IntVector',
        '{"name":"int","bitWidth":32,"isSigned":true}'
    ),
    (
        pa.int64(),
        list(range(128)),
        'BigIntVector',
        '{"name":"int","bitWidth":64,"isSigned":true}'
    ),
    # TODO: float16
    (
        pa.float32(),
        list(range(128)),
        'Float4Vector',
        '{"name":"floatingpoint","precision":"SINGLE"}'
    ),
    (
        pa.float64(),
        list(range(128)),
        'Float8Vector',
        '{"name":"floatingpoint","precision":"DOUBLE"}'
    ),
    (
        pa.timestamp('s'),
        list(range(128)),
        'TimeStampSecVector',
        '{"name":"timestamp","unit":"SECOND","timezone":null}'
    ),
    (
        pa.timestamp('ms'),
        list(range(128)),
        'TimeStampMilliVector',
        '{"name":"timestamp","unit":"MILLISECOND","timezone":null}'
    ),
    (
        pa.timestamp('us'),
        list(range(128)),
        'TimeStampMicroVector',
        '{"name":"timestamp","unit":"MICROSECOND","timezone":null}'
    ),
    (
        pa.timestamp('ns'),
        list(range(128)),
        'TimeStampNanoVector',
        '{"name":"timestamp","unit":"NANOSECOND","timezone":null}'
    ),
    # TODO(ARROW-2605): These types miss a conversion from pure Python objects
    # * pa.time32('s')
    # * pa.time32('ms')
    # * pa.time64('us')
    # * pa.time64('ns')
    (
        pa.date32(),
        list(range(128)),
        'DateDayVector',
        '{"name":"date","unit":"DAY"}'
    ),
    (
        pa.date64(),
        list(range(128)),
        'DateMilliVector',
        '{"name":"date","unit":"MILLISECOND"}'
    ),
    # TODO(ARROW-2606): pa.decimal128(19, 4)
])
def test_jvm_record_batch(root_allocator, pa_type, py_data, jvm_type,
                          jvm_spec):
    """Build a one-column JVM VectorSchemaRoot and compare its conversion
    with a pyarrow RecordBatch built from the same data."""
    # Create vector
    cls = "org.apache.arrow.vector.{}".format(jvm_type)
    jvm_vector = jpype.JClass(cls)("vector", root_allocator)
    jvm_vector.allocateNew(len(py_data))
    for i, val in enumerate(py_data):
        jvm_vector.setSafe(i, val)
    jvm_vector.setValueCount(len(py_data))
    # Create field
    spec = {
        'name': 'field_name',
        'nullable': False,
        'type': json.loads(jvm_spec),
        # TODO: This needs to be set for complex types
        'children': []
    }
    jvm_field = _jvm_field(json.dumps(spec))
    # Create VectorSchemaRoot
    jvm_fields = jpype.JClass('java.util.ArrayList')()
    jvm_fields.add(jvm_field)
    jvm_vectors = jpype.JClass('java.util.ArrayList')()
    jvm_vectors.add(jvm_vector)
    jvm_vsr = jpype.JClass('org.apache.arrow.vector.VectorSchemaRoot')
    jvm_vsr = jvm_vsr(jvm_fields, jvm_vectors, len(py_data))
    py_record_batch = pa.RecordBatch.from_arrays(
        [pa.array(py_data, type=pa_type)],
        ['col']
    )
    jvm_record_batch = pa_jvm.record_batch(jvm_vsr)
    assert py_record_batch.equals(jvm_record_batch)
def _string_to_varchar_holder(ra, string):
    """Pack *string* into a JVM NullableVarCharHolder allocated from *ra*.

    A ``None`` input produces an unset holder (isSet == 0); any other
    value is UTF-8 encoded into a freshly allocated JVM buffer.
    """
    nvch_cls = "org.apache.arrow.vector.holders.NullableVarCharHolder"
    holder = jpype.JClass(nvch_cls)()
    if string is None:
        holder.isSet = 0
    else:
        holder.isSet = 1
        # Bug fix: encode the *string* argument, not the literal "string" —
        # the parameter was previously ignored for every non-None value.
        value = jpype.JClass("java.lang.String")(string)
        std_charsets = jpype.JClass("java.nio.charset.StandardCharsets")
        bytes_ = value.getBytes(std_charsets.UTF_8)
        holder.buffer = ra.buffer(len(bytes_))
        holder.buffer.setBytes(0, bytes_, 0, len(bytes_))
        holder.start = 0
        holder.end = len(bytes_)
    return holder
# TODO(ARROW-2607)
@pytest.mark.xfail(reason="from_buffers is only supported for "
                   "primitive arrays yet")
def test_jvm_string_array(root_allocator):
    """Round-trip a nullable string vector from the JVM into pyarrow."""
    data = [u"string", None, u"töst"]
    cls = "org.apache.arrow.vector.VarCharVector"
    jvm_vector = jpype.JClass(cls)("vector", root_allocator)
    jvm_vector.allocateNew()
    for i, string in enumerate(data):
        # Bug fix: pack the current element, not the literal "string", so
        # the None entry becomes a null and the unicode entry keeps its
        # value — previously every slot received "string".
        holder = _string_to_varchar_holder(root_allocator, string)
        jvm_vector.setSafe(i, holder)
        jvm_vector.setValueCount(i + 1)
    py_array = pa.array(data, type=pa.string())
    jvm_array = pa_jvm.array(jvm_vector)
    assert py_array.equals(jvm_array)
| apache-2.0 |
percy-g2/Novathor_xperia_u8500 | 6.1.1.B.0.253/external/webkit/Tools/TestResultServer/model/jsonresults_unittest.py | 15 | 14980 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# jsonresults lives in the TestResultServer tree and depends on the App
# Engine SDK; give a helpful hint when the PYTHONPATH is incomplete.
# (Python 2 file — note the print statement.)
try:
    import jsonresults
    from jsonresults import JsonResults
except ImportError:
    print "ERROR: Add the TestResultServer, google_appengine and yaml/lib directories to your PYTHONPATH"
import unittest
# Skeleton of a full aggregated-results payload for one builder; the
# [TESTDATA_*] placeholders are filled in by JsonResultsTest._make_test_json().
JSON_RESULTS_TEMPLATE = (
    '{"Webkit":{'
    '"allFixableCount":[[TESTDATA_COUNT]],'
    '"buildNumbers":[[TESTDATA_BUILDNUMBERS]],'
    '"chromeRevision":[[TESTDATA_CHROMEREVISION]],'
    '"deferredCounts":[[TESTDATA_COUNTS]],'
    '"fixableCount":[[TESTDATA_COUNT]],'
    '"fixableCounts":[[TESTDATA_COUNTS]],'
    '"secondsSinceEpoch":[[TESTDATA_TIMES]],'
    '"tests":{[TESTDATA_TESTS]},'
    '"webkitRevision":[[TESTDATA_WEBKITREVISION]],'
    '"wontfixCounts":[[TESTDATA_COUNTS]]'
    '},'
    '"version":3'
    '}')
# Per-build result-category counts; every category gets the same [TESTDATA]
# value in the fixtures.
JSON_RESULTS_COUNTS_TEMPLATE = (
    '{'
    '"C":[TESTDATA],'
    '"F":[TESTDATA],'
    '"I":[TESTDATA],'
    '"O":[TESTDATA],'
    '"P":[TESTDATA],'
    '"T":[TESTDATA],'
    '"X":[TESTDATA],'
    '"Z":[TESTDATA]}')
# Results/times entry for a single test name.
JSON_RESULTS_TESTS_TEMPLATE = (
    '"[TESTDATA_TEST_NAME]":{'
    '"results":[[TESTDATA_TEST_RESULTS]],'
    '"times":[[TESTDATA_TEST_TIMES]]}')
# The server wraps all payloads in an ADD_RESULTS(...) JSONP callback.
JSON_RESULTS_PREFIX = "ADD_RESULTS("
JSON_RESULTS_SUFFIX = ");"
# Minimal payload shape returned by JsonResults.get_test_list().
JSON_RESULTS_TEST_LIST_TEMPLATE = (
    '{"Webkit":{"tests":{[TESTDATA_TESTS]}}}')
class JsonResultsTest(unittest.TestCase):
    """Unit tests for JsonResults.merge() and JsonResults.get_test_list().

    Fixtures are expressed compactly as (builds, tests) tuples and
    expanded into full aggregated-results JSON via the module-level
    templates, so each scenario reads as data rather than raw JSON.
    """
    def setUp(self):
        # Every scenario exercises a single builder named "Webkit".
        self._builder = "Webkit"
    def _make_test_json(self, test_data):
        """Expand a (builds, tests) tuple into an ADD_RESULTS(...) payload.

        Returns the bare prefix+suffix (an empty payload) when there is
        no data at all.
        """
        if not test_data:
            return JSON_RESULTS_PREFIX + JSON_RESULTS_SUFFIX
        (builds, tests) = test_data
        if not builds or not tests:
            return JSON_RESULTS_PREFIX + JSON_RESULTS_SUFFIX
        json = JSON_RESULTS_TEMPLATE
        counts = []
        build_numbers = []
        webkit_revision = []
        chrome_revision = []
        times = []
        for build in builds:
            # Derive predictable per-build metadata from the one-character
            # build id so fixtures stay readable.
            counts.append(JSON_RESULTS_COUNTS_TEMPLATE.replace("[TESTDATA]", build))
            build_numbers.append("1000%s" % build)
            webkit_revision.append("2000%s" % build)
            chrome_revision.append("3000%s" % build)
            times.append("100000%s000" % build)
        json = json.replace("[TESTDATA_COUNTS]", ",".join(counts))
        json = json.replace("[TESTDATA_COUNT]", ",".join(builds))
        json = json.replace("[TESTDATA_BUILDNUMBERS]", ",".join(build_numbers))
        json = json.replace("[TESTDATA_WEBKITREVISION]", ",".join(webkit_revision))
        json = json.replace("[TESTDATA_CHROMEREVISION]", ",".join(chrome_revision))
        json = json.replace("[TESTDATA_TIMES]", ",".join(times))
        json_tests = []
        for test in tests:
            # Each test is a [name, results, times] triple.
            t = JSON_RESULTS_TESTS_TEMPLATE.replace("[TESTDATA_TEST_NAME]", test[0])
            t = t.replace("[TESTDATA_TEST_RESULTS]", test[1])
            t = t.replace("[TESTDATA_TEST_TIMES]", test[2])
            json_tests.append(t)
        json = json.replace("[TESTDATA_TESTS]", ",".join(json_tests))
        return JSON_RESULTS_PREFIX + json + JSON_RESULTS_SUFFIX
    def _test_merge(self, aggregated_data, incremental_data, expected_data, max_builds=jsonresults.JSON_RESULTS_MAX_BUILDS):
        """Merge incremental results into aggregated results and verify.

        A falsy *expected_data* asserts that the merge is rejected
        (merge() returns a falsy value).
        """
        aggregated_results = self._make_test_json(aggregated_data)
        incremental_results = self._make_test_json(incremental_data)
        merged_results = JsonResults.merge(self._builder,
            aggregated_results, incremental_results, max_builds,
            sort_keys=True)
        if expected_data:
            expected_results = self._make_test_json(expected_data)
            self.assertEquals(merged_results, expected_results)
        else:
            self.assertFalse(merged_results)
    def _test_get_test_list(self, input_data, expected_data):
        """Verify get_test_list() strips results down to bare test names."""
        input_results = self._make_test_json(input_data)
        json_tests = []
        for test in expected_data:
            json_tests.append("\"" + test + "\":{}")
        expected_results = JSON_RESULTS_PREFIX + \
            JSON_RESULTS_TEST_LIST_TEMPLATE.replace(
                "[TESTDATA_TESTS]", ",".join(json_tests)) + \
            JSON_RESULTS_SUFFIX
        actual_results = JsonResults.get_test_list(self._builder, input_results)
        self.assertEquals(actual_results, expected_results)
    def test(self):
        """Exercise the merge scenarios end to end (see inline comments)."""
        # Empty incremental results json.
        # Nothing to merge.
        self._test_merge(
            # Aggregated results
            (["2", "1"], [["001.html", "[200,\"F\"]", "[200,0]"]]),
            # Incremental results
            None,
            # Expect no merge happens.
            None)
        # No actual incremental test results (only prefix and suffix) to merge.
        # Nothing to merge.
        self._test_merge(
            # Aggregated results
            (["2", "1"], [["001.html", "[200,\"F\"]", "[200,0]"]]),
            # Incremental results
            ([], []),
            # Expected no merge happens.
            None)
        # No existing aggregated results.
        # Merged results == new incremental results.
        self._test_merge(
            # Aggregated results
            None,
            # Incremental results
            (["2", "1"], [["001.html", "[200,\"F\"]", "[200,0]"]]),
            # Expected results
            (["2", "1"], [["001.html", "[200,\"F\"]", "[200,0]"]]))
        # Single test for single run.
        # Incremental results has the latest build and same test results for
        # that run.
        # Insert the incremental results at the first place and sum number
        # of runs for "P" (200 + 1) to get merged results.
        self._test_merge(
            # Aggregated results
            (["2", "1"], [["001.html", "[200,\"F\"]", "[200,0]"]]),
            # Incremental results
            (["3"], [["001.html", "[1,\"F\"]", "[1,0]"]]),
            # Expected results
            (["3", "2", "1"], [["001.html", "[201,\"F\"]", "[201,0]"]]))
        # Single test for single run.
        # Incremental results has the latest build but different test results
        # for that run.
        # Insert the incremental results at the first place.
        self._test_merge(
            # Aggregated results
            (["2", "1"], [["001.html", "[200,\"F\"]", "[200,0]"]]),
            # Incremental results
            (["3"], [["001.html", "[1, \"I\"]", "[1,1]"]]),
            # Expected results
            (["3", "2", "1"], [["001.html", "[1,\"I\"],[200,\"F\"]", "[1,1],[200,0]"]]))
        # Single test for single run.
        # Incremental results has the latest build but different test results
        # for that run.
        self._test_merge(
            # Aggregated results
            (["2", "1"], [["001.html", "[200,\"F\"],[10,\"I\"]", "[200,0],[10,1]"]]),
            # Incremental results
            (["3"], [["001.html", "[1,\"I\"]", "[1,1]"]]),
            # Expected results
            (["3", "2", "1"], [["001.html", "[1,\"I\"],[200,\"F\"],[10,\"I\"]", "[1,1],[200,0],[10,1]"]]))
        # Multiple tests for single run.
        # All tests have incremental updates.
        self._test_merge(
            # Aggregated results
            (["2", "1"], [["001.html", "[200,\"F\"]", "[200,0]"], ["002.html", "[100,\"I\"]", "[100,1]"]]),
            # Incremental results
            (["3"], [["001.html", "[1,\"F\"]", "[1,0]"], ["002.html", "[1,\"I\"]", "[1,1]"]]),
            # Expected results
            (["3", "2", "1"], [["001.html", "[201,\"F\"]", "[201,0]"], ["002.html", "[101,\"I\"]", "[101,1]"]]))
        # Multiple tests for single run.
        self._test_merge(
            # Aggregated results
            (["2", "1"], [["001.html", "[200,\"F\"]", "[200,0]"], ["002.html", "[100,\"I\"]", "[100,1]"]]),
            # Incremental results
            (["3"], [["002.html", "[1,\"I\"]", "[1,1]"]]),
            # Expected results
            (["3", "2", "1"], [["001.html", "[1,\"N\"],[200,\"F\"]", "[201,0]"], ["002.html", "[101,\"I\"]", "[101,1]"]]))
        # Single test for multiple runs.
        self._test_merge(
            # Aggregated results
            (["2", "1"], [["001.html", "[200,\"F\"]", "[200,0]"]]),
            # Incremental results
            (["4", "3"], [["001.html", "[2, \"I\"]", "[2,2]"]]),
            # Expected results
            (["4", "3", "2", "1"], [["001.html", "[2,\"I\"],[200,\"F\"]", "[2,2],[200,0]"]]))
        # Multiple tests for multiple runs.
        self._test_merge(
            # Aggregated results
            (["2", "1"], [["001.html", "[200,\"F\"]", "[200,0]"], ["002.html", "[10,\"Z\"]", "[10,0]"]]),
            # Incremental results
            (["4", "3"], [["001.html", "[2, \"I\"]", "[2,2]"], ["002.html", "[1,\"C\"]", "[1,1]"]]),
            # Expected results
            (["4", "3", "2", "1"], [["001.html", "[2,\"I\"],[200,\"F\"]", "[2,2],[200,0]"], ["002.html", "[1,\"C\"],[10,\"Z\"]", "[1,1],[10,0]"]]))
        # Test the build in incremental results is older than the most recent
        # build in aggregated results.
        # The incremental results should be dropped and no merge happens.
        self._test_merge(
            # Aggregated results
            (["3", "1"], [["001.html", "[200,\"F\"]", "[200,0]"]]),
            # Incremental results
            (["2"], [["001.html", "[1, \"F\"]", "[1,0]"]]),
            # Expected no merge happens.
            None)
        # Test the build in incremental results is same as the build in
        # aggregated results.
        # The incremental results should be dropped and no merge happens.
        self._test_merge(
            # Aggregated results
            (["2", "1"], [["001.html", "[200,\"F\"]", "[200,0]"]]),
            # Incremental results
            (["3", "2"], [["001.html", "[2, \"F\"]", "[2,0]"]]),
            # Expected no merge happens.
            None)
        # Remove test where there is no data in all runs.
        self._test_merge(
            # Aggregated results
            (["2", "1"], [["001.html", "[200,\"N\"]", "[200,0]"], ["002.html", "[10,\"F\"]", "[10,0]"]]),
            # Incremental results
            (["3"], [["001.html", "[1,\"N\"]", "[1,0]"], ["002.html", "[1,\"P\"]", "[1,0]"]]),
            # Expected results
            (["3", "2", "1"], [["002.html", "[1,\"P\"],[10,\"F\"]", "[11,0]"]]))
        # Remove test where all run pass and max running time < 1 seconds
        self._test_merge(
            # Aggregated results
            (["2", "1"], [["001.html", "[200,\"P\"]", "[200,0]"], ["002.html", "[10,\"F\"]", "[10,0]"]]),
            # Incremental results
            (["3"], [["001.html", "[1,\"P\"]", "[1,0]"], ["002.html", "[1,\"P\"]", "[1,0]"]]),
            # Expected results
            (["3", "2", "1"], [["002.html", "[1,\"P\"],[10,\"F\"]", "[11,0]"]]))
        # Do not remove test where all run pass but max running time >= 1 seconds
        self._test_merge(
            # Aggregated results
            (["2", "1"], [["001.html", "[200,\"P\"]", "[200,0]"], ["002.html", "[10,\"F\"]", "[10,0]"]]),
            # Incremental results
            (["3"], [["001.html", "[1,\"P\"]", "[1,1]"], ["002.html", "[1,\"P\"]", "[1,0]"]]),
            # Expected results
            (["3", "2", "1"], [["001.html", "[201,\"P\"]", "[1,1],[200,0]"], ["002.html", "[1,\"P\"],[10,\"F\"]", "[11,0]"]]))
        # Remove items from test results and times that exceed the max number
        # of builds to track.
        max_builds = str(jsonresults.JSON_RESULTS_MAX_BUILDS)
        self._test_merge(
            # Aggregated results
            (["2", "1"], [["001.html", "[" + max_builds + ",\"F\"],[1,\"I\"]", "[" + max_builds + ",0],[1,1]"]]),
            # Incremental results
            (["3"], [["001.html", "[1,\"T\"]", "[1,1]"]]),
            # Expected results
            (["3", "2", "1"], [["001.html", "[1,\"T\"],[" + max_builds + ",\"F\"]", "[1,1],[" + max_builds + ",0]"]]))
        # Remove items from test results and times that exceed the max number
        # of builds to track, using smaller threshold.
        max_builds = str(jsonresults.JSON_RESULTS_MAX_BUILDS_SMALL)
        self._test_merge(
            # Aggregated results
            (["2", "1"], [["001.html", "[" + max_builds + ",\"F\"],[1,\"I\"]", "[" + max_builds + ",0],[1,1]"]]),
            # Incremental results
            (["3"], [["001.html", "[1,\"T\"]", "[1,1]"]]),
            # Expected results
            (["3", "2", "1"], [["001.html", "[1,\"T\"],[" + max_builds + ",\"F\"]", "[1,1],[" + max_builds + ",0]"]]),
            int(max_builds))
        # Test that merging in a new result of the same type as the last result
        # causes old results to fall off.
        max_builds = str(jsonresults.JSON_RESULTS_MAX_BUILDS_SMALL)
        self._test_merge(
            # Aggregated results
            (["2", "1"], [["001.html", "[" + max_builds + ",\"F\"],[1,\"N\"]", "[" + max_builds + ",0],[1,1]"]]),
            # Incremental results
            (["3"], [["001.html", "[1,\"F\"]", "[1,0]"]]),
            # Expected results
            (["3", "2", "1"], [["001.html", "[" + max_builds + ",\"F\"]", "[" + max_builds + ",0]"]]),
            int(max_builds))
        # Get test name list only. Don't include non-test-list data and
        # of test result details.
        self._test_get_test_list(
            # Input results
            (["3", "2", "1"], [["001.html", "[200,\"P\"]", "[200,0]"], ["002.html", "[10,\"F\"]", "[10,0]"]]),
            # Expected results
            ["001.html", "002.html"])
# Allow running this file directly as a standalone test suite.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
itsknob/TechnicalServicesScheduler-Python | TechTypes.py | 1 | 7328 | """ Used for creating Job Types"""
from enum import Enum
class EmployeeType(Enum):
    """Enumerates the job classifications an employee can hold.

    An employee is either general staff, a manager in training, or a
    full manager.
    """

    GENERAL = 0  # general staff member
    MANAGER_IN_TRAINING = 1  # staff member working toward manager
    MANAGER = 2  # full manager
class PersonalInformation:
    """Holds identity and contact details for a single student employee.

    All values live on private attributes and are exposed through
    read/write properties; reading a property before it has been set
    raises AttributeError.
    """

    @property
    def student_id(self):
        """School-issued student identifier."""
        return self._student_id

    @student_id.setter
    def student_id(self, new_id):
        self._student_id = new_id

    @property
    def name_first(self):
        """Given (first) name."""
        return self._name_first

    @name_first.setter
    def name_first(self, new_name_first):
        self._name_first = new_name_first

    @property
    def name_last(self):
        """Family (last) name."""
        return self._name_last

    @name_last.setter
    def name_last(self, new_name_last):
        self._name_last = new_name_last

    @property
    def email(self):
        """Contact e-mail address, stored verbatim (no quoting applied)."""
        return self._email

    @email.setter
    def email(self, new_email):
        # A commented-out variant that wrapped the address in quotes was
        # removed; the address is stored exactly as given.
        self._email = new_email

    @property
    def phone_number(self):
        """Contact phone number."""
        return self._phone_number

    @phone_number.setter
    def phone_number(self, new_phone_number):
        self._phone_number = new_phone_number
class WorkInformation:
    """Holds employment details for a student employee.

    All values live on private attributes and are exposed through
    read/write properties.
    """

    @property
    def job_type(self):
        """EmployeeType classification of the employee."""
        return self._job_type

    @job_type.setter
    def job_type(self, new_job_type):
        # The old signature used the EmployeeType class itself as a default
        # value (a pseudo-annotation). Setters invoked through the property
        # always receive an explicit value, so the default was never used.
        self._job_type = new_job_type

    @property
    def date_hire(self):
        """Date the employee was hired."""
        return self._date_hire

    @date_hire.setter
    def date_hire(self, new_date_hire):
        self._date_hire = new_date_hire

    @property
    def date_graduate(self):
        """Expected graduation date."""
        return self._date_graduate

    @date_graduate.setter
    def date_graduate(self, new_date_graduate):
        self._date_graduate = new_date_graduate

    @property
    def shirt_size(self):
        """Uniform shirt size."""
        return self._shirt_size

    @shirt_size.setter
    def shirt_size(self, new_shirt_size):
        self._shirt_size = new_shirt_size

    @property
    def notes(self):
        """Free-form notes about the employee."""
        return self._notes

    @notes.setter
    def notes(self, new_notes):
        self._notes = new_notes
class Employee:
    """A student employee: personal details, work details, and trainings.

    Wraps a PersonalInformation object, a WorkInformation object, and the
    list of trainings the employee has completed.
    """

    def __init__(self, p_info=None, w_info=None, trainings=None):
        # The old signature used the PersonalInformation / WorkInformation /
        # list classes themselves as default values (pseudo-annotations),
        # which silently stored class objects when called with no arguments.
        # None defaults make the "not provided" case explicit; the trainings
        # list defaults to a fresh empty list per instance.
        self.personal_information = p_info
        self.work_information = w_info
        self.trainings_completed = [] if trainings is None else trainings

    def get_full_name(self):
        """Return "First Last" built from the personal information."""
        return (self.personal_information.name_first + " "
                + self.personal_information.name_last)

    def get_personal_info_as_list(self):
        """Return [student_id, name_first, name_last, email, phone_number]."""
        info = self.personal_information
        return [
            info.student_id,
            info.name_first,
            info.name_last,
            info.email,
            info.phone_number,
        ]

    def get_work_info_as_list(self):
        """Return [job_type, date_hire, date_graduate, shirt_size, notes]."""
        info = self.work_information
        return [
            info.job_type,
            info.date_hire,
            info.date_graduate,
            info.shirt_size,
            info.notes,
        ]

    def return_all_info_as_list(self):
        """Return personal info followed by work info as one flat list."""
        return self.get_personal_info_as_list() + self.get_work_info_as_list()
class Event:
    """An event to be staffed: when and where it happens, how many people
    it needs, and which employees are assigned to it."""

    def __init__(self, name, location, date, start_time, end_time,
                 number_employees, req_manager):
        self._event_name = name
        self._event_location = location
        self._event_date = date
        self._event_start_time = start_time
        self._event_end_time = end_time
        self._event_number_employees = number_employees
        self._event_requires_manager = req_manager
        # Filled in after creation, while scheduling.
        self._event_employee_list = []
        self._event_manager = None

    # --- Required attributes ------------------------------------------

    @property
    def event_name(self):
        """Name of the event."""
        return self._event_name

    @event_name.setter
    def event_name(self, new_name):
        self._event_name = new_name

    @property
    def event_location(self):
        """Where the event takes place."""
        return self._event_location

    @event_location.setter
    def event_location(self, new_location):
        self._event_location = new_location

    @property
    def event_date(self):
        """Calendar date of the event."""
        return self._event_date

    @event_date.setter
    def event_date(self, new_date):
        self._event_date = new_date

    @property
    def event_start_time(self):
        """Time the event starts."""
        return self._event_start_time

    @event_start_time.setter
    def event_start_time(self, new_start_time):
        self._event_start_time = new_start_time

    @property
    def event_end_time(self):
        """Time the event ends."""
        return self._event_end_time

    @event_end_time.setter
    def event_end_time(self, new_end_time):
        self._event_end_time = new_end_time

    @property
    def event_number_employees(self):
        """How many employees the event requires."""
        return self._event_number_employees

    @event_number_employees.setter
    def event_number_employees(self, new_number):
        self._event_number_employees = new_number

    @property
    def event_requires_manager(self):
        """Whether a manager must be assigned to the event."""
        return self._event_requires_manager

    @event_requires_manager.setter
    def event_requires_manager(self, new_requires):
        self._event_requires_manager = new_requires

    # --- Additional attributes ----------------------------------------

    @property
    def event_employee_list(self):
        """Employees currently assigned to the event."""
        return self._event_employee_list

    @event_employee_list.setter
    def event_employee_list(self, new_employee_list):
        self._event_employee_list = new_employee_list

    @property
    def event_manager(self):
        """The manager assigned to the event, or None."""
        return self._event_manager

    @event_manager.setter
    def event_manager(self, new_manager):
        self._event_manager = new_manager

    # --- Event methods ------------------------------------------------

    def add_employee_to_event(self, employee):
        """Assign *employee* to this event.

        The old signature used the Employee class itself as a default value
        (a pseudo-annotation); the argument is now required, which matches
        every realistic call site.
        """
        self.event_employee_list.append(employee)

    def remove_employee_from_event(self, employee):
        """Unassign *employee* (raises ValueError if not assigned)."""
        self.event_employee_list.remove(employee)
| mit |
nojhan/weboob-devel | weboob/applications/pastoob/pastoob.py | 7 | 7566 | # -*- coding: utf-8 -*-
# Copyright(C) 2011-2014 Laurent Bachelier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import os
import codecs
import re
from random import choice
from weboob.capabilities.paste import CapPaste, PasteNotFound
from weboob.tools.application.repl import ReplApplication
__all__ = ['Pastoob']
class Pastoob(ReplApplication):
    """REPL application to fetch pastes from and submit pastes to
    pastebin-like services through weboob backends."""

    APPNAME = 'pastoob'
    VERSION = '1.1'
    COPYRIGHT = 'Copyright(C) 2011-YEAR Laurent Bachelier'
    DESCRIPTION = "Console application allowing to post and get pastes from pastebins."
    SHORT_DESCRIPTION = "post and get pastes from pastebins"
    CAPS = CapPaste

    def main(self, argv):
        self.load_config()
        return ReplApplication.main(self, argv)

    def do_info(self, line):
        """
        info ID [ID2 [...]]

        Get information about pastes.
        """
        if not line:
            print('This command takes an argument: %s' % self.get_command_help('info', short=True), file=self.stderr)
            return 2

        self.start_format()
        for _id in line.split(' '):
            paste = self.get_object(_id, 'get_paste', ['id', 'title', 'language', 'public', 'contents'])
            if not paste:
                print('Paste not found: %s' % _id, file=self.stderr)
                # Fix: skip the formatter for missing pastes -- the old code
                # fell through and called self.format(None).
                continue
            self.format(paste)

    def do_get(self, line):
        """
        get ID

        Get a paste contents.
        """
        return self._get_op(line, binary=False, command='get')

    def do_get_bin(self, line):
        """
        get_bin ID

        Get a paste contents.
        File will be downloaded from binary services.
        """
        return self._get_op(line, binary=True, command='get_bin')

    def _get_op(self, _id, binary, command='get'):
        # Shared implementation of the 'get' and 'get_bin' commands.
        if not _id:
            print('This command takes an argument: %s' % self.get_command_help(command, short=True), file=self.stderr)
            return 2

        try:
            paste = self.get_object(_id, 'get_paste', ['contents'])
        except PasteNotFound:
            print('Paste not found: %s' % _id, file=self.stderr)
            return 3
        if not paste:
            print('Unable to handle paste: %s' % _id, file=self.stderr)
            return 1

        if binary:
            if self.interactive:
                if not self.ask('The console may become messed up. Are you sure you want to show a binary file on your terminal?', default=False):
                    print('Aborting.', file=self.stderr)
                    return 1
            output = self.stdout
            # NOTE(review): str.decode('base64') is a Python-2-only codec;
            # left as-is since this file targets Python 2.
            output.write(paste.contents.decode('base64'))
        else:
            output = codecs.getwriter(self.encoding)(self.stdout)
            output.write(paste.contents)
            # add a newline unless we are writing
            # in a file or in a pipe
            if output.isatty():
                output.write('\n')

    def do_post(self, line):
        """
        post [FILENAME]

        Submit a new paste.
        The filename can be '-' for reading standard input (pipe).
        If 'bin' is passed, file will be uploaded to binary services.
        """
        return self._post(line, binary=False)

    def do_post_bin(self, line):
        """
        post_bin [FILENAME]

        Submit a new paste.
        The filename can be '-' for reading standard input (pipe).
        File will be uploaded to binary services.
        """
        return self._post(line, binary=True)

    def _post(self, filename, binary):
        # Shared implementation of 'post' and 'post_bin'.
        use_stdin = (not filename or filename == '-')
        if use_stdin:
            if binary:
                contents = self.stdin.read()
            else:
                contents = self.acquire_input()
            if not len(contents):
                print('Empty paste, aborting.', file=self.stderr)
                return 1
        else:
            try:
                if binary:
                    m = open(filename)
                else:
                    m = codecs.open(filename, encoding=self.options.encoding or self.encoding)
                with m as fp:
                    contents = fp.read()
            except IOError as e:
                print('Unable to open file "%s": %s' % (filename, e.strerror), file=self.stderr)
                return 1

        if binary:
            # NOTE(review): str.encode('base64') is a Python-2-only codec;
            # left as-is since this file targets Python 2.
            contents = contents.encode('base64')

        # get and sort the backends able to satisfy our requirements
        params = self.get_params()
        backends = {}
        for backend in self.weboob.iter_backends():
            score = backend.can_post(contents, **params)
            if score:
                backends.setdefault(score, []).append(backend)
        # select a random backend from the best scores
        if len(backends):
            backend = choice(backends[max(backends.keys())])
        else:
            print('No suitable backend found.', file=self.stderr)
            return 1

        p = backend.new_paste(_id=None)
        p.public = params['public']
        if self.options.title is not None:
            p.title = self.options.title
        else:
            p.title = os.path.basename(filename)
        p.contents = contents
        backend.post_paste(p, max_age=params['max_age'])
        print('Successfuly posted paste: %s' % p.page_url)

    def get_params(self):
        """Collect posting options (visibility, max age, title) from the
        command-line options."""
        return {'public': self.options.public,
                'max_age': self.str_to_duration(self.options.max_age),
                'title': self.options.title}

    def str_to_duration(self, s):
        """Parse a human-readable duration such as '1 month' or '2 days 3 hrs'
        into a number of seconds; 'never' returns False (infinite age)."""
        if s.strip().lower() == 'never':
            return False

        parts = re.findall(r'(\d*(?:\.\d+)?)\s*([A-z]+)', s)
        argsmap = {'Y|y|year|years|yr|yrs': 365.25 * 24 * 3600,
                   'M|o|month|months': 30.5 * 24 * 3600,
                   'W|w|week|weeks': 7 * 24 * 3600,
                   'D|d|day|days': 24 * 3600,
                   'H|h|hours|hour|hr|hrs': 3600,
                   'm|i|minute|minutes|min|mins': 60,
                   'S|s|second|seconds|sec|secs': 1}
        seconds = 0
        for number, unit in parts:
            # items() instead of the Python-2-only iteritems(): identical
            # behavior here, and also valid under Python 3.
            for rx, secs in argsmap.items():
                if re.match('^(%s)$' % rx, unit):
                    seconds += float(number) * float(secs)
        return int(seconds)

    def add_application_options(self, group):
        group.add_option('-p', '--public', action='store_true',
                         help='Make paste public.')
        group.add_option('-t', '--title', action='store',
                         help='Paste title',
                         type='string')
        group.add_option('-m', '--max-age', action='store',
                         help='Maximum age (duration), default "1 month", "never" for infinite',
                         type='string', default='1 month')
        group.add_option('-E', '--encoding', action='store',
                         help='Input encoding',
                         type='string')
| agpl-3.0 |
AICP/external_chromium_org | build/toolchain/win/setup_toolchain.py | 43 | 3742 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import errno
import os
import re
import subprocess
import sys
"""
Copies the given "win tool" (which the toolchain uses to wrap compiler
invocations) and the environment blocks for the 32-bit and 64-bit builds on
Windows to the build directory.
The arguments are the visual studio install location and the location of the
win tool. The script assumes that the root build directory is the current dir
and the files will be written to the current directory.
"""
def ExtractImportantEnvironment():
  """Extracts environment variables required for the toolchain from the
  current environment."""
  # NOTE(review): 'goma_.*' looks like a regex, but the membership test
  # below is a literal dict lookup, so it never matches a real GOMA_*
  # variable -- confirm whether regex matching was intended ('re' is
  # imported at module level but unused).
  envvars_to_save = (
      'goma_.*',  # TODO(scottmg): This is ugly, but needed for goma.
      'include',  # Needed by midl compiler.
      'path',
      'pathext',
      'systemroot',
      'temp',
      'tmp',
      )
  result = {}
  for envvar in envvars_to_save:
    if envvar not in os.environ:
      continue
    key = envvar.lower()
    value = os.environ[envvar]
    if key == 'path':
      # Our own rules (for running gyp-win-tool) and other actions in
      # Chromium rely on python being in the path, so prepend the directory
      # of this python: even if PATH lacks it when ninja runs later, python
      # will still be found.
      value = os.path.dirname(sys.executable) + os.pathsep + value
    result[key.upper()] = value
  for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
    if required not in result:
      raise Exception('Environment variable "%s" '
                      'required to be set to valid path' % required)
  return result
def FormatAsEnvironmentBlock(envvar_dict):
  """Format as an 'environment block' directly suitable for CreateProcess.
  Briefly this is a list of key=value\0, terminated by an additional \0. See
  CreateProcess documentation for more details."""
  # items() instead of the Python-2-only iteritems(): identical behavior
  # here, and also valid under Python 3.
  block = ''
  nul = '\0'
  for key, value in envvar_dict.items():
    block += key + '=' + value + nul
  block += nul
  return block
def CopyTool(source_path):
  """Copies the given tool to the current directory, including a warning not
  to edit it."""
  with open(source_path) as source_file:
    lines = source_file.readlines()
  # Keep the first line (shebang), inject a do-not-edit banner, then write
  # the rest of the file into the current directory (the root build dir).
  banner = '# Generated by setup_toolchain.py do not edit.\n'
  with open("gyp-win-tool", 'w') as tool_file:
    tool_file.writelines([lines[0], banner] + lines[1:])
# Script entry: argv[1] = Visual Studio path, argv[2] = win tool path,
# argv[3] = Windows SDK path. Writes gyp-win-tool, environment.x86 and
# environment.x64 into the current (root build) directory.
if len(sys.argv) != 4:
  print('Usage setup_toolchain.py '
        '<visual studio path> <win tool path> <win sdk path>')
  sys.exit(2)
vs_path = sys.argv[1]       # Visual Studio install location
tool_source = sys.argv[2]   # path of the gyp "win tool" to copy
win_sdk_path = sys.argv[3]  # Windows SDK install location

# Copy the wrapper tool next to the build outputs.
CopyTool(tool_source)

important_env_vars = ExtractImportantEnvironment()
path = important_env_vars["PATH"].split(";")

# Add 32-bit compiler path to the beginning and write the block.
path32 = [os.path.join(vs_path, "VC\\BIN")] + \
    [os.path.join(win_sdk_path, "bin\\x86")] + \
    path
important_env_vars["PATH"] = ";".join(path32)
environ = FormatAsEnvironmentBlock(important_env_vars)
# NOTE(review): a str is written in binary mode -- fine under Python 2
# (str is bytes); would need an explicit encode under Python 3.
with open('environment.x86', 'wb') as env_file:
  env_file.write(environ)

# Add 64-bit compiler path to the beginning and write the block.
path64 = [os.path.join(vs_path, "VC\\BIN\\amd64")] + \
    [os.path.join(win_sdk_path, "bin\\x64")] + \
    path
important_env_vars["PATH"] = ";".join(path64)
environ = FormatAsEnvironmentBlock(important_env_vars)
with open('environment.x64', 'wb') as env_file:
  env_file.write(environ)
| bsd-3-clause |
hstau/covar-cryo | covariance_old1/binning_rot_coverage.py | 4 | 4423 | '''
.. Created 2015
.. codeauthor:: Hstau Y Liao <hstau.y.liao@gmail.com>
'''
import sys
import spider
import eulerangles as eu
import numpy as np, logging
import math
#import glob
from arachnid.core.image import ndimage_file
from arachnid.core.metadata import format
def get_unitv(phi, theta):
    """Return the unit direction vector(s) for spherical angles (phi, theta).

    Works element-wise for array inputs; column inputs of shape (n, 1)
    yield an (n, 3) stack of direction vectors.
    """
    sin_theta = np.sin(theta)
    x = np.cos(phi) * sin_theta
    y = np.sin(phi) * sin_theta
    z = np.cos(theta)
    return np.hstack((x, y, z))
def mcol(u):
    """Reshape a 1-D array *u* into an (n, 1) column vector."""
    return np.reshape(u, (u.shape[0], 1))
def adjust_psi(rpsi, rtheta, rphi, theta, phi):
    """Brute-force the in-plane angle psi best matching a reference rotation.

    Scans N evenly spaced psi values in [0, 2*pi) and returns the one whose
    rotation matrix (combined with the particle's theta and phi) is closest,
    in Frobenius norm, to the rotation matrix of the reference Euler angles.
    """
    N = 50  # number of increment steps for the psi search
    rrot = euler_to_rot(rpsi, rtheta, rphi)  # reference rotation matrix
    best_psi = 0
    best_dif = 1e10  # large initial difference
    for i in range(N):
        psi = 2 * math.pi * i / N
        dif = np.linalg.norm(euler_to_rot(psi, theta, phi) - rrot)
        if dif < best_dif:
            best_psi = psi
            best_dif = dif
    return best_psi
def euler_to_rot(psi, theta, phi):
    """Compose a rotation matrix from the Euler angles (psi, theta, phi).

    Applies phi first, then theta (about a second axis), then psi:
    R = R(psi) . R(0, theta) . R(phi), per eu.euler2mat's conventions.
    """
    rot_phi = eu.euler2mat(phi)
    rot_theta = eu.euler2mat(0, theta)
    rot_psi = eu.euler2mat(psi)
    return np.dot(rot_psi, np.dot(rot_theta, rot_phi))
def get_mirror(ima):
    """Return *ima* with column 0 kept first and columns 1..end reversed."""
    first_col = ima[:, 0].reshape(-1, 1)
    rest_reversed = ima[:, -1:0:-1]
    return np.hstack((first_col, rest_reversed))
def bin(align_param_file, ref_ang_file, pref_image_in, pref_image_out, pref_sel, pref_sel_all, thres):
    """Bin particle images by nearest reference projection direction.

    Reads alignment parameters (column 0: particle id, 1: psi, 2: theta,
    3: phi) and reference angles, assigns every particle to the reference
    direction with the largest inner product, mirrors images whose theta
    exceeds pi, re-fits each particle's psi against its bin's reference,
    and writes one selection file per bin holding more than *thres*
    particles plus a master file listing the kept bin numbers.
    """
    align = spider.parse(align_param_file)
    print("Reconstructing %d particles" % len(align))
    refang = spider.parse(ref_ang_file)
    # Fix: np.int was deprecated and then removed in modern NumPy; the
    # builtin int is the documented replacement.
    particle_ids = align[:, 0].astype(int)
    # Degrees -> radians for the Euler-angle columns of both tables.
    align[:, 1:4] = np.deg2rad(align[:, 1:4])
    refang[:, 1:4] = np.deg2rad(refang[:, 1:4])
    iter_single_images = ndimage_file.iter_images(pref_image_in, particle_ids)
    # Unit direction vector for every reference angle (rows of unit_v).
    rphi = mcol(refang[:, 3])
    rtheta = mcol(refang[:, 2])
    unit_v = get_unitv(rphi, rtheta)
    # index[b, :] tracks the ids of the particles assigned to bin b.
    MAX = 5000  # max number of particles tracked per angle bin
    index = np.zeros((refang.shape[0], MAX))
    # quant[b] counts the particles in bin b. Fix: it must be an integer
    # dtype because it is used as an array index below (float indices
    # raise IndexError in modern NumPy).
    quant = np.zeros(refang.shape[0], dtype=int)
    for i, img in enumerate(iter_single_images):
        # Direction of one particle.
        phi = align[i, 3]
        theta = align[i, 2]
        uv = get_unitv(phi, theta)
        if theta > math.pi:
            img = get_mirror(img)
        ndimage_file.write_image(pref_image_out, img, i)
        # Nearest reference direction = largest inner product.
        ip = np.dot(unit_v, uv.T)
        # Fix: renamed from 'bin', which shadowed both this function and
        # the builtin of the same name.
        best_bin = ip.argmax()
        index[best_bin, quant[best_bin]] = align[i, 0]
        quant[best_bin] += 1
        # Re-fit psi against the chosen bin's reference orientation.
        rpsi_b = refang[best_bin, 1]
        rtheta_b = refang[best_bin, 2]
        rphi_b = refang[best_bin, 3]
        # NOTE(review): the adjusted psi is stored in 'align' but never
        # written back out -- confirm whether it should be persisted.
        align[i, 1] = adjust_psi(rpsi_b, rtheta_b, rphi_b, theta, phi)
    # Keep only the bins with more than 'thres' particles.
    S = []  # selected bin numbers
    for j in range(refang.shape[0]):
        sz = len(np.nonzero(index[j, :])[0])
        if sz > thres:
            table = index[j, 0:sz]
            filename = pref_sel + '{:05d}'.format(j)
            spider.write(filename, table)
            S.append(j)
    spider.write(pref_sel_all, S)
if __name__ == '__main__':
    # Positional CLI arguments, in order:
    #   1: alignment parameter file   2: reference angles file
    #   3: input image prefix         4: output image prefix
    #   5: per-bin selection prefix   6: master selection file
    #   7: minimum particle count for a bin to be kept
    align_param_file = sys.argv[1]
    ref_ang_file = sys.argv[2]
    pref_image_in = sys.argv[3]
    pref_image_out = sys.argv[4]
    pref_sel = sys.argv[5]
    pref_sel_all = sys.argv[6]
    thres = int(sys.argv[7])
    bin(align_param_file, ref_ang_file, pref_image_in, pref_image_out, pref_sel, pref_sel_all, thres)
| gpl-2.0 |
acshan/odoo | addons/delivery/partner.py | 383 | 1404 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_partner(osv.osv):
    # Extend the base partner model (in place, via _inherit) with a
    # delivery-method preference.
    _inherit = 'res.partner'

    _columns = {
        # many2one to the carrier used by default when invoicing from a
        # picking for this partner (see the help text). Declared with
        # fields.property -- presumably so the value can differ per
        # company, as is usual for OpenERP property fields; confirm
        # against the framework version in use.
        'property_delivery_carrier': fields.property(
            type='many2one',
            relation='delivery.carrier',
            string="Delivery Method",
            help="This delivery method will be used when invoicing from picking."),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.