repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
brianjgeiger/osf.io | api/osf_groups/permissions.py | 10 | 1463 | from rest_framework import permissions
from api.base.utils import assert_resource_type, get_user_auth
from osf.utils.permissions import MANAGE
from osf.models import OSFGroup, OSFUser
class IsGroupManager(permissions.BasePermission):
    """Object-level permission: anyone may read an OSFGroup; only a user
    with MANAGE permission on the group may modify it."""

    acceptable_models = (OSFGroup,)

    def has_object_permission(self, request, view, obj):
        assert_resource_type(obj, self.acceptable_models)
        auth = get_user_auth(request)
        # Writes require an authenticated user holding MANAGE on the group.
        if request.method not in permissions.SAFE_METHODS:
            return auth.user and obj.has_permission(auth.user, MANAGE)
        return True
class GroupMemberManagement(permissions.BasePermission):
    """Object-level permission for OSFGroup membership endpoints.

    Reads are open; removing a member (DELETE) requires MANAGE on the
    group unless the user is removing themselves; all other writes
    require MANAGE.
    """

    acceptable_models = (OSFGroup, OSFUser, )

    def has_object_permission(self, request, view, obj):
        group = obj
        if not isinstance(group, OSFGroup):
            # Member endpoints pass the OSFUser; resolve the group from the URL.
            group = OSFGroup.load(request.parser_context['kwargs']['group_id'])
        assert_resource_type(group, self.acceptable_models)
        auth = get_user_auth(request)
        method = request.method
        if method in permissions.SAFE_METHODS:
            return True
        if method == 'DELETE':
            member = OSFUser.load(request.parser_context['kwargs']['user_id'])
            # You must have manage permissions on the OSFGroup to remove a
            # member, unless you are removing yourself.
            return group.has_permission(auth.user, MANAGE) or auth.user == member
        return auth.user and group.has_permission(auth.user, MANAGE)
| apache-2.0 |
Alwnikrotikz/smap-data | python/smap/drivers/veris.py | 6 | 7104 | """
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Stephen Dawson-Haggerty <stevedh@eecs.berkeley.edu>
"""
"""Driver for the Veris Industries E30 Panel meter, when connected via
a modbus/ethernet bridge.
Required config parameters:
"Address" : ip or hostname of device
"Port" : tcp port number to connect on
"BusID" : modbus bus address of device
Optional parameters:
"Period" [default 30] : number of seconds between device poll.
"""
import sys
import time
import logging
import struct
import threading
import smap.iface.modbus.TCPModbusClient as TCPModbusClient
from smap.driver import SmapDriver
from smap.loader import SmapLoadError
from smap.util import periodicSequentialCall
class VerisMeter:
    """Reader for the 42 branch-circuit channels of a Veris E30 panel
    meter, accessed through a modbus/ethernet bridge.

    NOTE(review): this is Python 2 code (tuple-parameter unpacking in
    read_reg_range, integer division, map() returning a list); it must
    stay on a Python 2 interpreter.
    """

    # Channel numbers exposed by the meter (1..42).
    all_meters = range(1, 43)

    # 1-based, inclusive modbus register ranges.  Each measured quantity
    # has a parallel *_scale range holding per-channel power-of-ten
    # exponents applied by scale_vals().
    register_map = {
        'current_scale' : (1000, 1041),
        'power_scale' : (1042, 1083),
        'energy_scale' : (1084, 1125),
        'kwh' : (1168, 1251),
        'kwh_fl' : (2000, 2083),
        'kw' : (1252, 1293),
        'pf' : (1294, 1335),
        'current' : (1336, 1377),
        'pkw' : (1378, 1419),
        'maxkw' : (1420, 1461),
        'pcurrent': (1462, 1503),
        'maxcurrent' : (1504, 1545),
        'reset' : (1126, 1167),
        }

    # Magic values used when writing the reset registers
    # (device-protocol constants; currently unused in this class).
    val_clear_kw = 10203
    val_clear_max = 29877

    def __init__(self, server, port, bus_addr):
        """
        :param server: ip or hostname of the modbus/ethernet bridge
        :param port: tcp port number to connect on
        :param bus_addr: modbus bus address of the meter
        """
        self.server = server
        self.port = port
        self.bus_addr = bus_addr
        self.logger = logging.getLogger('VerisMeter')
        self.last_reading_time = 0.0      # wall clock of last poll, for rate limiting
        self.last_reset_energy = None     # totals snapshot taken by reset_energy()
        self.last_reset_time = 0.0
        self.boot_time = time.time()

    def get_current(self):
        """Return per-channel current readings with scaling applied."""
        values = self.read_reg_range(self.register_map['current'])
        scales = self.read_reg_range(self.register_map['current_scale'])
        return self.scale_vals(values, scales)

    def get_power(self):
        """Return per-channel power readings with scaling applied."""
        values = self.read_reg_range(self.register_map['kw'])
        scales = self.read_reg_range(self.register_map['power_scale'])
        return self.scale_vals(values, scales)

    def get_powerfactor(self):
        """Return per-channel power factor; raw registers are fixed at a
        10**-3 scale rather than having a scale register range."""
        values = self.read_reg_range(self.register_map['pf'])
        scales = [-3] * len(values)
        return self.scale_vals(values, scales)

    def get_energy_totals(self):
        """Return per-channel cumulative energy totals.

        Each total is a 32-bit quantity stored across two consecutive
        16-bit registers, high word first.
        """
        values_16bit = self.read_reg_range(self.register_map['kwh'])
        scale = self.read_reg_range(self.register_map['energy_scale'])
        values = []
        for i in range(0, len(values_16bit) / 2):
            values.append(((values_16bit[i*2] & 0xffff) << 16) | (values_16bit[i*2+1] & 0xffff))
        return self.scale_vals(values, scale)

    def get_energy(self, current=None):
        """Return energy accumulated since the last reset_energy() call
        (or the raw totals if no reset has been recorded).

        :param current: optional pre-read totals, to avoid a second poll.
        """
        if not current:
            current = self.get_energy_totals()
        if not self.last_reset_energy:
            return current
        return map(lambda x, y: x - y, current, self.last_reset_energy)

    def reset_energy(self, vals=None):
        # reset doesn't seem to work reliably -- just remember what it was last time
        if not vals:
            newvals = self.get_energy_totals()
        else:
            newvals = vals
        self.last_reset_time = time.time()
        self.last_reset_energy = newvals

    def scale_vals(self, vals, scale):
        """Elementwise vals[i] * 10**scale[i]."""
        return map(lambda x, y: x * (10 ** y), vals, scale)

    def read_reg_range(self, (start, end)):
        """Read the inclusive, 1-based register range (start, end).

        Polls are rate-limited to one every 2 seconds (sleeps if called
        sooner).  Returns a list of 16-bit register values, or None if
        the range is invalid.
        """
        # Convert to 0-based register addresses for the wire protocol.
        start -= 1
        end -= 1
        if end < start:
            self.logger.error("read_reg_range: invalid range: (%i, %i)" % (start, end))
            return None
        self.logger.debug("read_reg_range: %i:%i" % (start, end))
        now = time.time()
        if now - self.last_reading_time < 2:
            time.sleep(2 - (now - self.last_reading_time))
        response = TCPModbusClient.dev_read(self.server,
                                            self.port,
                                            self.bus_addr,
                                            start, end - start + 1)
        self.last_reading_time = time.time()
        self.logger.debug("read_reg_range: %i response bytes", response.modbus_val_bytes)
        # Two response bytes per 16-bit register.
        return [TCPModbusClient.get_val(response.modbus_reg_val, i)
                for i in range(0, response.modbus_val_bytes / 2)]
class VerisDriver(SmapDriver):
    """sMAP driver that periodically polls a VerisMeter and publishes
    pf/power/current/energy timeseries for each of the 42 channels.

    NOTE(review): Python 2 code (xrange, `except Exception, e`).
    """

    def setup(self, opts):
        """Validate config options and register the timeseries.

        Required opts: "Address", "Port", "BusID".  Optional: "Period"
        (seconds between polls, default 30).

        :raises SmapLoadError: if a required option is missing.
        """
        if not "Address" in opts or \
           not "Port" in opts or \
           not "BusID" in opts:
            raise SmapLoadError("Veris Driver requires Address, Port, and BusID")
        # NOTE(review): a Period supplied via config may be a string;
        # unlike Port/BusID it is not converted with int() -- confirm
        # periodicSequentialCall tolerates that.
        self.period = opts.get("Period", 30)
        # Four timeseries per channel, one per measured quantity.
        for channel in xrange(1, 43):
            self.add_timeseries("/%i/pf" % channel, "pf", data_type="double")
            self.add_timeseries("/%i/power" % channel, "kW", data_type="double")
            self.add_timeseries("/%i/current" % channel, "A", data_type="double")
            self.add_timeseries("/%i/energy" % channel, "kWh", data_type="double")
        self.veris = VerisMeter(opts['Address'], int(opts['Port']), int(opts['BusID']))

    def start(self):
        """Begin the periodic polling loop."""
        periodicSequentialCall(self.update).start(self.period)

    def update_field(self, field, data):
        """Publish one reading per channel for *field*; stops early if the
        meter returned fewer than 42 values."""
        when = int(time.time())
        for i in range(1, 43):
            if i > len(data): return
            self.add("/%i/%s" % (i, field), when, data[i-1])

    def update(self):
        """Poll the meter once and publish all four quantities.

        Any exception is logged and swallowed so one failed poll does not
        kill the periodic loop.
        """
        try:
            logging.debug("Updating meter readings")
            current = self.veris.get_current()
            self.update_field('current', current)
            pf = self.veris.get_powerfactor()
            self.update_field('pf', pf)
            power = self.veris.get_power()
            self.update_field('power', power)
            energy = self.veris.get_energy()
            self.update_field('energy', energy)
        except Exception, e:
            logging.error("Exception updating readings: " + str(e))
| bsd-2-clause |
2015fallhw/cptocadp | static/Brython3.2.3-20151122-082712/Lib/importlib/abc.py | 13 | 14999 | """Abstract base classes related to import."""
from . import _bootstrap
from . import machinery
try:
import _frozen_importlib
except ImportError as exc:
if exc.name != '_frozen_importlib':
raise
_frozen_importlib = None
import abc
import imp
import marshal
import sys
import tokenize
import warnings
def _register(abstract_cls, *classes):
    """Register *classes* as virtual subclasses of *abstract_cls*.

    When the accelerated _frozen_importlib module is available, the
    frozen twin of each class (looked up by identical name) is
    registered as well.
    """
    for concrete in classes:
        abstract_cls.register(concrete)
        if _frozen_importlib is None:
            continue
        # Frozen variants share the class name with the source versions.
        abstract_cls.register(getattr(_frozen_importlib, concrete.__name__))
class Finder(metaclass=abc.ABCMeta):

    """Legacy abstract base class for import finders.

    It may be subclassed for compatibility with legacy third party
    reimplementations of the import system.  Otherwise, finder
    implementations should derive from the more specific MetaPathFinder
    or PathEntryFinder ABCs.
    """

    @abc.abstractmethod
    def find_module(self, fullname, path=None):
        """An abstract method that should find a module.
        The fullname is a str and the optional path is a str or None.
        Returns a Loader object.
        """
        raise NotImplementedError
class MetaPathFinder(Finder):

    """Abstract base class for import finders on sys.meta_path."""

    @abc.abstractmethod
    def find_module(self, fullname, path):
        """Abstract method which, when implemented, should find a module.
        The fullname is a str and the path is a str or None.
        Returns a Loader object.
        """
        raise NotImplementedError

    def invalidate_caches(self):
        """An optional method for clearing the finder's cache, if any.
        This method is used by importlib.invalidate_caches().
        """
        # NOTE(review): deliberately returns the NotImplemented constant
        # (not raising NotImplementedError), making this a harmless
        # default for subclasses with no cache.
        return NotImplemented

# Register the standard machinery finders (and any frozen twins) as
# virtual subclasses so isinstance() checks against this ABC succeed.
_register(MetaPathFinder, machinery.BuiltinImporter, machinery.FrozenImporter,
          machinery.PathFinder, machinery.WindowsRegistryFinder)
class PathEntryFinder(Finder):

    """Abstract base class for path entry finders used by PathFinder."""

    @abc.abstractmethod
    def find_loader(self, fullname):
        """Abstract method which, when implemented, returns a module loader.
        The fullname is a str.  Returns a 2-tuple of (Loader, portion) where
        portion is a sequence of file system locations contributing to part of
        a namespace package.  The sequence may be empty and the loader may be
        None.
        """
        raise NotImplementedError

    # Legacy alias: the bootstrap shim adapts the old find_module()
    # calling convention onto find_loader().
    find_module = _bootstrap._find_module_shim

    def invalidate_caches(self):
        """An optional method for clearing the finder's cache, if any.
        This method is used by PathFinder.invalidate_caches().
        """
        # NOTE(review): returns the NotImplemented constant as a no-op
        # default, mirroring MetaPathFinder.invalidate_caches().
        return NotImplemented

_register(PathEntryFinder, machinery.FileFinder)
class Loader(metaclass=abc.ABCMeta):

    """Abstract base class for import loaders."""

    @abc.abstractmethod
    def load_module(self, fullname):
        """Abstract method which when implemented should load a module.
        The fullname is a str.  Returns the loaded module."""
        raise NotImplementedError

    @abc.abstractmethod
    def module_repr(self, module):
        """Abstract method which when implemented calculates and returns the
        given module's repr, as a str."""
        raise NotImplementedError
class ResourceLoader(Loader):

    """Abstract base class for loaders which can return data from their
    back-end storage.

    This ABC represents one of the optional protocols specified by PEP 302.
    """

    @abc.abstractmethod
    def get_data(self, path):
        """Abstract method which when implemented should return the bytes for
        the specified path.  The path must be a str."""
        raise NotImplementedError
class InspectLoader(Loader):

    """Abstract base class for loaders which support inspection about the
    modules they can load.

    This ABC represents one of the optional protocols specified by PEP 302.
    """

    @abc.abstractmethod
    def is_package(self, fullname):
        """Abstract method which when implemented should return whether the
        module is a package.  The fullname is a str.  Returns a bool."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_code(self, fullname):
        """Abstract method which when implemented should return the code object
        for the module.  The fullname is a str.  Returns a types.CodeType."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_source(self, fullname):
        """Abstract method which should return the source code for the
        module.  The fullname is a str.  Returns a str."""
        raise NotImplementedError

# Built-in, frozen and extension modules all support inspection.
_register(InspectLoader, machinery.BuiltinImporter, machinery.FrozenImporter,
          machinery.ExtensionFileLoader)
class ExecutionLoader(InspectLoader):

    """Abstract base class for loaders that wish to support the execution of
    modules as scripts.

    This ABC represents one of the optional protocols specified in PEP 302.
    """

    @abc.abstractmethod
    def get_filename(self, fullname):
        """Abstract method which should return the value that __file__ is to be
        set to.  The fullname is a str."""
        raise NotImplementedError
class FileLoader(_bootstrap.FileLoader, ResourceLoader, ExecutionLoader):

    """Abstract base class partially implementing the ResourceLoader and
    ExecutionLoader ABCs.

    The concrete behaviour comes from _bootstrap.FileLoader; this class
    only ties it into the public ABC hierarchy.
    """

_register(FileLoader, machinery.SourceFileLoader,
          machinery.SourcelessFileLoader)
class SourceLoader(_bootstrap.SourceLoader, ResourceLoader, ExecutionLoader):

    """Abstract base class for loading source code (and optionally any
    corresponding bytecode).

    To support loading from source code, the abstractmethods inherited from
    ResourceLoader and ExecutionLoader need to be implemented.  To also
    support loading from bytecode, the optional methods specified directly
    by this ABC is required.

    Inherited abstractmethods not implemented in this ABC:

        * ResourceLoader.get_data
        * ExecutionLoader.get_filename

    path_mtime() and path_stats() are defined in terms of each other, so a
    subclass only has to override one of them.
    """

    def path_mtime(self, path):
        """Return the (int) modification time for the path (str)."""
        # Only delegate to path_stats() if a subclass actually overrode
        # it; otherwise the two defaults would recurse into each other.
        if self.path_stats.__func__ is SourceLoader.path_stats:
            raise NotImplementedError
        return int(self.path_stats(path)['mtime'])

    def path_stats(self, path):
        """Return a metadata dict for the source pointed to by the path (str).
        Possible keys:
        - 'mtime' (mandatory) is the numeric timestamp of last source
          code modification;
        - 'size' (optional) is the size in bytes of the source code.
        """
        # Same mutual-recursion guard as path_mtime() above.
        if self.path_mtime.__func__ is SourceLoader.path_mtime:
            raise NotImplementedError
        return {'mtime': self.path_mtime(path)}

    def set_data(self, path, data):
        """Write the bytes to the path (if possible).

        Accepts a str path and data as bytes.

        Any needed intermediary directories are to be created.  If for some
        reason the file cannot be written because of permissions, fail
        silently.
        """
        # Optional protocol: bytecode writing is skipped if this raises.
        raise NotImplementedError

_register(SourceLoader, machinery.SourceFileLoader)
class PyLoader(SourceLoader):

    """Implement the deprecated PyLoader ABC in terms of SourceLoader.

    This class has been deprecated!  It is slated for removal in Python 3.4.
    If compatibility with Python 3.1 is not needed then implement the
    SourceLoader ABC instead of this class.  If Python 3.1 compatibility is
    needed, then use the following idiom to have a single class that is
    compatible with Python 3.1 onwards::

        try:
            from importlib.abc import SourceLoader
        except ImportError:
            from importlib.abc import PyLoader as SourceLoader


        class CustomLoader(SourceLoader):
            def get_filename(self, fullname):
                # Implement ...

            def source_path(self, fullname):
                '''Implement source_path in terms of get_filename.'''
                try:
                    return self.get_filename(fullname)
                except ImportError:
                    return None

            def is_package(self, fullname):
                filename = os.path.basename(self.get_filename(fullname))
                return os.path.splitext(filename)[0] == '__init__'

    """

    @abc.abstractmethod
    def is_package(self, fullname):
        # See InspectLoader.is_package for the contract.
        raise NotImplementedError

    @abc.abstractmethod
    def source_path(self, fullname):
        """Abstract method.  Accepts a str module name and returns the path to
        the source code for the module."""
        raise NotImplementedError

    def get_filename(self, fullname):
        """Implement get_filename in terms of source_path.

        As get_filename should only return a source file path there is no
        chance of the path not existing but loading still being possible, so
        ImportError should propagate instead of being turned into returning
        None.
        """
        # Warn on every call: this legacy adapter is scheduled for removal.
        warnings.warn("importlib.abc.PyLoader is deprecated and is "
                      "slated for removal in Python 3.4; "
                      "use SourceLoader instead. "
                      "See the importlib documentation on how to be "
                      "compatible with Python 3.1 onwards.",
                      DeprecationWarning)
        path = self.source_path(fullname)
        if path is None:
            raise ImportError(name=fullname)
        else:
            return path
class PyPycLoader(PyLoader):

    """Abstract base class to assist in loading source and bytecode by
    requiring only back-end storage methods to be implemented.

    This class has been deprecated!  Removal is slated for Python 3.4.
    Implement the SourceLoader ABC instead.  If Python 3.1 compatibility is
    needed, see PyLoader.

    The methods get_code, get_source, and load_module are implemented for the
    user.
    """

    def get_filename(self, fullname):
        """Return the source or bytecode file path.

        Source is preferred; raises ImportError when neither exists.
        """
        path = self.source_path(fullname)
        if path is not None:
            return path
        path = self.bytecode_path(fullname)
        if path is not None:
            return path
        raise ImportError("no source or bytecode path available for "
                          "{0!r}".format(fullname), name=fullname)

    def get_code(self, fullname):
        """Get a code object from source or bytecode.

        Valid, non-stale bytecode wins; otherwise the source is compiled
        and (unless sys.dont_write_bytecode is set) fresh bytecode is
        written back via write_bytecode().
        """
        warnings.warn("importlib.abc.PyPycLoader is deprecated and slated for "
                      "removal in Python 3.4; use SourceLoader instead. "
                      "If Python 3.1 compatibility is required, see the "
                      "latest documentation for PyLoader.",
                      DeprecationWarning)
        source_timestamp = self.source_mtime(fullname)
        # Try to use bytecode if it is available.
        bytecode_path = self.bytecode_path(fullname)
        if bytecode_path:
            data = self.get_data(bytecode_path)
            try:
                # .pyc layout: 4-byte magic, 4-byte mtime, 4-byte source
                # size, then the marshalled code object.
                magic = data[:4]
                if len(magic) < 4:
                    raise ImportError(
                        "bad magic number in {}".format(fullname),
                        name=fullname, path=bytecode_path)
                raw_timestamp = data[4:8]
                if len(raw_timestamp) < 4:
                    raise EOFError("bad timestamp in {}".format(fullname))
                pyc_timestamp = _bootstrap._r_long(raw_timestamp)
                raw_source_size = data[8:12]
                if len(raw_source_size) != 4:
                    raise EOFError("bad file size in {}".format(fullname))
                # Source size is unused as the ABC does not provide a way to
                # get the size of the source ahead of reading it.
                bytecode = data[12:]
                # Verify that the magic number is valid.
                if imp.get_magic() != magic:
                    raise ImportError(
                        "bad magic number in {}".format(fullname),
                        name=fullname, path=bytecode_path)
                # Verify that the bytecode is not stale (only matters when
                # there is source to fall back on.
                if source_timestamp:
                    if pyc_timestamp < source_timestamp:
                        raise ImportError("bytecode is stale", name=fullname,
                                          path=bytecode_path)
            except (ImportError, EOFError):
                # If source is available give it a shot.
                if source_timestamp is not None:
                    pass
                else:
                    raise
            else:
                # Bytecode seems fine, so try to use it.
                return marshal.loads(bytecode)
        elif source_timestamp is None:
            raise ImportError("no source or bytecode available to create code "
                              "object for {0!r}".format(fullname),
                              name=fullname)
        # Use the source.
        source_path = self.source_path(fullname)
        if source_path is None:
            message = "a source path must exist to load {0}".format(fullname)
            raise ImportError(message, name=fullname)
        source = self.get_data(source_path)
        code_object = compile(source, source_path, 'exec', dont_inherit=True)
        # Generate bytecode and write it out.
        if not sys.dont_write_bytecode:
            data = bytearray(imp.get_magic())
            data.extend(_bootstrap._w_long(source_timestamp))
            data.extend(_bootstrap._w_long(len(source) & 0xFFFFFFFF))
            data.extend(marshal.dumps(code_object))
            self.write_bytecode(fullname, data)
        return code_object

    @abc.abstractmethod
    def source_mtime(self, fullname):
        """Abstract method.  Accepts a str filename and returns an int
        modification time for the source of the module."""
        raise NotImplementedError

    @abc.abstractmethod
    def bytecode_path(self, fullname):
        """Abstract method.  Accepts a str filename and returns the str
        pathname to the bytecode for the module."""
        raise NotImplementedError

    @abc.abstractmethod
    def write_bytecode(self, fullname, bytecode):
        """Abstract method.  Accepts a str filename and bytes object
        representing the bytecode for the module.  Returns a boolean
        representing whether the bytecode was written or not."""
        raise NotImplementedError
| agpl-3.0 |
zmarano/compute-image-packages | packages/python-google-compute-engine/google_compute_engine/network_utils.py | 6 | 3281 | #!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for configuring IP address forwarding."""
import logging
import os
import re
try:
import netifaces
except ImportError:
netifaces = None
MAC_REGEX = re.compile(r'\A([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2})\Z')
class NetworkUtils(object):
    """System network Ethernet interface utilities."""

    def __init__(self, logger=logging):
        """Constructor.

        Args:
            logger: logger object, used to write to SysLog and serial port.
        """
        self.logger = logger
        self.interfaces = self._CreateInterfaceMap()

    def _CreateInterfaceMap(self):
        """Generate a dictionary mapping MAC address to Ethernet interfaces.

        Returns:
            dict, string MAC addresses mapped to the string network interface
            name.
        """
        # Prefer netifaces when the optional dependency imported successfully.
        if netifaces is None:
            return self._CreateInterfaceMapSysfs()
        return self._CreateInterfaceMapNetifaces()

    def _CreateInterfaceMapSysfs(self):
        """Map MAC addresses to interface names by reading sysfs.

        Returns:
            dict, string MAC addresses mapped to the string network interface
            name.
        """
        mapping = {}
        for name in os.listdir('/sys/class/net'):
            try:
                with open('/sys/class/net/%s/address' % name) as address_file:
                    mac = address_file.read().strip()
            except (IOError, OSError) as e:
                self.logger.warning(
                    'Unable to determine MAC address for %s. %s.', name, str(e))
            else:
                mapping[mac] = name
        return mapping

    def _CreateInterfaceMapNetifaces(self):
        """Map MAC addresses to interface names using the netifaces package.

        Returns:
            dict, string MAC addresses mapped to the string network interface
            name.
        """
        mapping = {}
        for name in netifaces.interfaces():
            links = netifaces.ifaddresses(name).get(netifaces.AF_LINK, [])
            mac = next(iter(links), {}).get('addr', '')
            # In some systems this field can come with an empty string or with
            # the name of the interface when there is no MAC address associated
            # with it. Check the regex to be sure.
            if MAC_REGEX.match(mac):
                mapping[mac] = name
            else:
                self.logger.warning('Unable to determine MAC address for %s.',
                                    name)
        return mapping

    def GetNetworkInterface(self, mac_address):
        """Get the name of the network interface associated with a MAC address.

        Args:
            mac_address: string, the hardware address of the network interface.

        Returns:
            string, the network interface associated with a MAC address or None.
        """
        return self.interfaces.get(mac_address)
| apache-2.0 |
abhishek-ch/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl/worksheet.py | 61 | 18052 | # file openpyxl/worksheet.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Worksheet is the 2nd-level container in Excel."""
# Python stdlib imports
import re
# package imports
from . import cell
from .cell import coordinate_from_string, \
column_index_from_string, get_column_letter
from .shared.exc import SheetTitleException, \
InsufficientCoordinatesException, CellCoordinatesException, \
NamedRangeException
from .shared.password_hasher import hash_password
from .style import Style, DEFAULTS as DEFAULTS_STYLE
from .drawing import Drawing
_DEFAULTS_STYLE_HASH = hash(DEFAULTS_STYLE)
def flatten(results):
    """Collapse a 2-D iterable of cells into a tuple of tuples of their
    .value attributes."""
    return tuple(tuple(item.value for item in row) for row in results)
class Relationship(object):
    """Represents many kinds of relationships."""
    # TODO: Use this object for workbook relationships as well as
    # worksheet relationships

    # Map of supported relationship names to their OOXML schema URIs.
    TYPES = {
        'hyperlink': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/hyperlink',
        'drawing': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/drawing',
        #'worksheet': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/worksheet',
        #'sharedStrings': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/sharedStrings',
        #'styles': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/styles',
        #'theme': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/theme',
    }

    def __init__(self, rel_type):
        """Create a relationship of kind *rel_type* (a key of TYPES).

        Raises ValueError for an unsupported kind.
        """
        if rel_type not in self.TYPES:
            raise ValueError("Invalid relationship type %s" % rel_type)
        self.type = self.TYPES[rel_type]
        self.target = ""
        self.target_mode = ""
        self.id = ""
class PageSetup(object):
    """Information about page layout for this sheet"""
    # NOTE(review): empty placeholder -- presumably populated/consumed by
    # other parts of the package; confirm before extending.
    pass

class HeaderFooter(object):
    """Information about the header/footer for this sheet."""
    pass

class SheetView(object):
    """Information about the visible portions of this sheet."""
    pass
class RowDimension(object):
    """Display properties for a single worksheet row."""

    __slots__ = ('row_index', 'height', 'visible', 'outline_level',
                 'collapsed', 'style_index')

    def __init__(self, index=0):
        """Create display info for row *index* with default settings."""
        self.row_index = index
        self.height = -1
        self.visible = True
        self.outline_level = 0
        self.collapsed = False
        self.style_index = None
class ColumnDimension(object):
    """Display properties for a single worksheet column."""

    __slots__ = ('column_index', 'width', 'auto_size', 'visible',
                 'outline_level', 'collapsed', 'style_index')

    def __init__(self, index='A'):
        """Create display info for column *index* with default settings."""
        self.column_index = index
        self.width = -1
        self.auto_size = False
        self.visible = True
        self.outline_level = 0
        self.collapsed = False
        self.style_index = 0
class PageMargins(object):
    """Information about page margins for view/print layouts."""

    def __init__(self):
        self.left = 0.7
        self.right = 0.7
        self.top = 0.75
        self.bottom = 0.75
        self.header = 0.3
        self.footer = 0.3
class SheetProtection(object):
    """Information about protection of various aspects of a sheet.

    All protection flags start out disabled and the stored password is
    empty.
    """

    def __init__(self):
        self.sheet = self.objects = self.scenarios = False
        self.format_cells = self.format_columns = self.format_rows = False
        self.insert_columns = self.insert_rows = self.insert_hyperlinks = False
        self.delete_columns = self.delete_rows = False
        self.select_locked_cells = self.select_unlocked_cells = False
        self.sort = self.auto_filter = self.pivot_tables = False
        self._password = ''

    def set_password(self, value='', already_hashed=False):
        """Set a password on this sheet, hashing it unless told otherwise."""
        self._password = value if already_hashed else hash_password(value)

    def _set_raw_password(self, value):
        """Set a password directly, forcing a hash step."""
        self.set_password(value, already_hashed=False)

    def _get_raw_password(self):
        """Return the password value, regardless of hash."""
        return self._password

    password = property(_get_raw_password, _set_raw_password,
                        'get/set the password (if already hashed, '
                        'use set_password() instead)')
class Worksheet(object):
"""Represents a worksheet.
Do not create worksheets yourself,
use :func:`openpyxl.workbook.Workbook.create_sheet` instead
"""
BREAK_NONE = 0
BREAK_ROW = 1
BREAK_COLUMN = 2
SHEETSTATE_VISIBLE = 'visible'
SHEETSTATE_HIDDEN = 'hidden'
SHEETSTATE_VERYHIDDEN = 'veryHidden'
def __init__(self, parent_workbook, title = 'Sheet'):
self._parent = parent_workbook
self._title = ''
if not title:
self.title = 'Sheet%d' % (1 + len(self._parent.worksheets))
else:
self.title = title
self.row_dimensions = {}
self.column_dimensions = {}
self._cells = {}
self._styles = {}
self._charts = []
self.relationships = []
self.selected_cell = 'A1'
self.active_cell = 'A1'
self.sheet_state = self.SHEETSTATE_VISIBLE
self.page_setup = PageSetup()
self.page_margins = PageMargins()
self.header_footer = HeaderFooter()
self.sheet_view = SheetView()
self.protection = SheetProtection()
self.show_gridlines = True
self.print_gridlines = False
self.show_summary_below = True
self.show_summary_right = True
self.default_row_dimension = RowDimension()
self.default_column_dimension = ColumnDimension()
self._auto_filter = None
self._freeze_panes = None
def __repr__(self):
return '<Worksheet "%s">' % self.title
def garbage_collect(self):
"""Delete cells that are not storing a value."""
delete_list = [coordinate for coordinate, cell in \
self._cells.items() if (cell.value in ('', None) and \
hash(cell.style) == _DEFAULTS_STYLE_HASH)]
for coordinate in delete_list:
del self._cells[coordinate]
def get_cell_collection(self):
"""Return an unordered list of the cells in this worksheet."""
return self._cells.values()
def _set_title(self, value):
"""Set a sheet title, ensuring it is valid."""
bad_title_char_re = re.compile(r'[\\*?:/\[\]]')
if bad_title_char_re.search(value):
msg = 'Invalid character found in sheet title'
raise SheetTitleException(msg)
# check if sheet_name already exists
# do this *before* length check
if self._parent.get_sheet_by_name(value):
# use name, but append with lowest possible integer
i = 1
while self._parent.get_sheet_by_name('%s%d' % (value, i)):
i += 1
value = '%s%d' % (value, i)
if len(value) > 31:
msg = 'Maximum 31 characters allowed in sheet title'
raise SheetTitleException(msg)
self._title = value
def _get_title(self):
"""Return the title for this sheet."""
return self._title
title = property(_get_title, _set_title, doc =
'Get or set the title of the worksheet. '
'Limited to 31 characters, no special characters.')
def _set_auto_filter(self, range):
# Normalize range to a str or None
if not range:
range = None
elif isinstance(range, str):
range = range.upper()
else: # Assume a range
range = range[0][0].address + ':' + range[-1][-1].address
self._auto_filter = range
def _get_auto_filter(self):
return self._auto_filter
auto_filter = property(_get_auto_filter, _set_auto_filter, doc =
'get or set auto filtering on columns')
def _set_freeze_panes(self, topLeftCell):
if not topLeftCell:
topLeftCell = None
elif isinstance(topLeftCell, str):
topLeftCell = topLeftCell.upper()
else: # Assume a cell
topLeftCell = topLeftCell.address
if topLeftCell == 'A1':
topLeftCell = None
self._freeze_panes = topLeftCell
def _get_freeze_panes(self):
return self._freeze_panes
freeze_panes = property(_get_freeze_panes,_set_freeze_panes, doc =
"Get or set frozen panes")
def cell(self, coordinate = None, row = None, column = None):
"""Returns a cell object based on the given coordinates.
Usage: cell(coodinate='A15') **or** cell(row=15, column=1)
If `coordinates` are not given, then row *and* column must be given.
Cells are kept in a dictionary which is empty at the worksheet
creation. Calling `cell` creates the cell in memory when they
are first accessed, to reduce memory usage.
:param coordinate: coordinates of the cell (e.g. 'B12')
:type coordinate: string
:param row: row index of the cell (e.g. 4)
:type row: int
:param column: column index of the cell (e.g. 3)
:type column: int
:raise: InsufficientCoordinatesException when coordinate or (row and column) are not given
:rtype: :class:`openpyxl.cell.Cell`
"""
if not coordinate:
if (row is None or column is None):
msg = "You have to provide a value either for " \
"'coordinate' or for 'row' *and* 'column'"
raise InsufficientCoordinatesException(msg)
else:
coordinate = '%s%s' % (get_column_letter(column + 1), row + 1)
else:
coordinate = coordinate.replace('$', '')
return self._get_cell(coordinate)
def _get_cell(self, coordinate):
if not coordinate in self._cells:
column, row = coordinate_from_string(coordinate)
new_cell = cell.Cell(self, column, row)
self._cells[coordinate] = new_cell
if column not in self.column_dimensions:
self.column_dimensions[column] = ColumnDimension(column)
if row not in self.row_dimensions:
self.row_dimensions[row] = RowDimension(row)
return self._cells[coordinate]
def get_highest_row(self):
    """Returns the maximum row index containing data.

    :rtype: int
    """
    # An empty sheet still reports one row.
    return max(self.row_dimensions.keys()) if self.row_dimensions else 1
def get_highest_column(self):
    """Get the largest value for column currently stored.

    :rtype: int
    """
    if not self.column_dimensions:
        # An empty sheet still reports one column.
        return 1
    return max(column_index_from_string(letter)
               for letter in self.column_dimensions)
def calculate_dimension(self):
    """Return the minimum bounding range for all cells containing data."""
    last_column = get_column_letter(self.get_highest_column())
    return 'A1:%s%d' % (last_column, self.get_highest_row())
def range(self, range_string, row = 0, column = 0):
    """Returns a 2D array of cells, with optional row and column offsets.

    :param range_string: cell range string or `named range` name
    :type range_string: string
    :param row: number of rows to offset
    :type row: int
    :param column: number of columns to offset
    :type column: int
    :rtype: tuples of tuples of :class:`openpyxl.cell.Cell`
    """
    if ':' in range_string:
        # Explicit rectangular range such as 'A1:C4'.
        start, end = range_string.split(':')
        min_col, min_row = coordinate_from_string(start)
        max_col, max_row = coordinate_from_string(end)
        if column:
            # Shift both column letters right by the requested offset.
            min_col = get_column_letter(
                column_index_from_string(min_col) + column)
            max_col = get_column_letter(
                column_index_from_string(max_col) + column)
        min_col = column_index_from_string(min_col)
        max_col = column_index_from_string(max_col)
        # Pre-compute each column letter once instead of once per cell.
        cache_cols = dict((idx, get_column_letter(idx))
                          for idx in xrange(min_col, max_col + 1))
        result = []
        for row_idx in xrange(min_row + row, max_row + row + 1):
            result.append(tuple(
                self.cell('%s%s' % (cache_cols[col_idx], row_idx))
                for col_idx in xrange(min_col, max_col + 1)))
        return tuple(result)
    try:
        # A single cell reference such as 'B12'.
        return self.cell(coordinate = range_string, row = row,
                         column = column)
    except CellCoordinatesException:
        pass
    # Otherwise treat the string as the name of a named range.
    named_range = self._parent.get_named_range(range_string)
    if named_range is None:
        msg = '%s is not a valid range name' % range_string
        raise NamedRangeException(msg)
    result = []
    for destination in named_range.destinations:
        worksheet, cells_range = destination
        if worksheet is not self:
            msg = 'Range %s is not defined on worksheet %s' % \
                (cells_range, self.title)
            raise NamedRangeException(msg)
        content = self.range(cells_range)
        if isinstance(content, tuple):
            for cells in content:
                result.extend(cells)
        else:
            result.append(content)
    # A single-cell named range is returned bare, not wrapped in a tuple.
    if len(result) == 1:
        return result[0]
    return tuple(result)
def get_style(self, coordinate):
    """Return the style object for the specified cell.

    A default Style is created and cached on first access.
    """
    if coordinate not in self._styles:
        self._styles[coordinate] = Style()
    return self._styles[coordinate]
def create_relationship(self, rel_type):
    """Add a relationship of *rel_type* for this sheet and return it."""
    new_rel = Relationship(rel_type)
    self.relationships.append(new_rel)
    # Relationship ids are 1-based and derived from list position.
    position = self.relationships.index(new_rel)
    new_rel.id = 'rId' + str(position + 1)
    return self.relationships[position]
def add_chart(self, chart):
    """Add a chart to the sheet, linking the chart back to this sheet."""
    chart._sheet = self
    self._charts.append(chart)
def append(self, list_or_dict):
    """Appends a group of values at the bottom of the current sheet.

    * If it's a list: all values are added in order, starting from the first column
    * If it's a dict: values are assigned to the columns indicated by the keys (numbers or letters)

    :param list_or_dict: list or dict containing values to append
    :type list_or_dict: list/tuple or dict

    Usage:
    * append(['This is A1', 'This is B1', 'This is C1'])
    * **or** append({'A' : 'This is A1', 'C' : 'This is C1'})
    * **or** append({0 : 'This is A1', 2 : 'This is C1'})

    :raise: TypeError when list_or_dict is neither a list/tuple nor a dict
    """
    # The next free row index is derived from how many rows already exist.
    row_idx = len(self.row_dimensions)
    if isinstance(list_or_dict, (list, tuple)):
        for col_idx, value in enumerate(list_or_dict):
            self.cell(row = row_idx, column = col_idx).value = value
    elif isinstance(list_or_dict, dict):
        for col_key, value in list_or_dict.items():
            if isinstance(col_key, basestring):
                # Letter keys ('A') are translated to zero-based indices.
                col_key = column_index_from_string(col_key) - 1
            self.cell(row = row_idx, column = col_key).value = value
    else:
        raise TypeError('list_or_dict must be a list or a dict')
@property
def rows(self):
    """All cells in the sheet's used area, as a tuple of row tuples."""
    dimension = self.calculate_dimension()
    return self.range(dimension)
@property
def columns(self):
    """All cells in the sheet's used area, grouped per column."""
    max_row = self.get_highest_row()
    cols = []
    for col_idx in range(self.get_highest_column()):
        letter = get_column_letter(col_idx + 1)
        # Fetch the whole column as single-cell rows, then flatten.
        column_cells = self.range('%s1:%s%d' % (letter, letter, max_row))
        cols.append(tuple(row_cells[0] for row_cells in column_cells))
    return tuple(cols)
| apache-2.0 |
ljgabc/lfs | usr/lib/python2.7/lib-tk/test/runtktests.py | 61 | 4100 | """
Use this module to get and run all tk tests.
Tkinter tests should live in a package inside the directory where this file
lives, like test_tkinter.
Extensions also should live in packages following the same rule as above.
"""
import os
import sys
import unittest
import importlib
import test.test_support
this_dir_path = os.path.abspath(os.path.dirname(__file__))
_tk_unavailable = None
def check_tk_availability():
    """Check that Tk is installed and available.

    The probe result is cached in the module-level ``_tk_unavailable`` so
    it only runs once; raises unittest.SkipTest when Tk cannot be used.
    """
    global _tk_unavailable

    if _tk_unavailable is None:
        _tk_unavailable = _probe_tk()

    if _tk_unavailable:
        raise unittest.SkipTest(_tk_unavailable)
    return


def _probe_tk():
    """Return a reason string when Tk is unusable, else False."""
    if sys.platform == 'darwin':
        # The Aqua Tk implementations on OS X can abort the process if
        # being called in an environment where a window server connection
        # cannot be made, for instance when invoked by a buildbot or ssh
        # process not running under the same user id as the current console
        # user.  To avoid that, raise an exception if the window manager
        # connection is not available.
        from ctypes import cdll, c_int, pointer, Structure
        from ctypes.util import find_library
        app_services = cdll.LoadLibrary(find_library("ApplicationServices"))

        if app_services.CGMainDisplayID() == 0:
            return "cannot run without OS X window manager"

        class ProcessSerialNumber(Structure):
            _fields_ = [("highLongOfPSN", c_int),
                        ("lowLongOfPSN", c_int)]
        psn = ProcessSerialNumber()
        psn_p = pointer(psn)
        if ((app_services.GetCurrentProcess(psn_p) < 0) or
                (app_services.SetFrontProcess(psn_p) < 0)):
            return "cannot run without OS X gui process"
    else:  # not OS X
        import Tkinter
        try:
            Tkinter.Button()
        except Tkinter.TclError as msg:
            # assuming tk is not available
            return "tk not available: %s" % msg
    return False
def is_package(path):
    """Return True when the directory *path* contains a package marker.

    Source, compiled and optimized ``__init__`` files all count.
    """
    # BUG FIX: the optimized-bytecode marker was previously misspelled
    # '__init.pyo', so packages shipping only .pyo files were never found.
    markers = ('__init__.py', '__init__.pyc', '__init__.pyo')
    for name in os.listdir(path):
        if name in markers:
            return True
    return False
def get_tests_modules(basepath=this_dir_path, gui=True, packages=None):
    """This will import and yield modules whose names start with test_
    and are inside packages found in the path starting at basepath.

    If packages is specified it should contain package names that want
    their tests collected.
    """
    py_ext = '.py'
    for dirpath, dirnames, filenames in os.walk(basepath):
        for dirname in list(dirnames):
            # Prune hidden directories so os.walk never descends into them.
            if dirname[0] == '.':
                dirnames.remove(dirname)

        if is_package(dirpath) and filenames:
            # Convert the filesystem path below basepath into a dotted
            # package name.  BUG FIX: split on os.sep rather than a
            # hard-coded '/', which broke package detection on Windows.
            relpath = dirpath[len(basepath) + len(os.sep):]
            pkg_name = relpath.replace(os.sep, '.')
            if packages and pkg_name not in packages:
                continue

            filenames = filter(
                lambda x: x.startswith('test_') and x.endswith(py_ext),
                filenames)

            for name in filenames:
                try:
                    yield importlib.import_module(
                        ".%s" % name[:-len(py_ext)], pkg_name)
                except test.test_support.ResourceDenied:
                    # A test module was denied a resource (e.g. 'gui');
                    # only propagate when GUI tests were requested.
                    if gui:
                        raise
def get_tests(text=True, gui=True, packages=None):
    """Yield all the tests in the modules found by get_tests_modules.

    When *gui* is False only tests that do not require a GUI are
    returned; when *text* is False GUI-only tests are returned.
    """
    wanted_attrs = []
    if text:
        wanted_attrs.append('tests_nogui')
    if gui:
        wanted_attrs.append('tests_gui')
    for module in get_tests_modules(gui=gui, packages=packages):
        for attr in wanted_attrs:
            # Modules may define either, both or neither attribute.
            for test in getattr(module, attr, ()):
                yield test
if __name__ == "__main__":
    # Allow tests that need the 'gui' resource, then run everything found.
    test.test_support.use_resources = ['gui']
    test.test_support.run_unittest(*get_tests())
| gpl-2.0 |
aselle/tensorflow | tensorflow/python/training/ftrl.py | 16 | 12154 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ftrl-proximal for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("train.FtrlOptimizer")
class FtrlOptimizer(optimizer.Optimizer):
"""Optimizer that implements the FTRL algorithm.
See this [paper](
https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf).
This version has support for both online L2 (the L2 penalty given in the paper
above) and shrinkage-type L2 (which is the addition of an L2 penalty to the
loss function).
"""
def __init__(self,
             learning_rate,
             learning_rate_power=-0.5,
             initial_accumulator_value=0.1,
             l1_regularization_strength=0.0,
             l2_regularization_strength=0.0,
             use_locking=False,
             name="Ftrl",
             accum_name=None,
             linear_name=None,
             l2_shrinkage_regularization_strength=0.0):
    r"""Construct a new FTRL optimizer.

    Args:
      learning_rate: A float value or a constant float `Tensor`.
      learning_rate_power: A float value, must be less or equal to zero.
      initial_accumulator_value: The starting value for accumulators.
        Only zero or positive values are allowed.
      l1_regularization_strength: A float value, must be greater than or
        equal to zero.
      l2_regularization_strength: A float value, must be greater than or
        equal to zero.
      use_locking: If `True` use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "Ftrl".
      accum_name: The suffix for the variable that keeps the gradient squared
        accumulator.  If not present, defaults to name.
      linear_name: The suffix for the variable that keeps the linear gradient
        accumulator.  If not present, defaults to name + "_1".
      l2_shrinkage_regularization_strength: A float value, must be greater than
        or equal to zero. This differs from L2 above in that the L2 above is a
        stabilization penalty, whereas this L2 shrinkage is a magnitude penalty.
        The FTRL formulation can be written as:
        w_{t+1} = argmin_w(\hat{g}_{1:t}w + L1*||w||_1 + L2*||w||_2^2), where
        \hat{g} = g + (2*L2_shrinkage*w), and g is the gradient of the loss
        function w.r.t. the weights w.
        Specifically, in the absence of L1 regularization, it is equivalent to
        the following update rule:
        w_{t+1} = w_t - lr_t / (1 + 2*L2*lr_t) * g_t -
                  2*L2_shrinkage*lr_t / (1 + 2*L2*lr_t) * w_t
        where lr_t is the learning rate at t.
        When input is sparse shrinkage will only happen on the active weights.

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    super(FtrlOptimizer, self).__init__(use_locking, name)

    # Validate hyperparameters so bad values fail fast at construction.
    # BUG FIX: the first message previously read "needs to be be positive".
    if initial_accumulator_value < 0.0:
        raise ValueError(
            "initial_accumulator_value %f needs to be positive or zero" %
            initial_accumulator_value)
    if learning_rate_power > 0.0:
        raise ValueError("learning_rate_power %f needs to be negative or zero" %
                         learning_rate_power)
    if l1_regularization_strength < 0.0:
        raise ValueError(
            "l1_regularization_strength %f needs to be positive or zero" %
            l1_regularization_strength)
    if l2_regularization_strength < 0.0:
        raise ValueError(
            "l2_regularization_strength %f needs to be positive or zero" %
            l2_regularization_strength)
    if l2_shrinkage_regularization_strength < 0.0:
        raise ValueError(
            "l2_shrinkage_regularization_strength %f needs to be positive"
            " or zero" % l2_shrinkage_regularization_strength)

    self._learning_rate = learning_rate
    self._learning_rate_power = learning_rate_power
    self._initial_accumulator_value = initial_accumulator_value
    self._l1_regularization_strength = l1_regularization_strength
    self._l2_regularization_strength = l2_regularization_strength
    self._l2_shrinkage_regularization_strength = (
        l2_shrinkage_regularization_strength)
    # Tensor versions of the hyperparameters, populated by _prepare().
    self._learning_rate_tensor = None
    self._learning_rate_power_tensor = None
    self._l1_regularization_strength_tensor = None
    self._l2_regularization_strength_tensor = None
    self._l2_shrinkage_regularization_strength_tensor = None
    self._accum_name = accum_name
    self._linear_name = linear_name
def _create_slots(self, var_list):
    """Create the per-variable "accum" and "linear" slot variables."""
    for var in var_list:
        with ops.colocate_with(var):
            # The accumulator starts at a small positive constant; the
            # linear term starts at zero.
            initial = constant_op.constant(
                self._initial_accumulator_value, dtype=var.dtype,
                shape=var.get_shape())
            self._get_or_make_slot(var, initial, "accum",
                                   self._accum_name or self._name)
            self._zeros_slot(var, "linear", self._linear_name or self._name)
def _prepare(self):
    """Convert the Python-number hyperparameters to tensors."""
    convert = ops.convert_to_tensor
    self._learning_rate_tensor = convert(
        self._learning_rate, name="learning_rate")
    self._l1_regularization_strength_tensor = convert(
        self._l1_regularization_strength, name="l1_regularization_strength")
    self._l2_regularization_strength_tensor = convert(
        self._l2_regularization_strength, name="l2_regularization_strength")
    self._l2_shrinkage_regularization_strength_tensor = convert(
        self._l2_shrinkage_regularization_strength,
        name="l2_shrinkage_regularization_strength")
    self._learning_rate_power_tensor = convert(
        self._learning_rate_power, name="learning_rate_power")
def _apply_dense(self, grad, var):
    """Dense FTRL update for *var* using gradient *grad*."""
    accum = self.get_slot(var, "accum")
    linear = self.get_slot(var, "linear")
    dtype = var.dtype.base_dtype
    if self._l2_shrinkage_regularization_strength <= 0.0:
        # Plain FTRL: no shrinkage term, use the v1 kernel.
        return training_ops.apply_ftrl(
            var, accum, linear, grad,
            math_ops.cast(self._learning_rate_tensor, dtype),
            math_ops.cast(self._l1_regularization_strength_tensor, dtype),
            math_ops.cast(self._l2_regularization_strength_tensor, dtype),
            math_ops.cast(self._learning_rate_power_tensor, dtype),
            use_locking=self._use_locking)
    # FTRL with L2-shrinkage uses the v2 kernel.
    return training_ops.apply_ftrl_v2(
        var, accum, linear, grad,
        math_ops.cast(self._learning_rate_tensor, dtype),
        math_ops.cast(self._l1_regularization_strength_tensor, dtype),
        math_ops.cast(self._l2_regularization_strength_tensor, dtype),
        math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
                      dtype),
        math_ops.cast(self._learning_rate_power_tensor, dtype),
        use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
    """Dense FTRL update for a resource variable."""
    accum = self.get_slot(var, "accum")
    linear = self.get_slot(var, "linear")
    dtype = var.dtype.base_dtype
    if self._l2_shrinkage_regularization_strength <= 0.0:
        # Plain FTRL: no shrinkage term, use the v1 kernel.
        return training_ops.resource_apply_ftrl(
            var.handle, accum.handle, linear.handle, grad,
            math_ops.cast(self._learning_rate_tensor, dtype),
            math_ops.cast(self._l1_regularization_strength_tensor, dtype),
            math_ops.cast(self._l2_regularization_strength_tensor, dtype),
            math_ops.cast(self._learning_rate_power_tensor, dtype),
            use_locking=self._use_locking)
    # FTRL with L2-shrinkage uses the v2 kernel.
    return training_ops.resource_apply_ftrl_v2(
        var.handle, accum.handle, linear.handle, grad,
        math_ops.cast(self._learning_rate_tensor, dtype),
        math_ops.cast(self._l1_regularization_strength_tensor, dtype),
        math_ops.cast(self._l2_regularization_strength_tensor, dtype),
        math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
                      dtype),
        math_ops.cast(self._learning_rate_power_tensor, dtype),
        use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
    """Sparse (IndexedSlices) FTRL update for *var*."""
    accum = self.get_slot(var, "accum")
    linear = self.get_slot(var, "linear")
    dtype = var.dtype.base_dtype
    if self._l2_shrinkage_regularization_strength <= 0.0:
        return training_ops.sparse_apply_ftrl(
            var, accum, linear, grad.values, grad.indices,
            math_ops.cast(self._learning_rate_tensor, dtype),
            math_ops.cast(self._l1_regularization_strength_tensor, dtype),
            math_ops.cast(self._l2_regularization_strength_tensor, dtype),
            math_ops.cast(self._learning_rate_power_tensor, dtype),
            use_locking=self._use_locking)
    return training_ops.sparse_apply_ftrl_v2(
        var, accum, linear, grad.values, grad.indices,
        math_ops.cast(self._learning_rate_tensor, dtype),
        math_ops.cast(self._l1_regularization_strength_tensor, dtype),
        math_ops.cast(self._l2_regularization_strength_tensor, dtype),
        # NOTE(review): upstream casts the shrinkage term to grad.dtype
        # while every sibling method uses var.dtype.  Kept as-is; the two
        # presumably agree for IndexedSlices — confirm before unifying.
        math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
                      grad.dtype.base_dtype),
        math_ops.cast(self._learning_rate_power_tensor, dtype),
        use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
    """Sparse FTRL update for a resource variable at *indices*."""
    accum = self.get_slot(var, "accum")
    linear = self.get_slot(var, "linear")
    # For resource-sparse updates the kernels take the gradient's dtype.
    dtype = grad.dtype
    if self._l2_shrinkage_regularization_strength <= 0.0:
        return training_ops.resource_sparse_apply_ftrl(
            var.handle, accum.handle, linear.handle, grad, indices,
            math_ops.cast(self._learning_rate_tensor, dtype),
            math_ops.cast(self._l1_regularization_strength_tensor, dtype),
            math_ops.cast(self._l2_regularization_strength_tensor, dtype),
            math_ops.cast(self._learning_rate_power_tensor, dtype),
            use_locking=self._use_locking)
    return training_ops.resource_sparse_apply_ftrl_v2(
        var.handle, accum.handle, linear.handle, grad, indices,
        math_ops.cast(self._learning_rate_tensor, dtype),
        math_ops.cast(self._l1_regularization_strength_tensor, dtype),
        math_ops.cast(self._l2_regularization_strength_tensor, dtype),
        math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
                      dtype),
        math_ops.cast(self._learning_rate_power_tensor, dtype),
        use_locking=self._use_locking)
| apache-2.0 |
Azure/azure-sdk-for-python | sdk/logic/azure-mgmt-logic/azure/mgmt/logic/operations/_workflow_trigger_histories_operations.py | 1 | 13103 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class WorkflowTriggerHistoriesOperations(object):
"""WorkflowTriggerHistoriesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.logic.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    """Store the pipeline client, configuration and (de)serializers."""
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
def list(
    self,
    resource_group_name,  # type: str
    workflow_name,  # type: str
    trigger_name,  # type: str
    top=None,  # type: Optional[int]
    filter=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.WorkflowTriggerHistoryListResult"]
    """Gets a list of workflow trigger histories.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param workflow_name: The workflow name.
    :type workflow_name: str
    :param trigger_name: The workflow trigger name.
    :type trigger_name: str
    :param top: The number of items to be included in the result.
    :type top: int
    :param filter: The filter to apply on the operation. Options for filters include: Status,
     StartTime, and ClientTrackingId.
    :type filter: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either WorkflowTriggerHistoryListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.logic.models.WorkflowTriggerHistoryListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.WorkflowTriggerHistoryListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # The Accept header is identical for every page.
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if next_link:
            # Continuation pages come back as fully-formed URLs, so no
            # query string needs to be rebuilt.
            return self._client.get(next_link, {}, header_parameters)

        # First page: expand the URL template and build the query string.
        url = self.list.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
            'triggerName': self._serialize.url("trigger_name", trigger_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if top is not None:
            query_parameters['$top'] = self._serialize.query("top", top, 'int')
        if filter is not None:
            query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
        return self._client.get(url, query_parameters, header_parameters)

    def extract_data(pipeline_response):
        deserialized = self._deserialize('WorkflowTriggerHistoryListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize(_models.ErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(get_next, extract_data)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/{triggerName}/histories'}  # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    workflow_name,  # type: str
    trigger_name,  # type: str
    history_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.WorkflowTriggerHistory"
    """Gets a workflow trigger history.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param workflow_name: The workflow name.
    :type workflow_name: str
    :param trigger_name: The workflow trigger name.
    :type trigger_name: str
    :param history_name: The workflow trigger history name. Corresponds to the run name for
     triggers that resulted in a run.
    :type history_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: WorkflowTriggerHistory, or the result of cls(response)
    :rtype: ~azure.mgmt.logic.models.WorkflowTriggerHistory
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.WorkflowTriggerHistory"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-01"
    accept = "application/json"

    # Expand the URL template with the escaped path parameters.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
        'triggerName': self._serialize.url("trigger_name", trigger_name, 'str'),
        'historyName': self._serialize.url("history_name", history_name, 'str'),
    }
    url = self._client.format_url(self.get.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('WorkflowTriggerHistory', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/{triggerName}/histories/{historyName}'}  # type: ignore
def resubmit(
    self,
    resource_group_name,  # type: str
    workflow_name,  # type: str
    trigger_name,  # type: str
    history_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Resubmits a workflow run based on the trigger history.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param workflow_name: The workflow name.
    :type workflow_name: str
    :param trigger_name: The workflow trigger name.
    :type trigger_name: str
    :param history_name: The workflow trigger history name. Corresponds to the run name for
     triggers that resulted in a run.
    :type history_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-05-01"
    accept = "application/json"

    # Expand the URL template with the escaped path parameters.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
        'triggerName': self._serialize.url("trigger_name", trigger_name, 'str'),
        'historyName': self._serialize.url("history_name", history_name, 'str'),
    }
    url = self._client.format_url(self.resubmit.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Resubmission is asynchronous: the service acknowledges with 202.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
resubmit.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/{triggerName}/histories/{historyName}/resubmit'}  # type: ignore
| mit |
MaximNevrov/neutron | neutron/tests/unit/plugins/ml2/drivers/macvtap/mech_driver/test_mech_macvtap.py | 4 | 4005 | # Copyright (c) 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import constants
from neutron.extensions import portbindings
from neutron.plugins.ml2.drivers.macvtap.mech_driver import mech_macvtap
from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base
class MacvtapMechanismBaseTestCase(base.AgentMechanismBaseTestCase):
    """Common fixtures for the macvtap mechanism-driver test cases."""

    VIF_TYPE = portbindings.VIF_TYPE_MACVTAP
    CAP_PORT_FILTER = False
    AGENT_TYPE = constants.AGENT_TYPE_MACVTAP

    # A mapping that the driver can bind against, and one it cannot.
    GOOD_MAPPINGS = {'fake_physical_network': 'fake_if'}
    GOOD_CONFIGS = {'interface_mappings': GOOD_MAPPINGS}

    BAD_MAPPINGS = {'wrong_physical_network': 'wrong_if'}
    BAD_CONFIGS = {'interface_mappings': BAD_MAPPINGS}

    AGENTS = [{'alive': True,
               'configurations': GOOD_CONFIGS,
               'host': 'host'}]
    AGENTS_DEAD = [{'alive': False,
                    'configurations': GOOD_CONFIGS,
                    'host': 'dead_host'}]
    AGENTS_BAD = [{'alive': False,
                   'configurations': GOOD_CONFIGS,
                   'host': 'bad_host_1'},
                  {'alive': True,
                   'configurations': BAD_CONFIGS,
                   'host': 'bad_host_2'}]

    def setUp(self):
        """Create and initialize the driver under test."""
        super(MacvtapMechanismBaseTestCase, self).setUp()
        self.driver = mech_macvtap.MacvtapMechanismDriver()
        self.driver.initialize()
class MacvtapMechanismGenericTestCase(MacvtapMechanismBaseTestCase,
                                      base.AgentMechanismGenericTestCase):
    """Generic mechanism-driver checks, run with the macvtap fixtures."""
    pass
class MacvtapMechanismFlatTestCase(MacvtapMechanismBaseTestCase,
                                   base.AgentMechanismFlatTestCase):
    """Flat-network binding behaviour of the macvtap driver."""

    def test_type_flat_vif_details(self):
        """Binding a flat segment uses the mapped interface directly."""
        context = base.FakePortContext(self.AGENT_TYPE,
                                       self.AGENTS,
                                       self.FLAT_SEGMENTS,
                                       vnic_type=self.VNIC_TYPE)
        self.driver.bind_port(context)
        details = context._bound_vif_details
        # Flat networks carry no VLAN tag.
        self.assertIsNone(details.get(portbindings.VIF_DETAILS_VLAN))
        # The macvtap source is the physical interface itself.
        expected = {
            portbindings.VIF_DETAILS_MACVTAP_MODE: "bridge",
            portbindings.VIF_DETAILS_PHYSICAL_INTERFACE: "fake_if",
            portbindings.VIF_DETAILS_MACVTAP_SOURCE: "fake_if",
        }
        for key, value in expected.items():
            self.assertEqual(value, details.get(key))
class MacvtapMechanismVlanTestCase(MacvtapMechanismBaseTestCase,
                                   base.AgentMechanismVlanTestCase):
    """Exercise macvtap port binding against a VLAN network segment."""

    def test_type_vlan_vif_details(self):
        """A VLAN binding tags the source as '<interface>.<vlan-id>'."""
        context = base.FakePortContext(
            self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS,
            vnic_type=self.VNIC_TYPE)
        self.driver.bind_port(context)

        details = context._bound_vif_details
        expected = {
            portbindings.VIF_DETAILS_VLAN: 1234,
            portbindings.VIF_DETAILS_MACVTAP_MODE: "bridge",
            portbindings.VIF_DETAILS_PHYSICAL_INTERFACE: "fake_if",
            portbindings.VIF_DETAILS_MACVTAP_SOURCE: "fake_if.1234",
        }
        for key, value in expected.items():
            self.assertEqual(value, details.get(key))
| apache-2.0 |
emergence/django-simple-history | setup.py | 1 | 1376 | from setuptools import setup
import simple_history
def _read(path):
    """Return the full text of *path*, closing the file promptly.

    The previous ``open(path).read()`` form never closed the handles,
    which raises ResourceWarning and leaks descriptors on interpreters
    without refcounting GC.
    """
    with open(path) as stream:
        return stream.read()


tests_require = [
    'Django>=1.6', 'WebTest==2.0.24', 'django-webtest==1.8.0', 'mock==1.0.1']

setup(
    name='django-simple-history',
    version=simple_history.__version__,
    description='Store model history and view/revert changes from admin site.',
    # README + changelog concatenated for the PyPI long description.
    long_description='\n'.join((
        _read('README.rst'),
        _read('CHANGES.rst'),
    )),
    author='Corey Bertram',
    author_email='corey@qr7.com',
    maintainer='Trey Hunner',
    url='https://github.com/treyhunner/django-simple-history',
    packages=[
        "simple_history",
        "simple_history.templatetags",
        "simple_history.management",
        "simple_history.management.commands",
    ],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Framework :: Django",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        "License :: OSI Approved :: BSD License",
    ],
    tests_require=tests_require,
    include_package_data=True,
    test_suite='runtests.main',
)
| bsd-3-clause |
cheehieu/bitcoin | qa/rpc-tests/invalidblockrequest.py | 87 | 4077 | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance
from test_framework.mininode import *
from test_framework.blocktools import *
import logging
import copy
import time
'''
In this test we connect to one node over p2p, and test block requests:
1) Valid blocks should be requested and become chain tip.
2) Invalid block with duplicated transaction should be re-requested.
3) Invalid block with bad coinbase value should be rejected and not
re-requested.
'''
# Use the ComparisonTestFramework with 1 node: only use --testbinary.
class InvalidBlockRequestTest(ComparisonTestFramework):
    # NOTE(review): Python 2 only code (xrange, the "L" long-suffix hex parse,
    # chr() producing a byte string).

    ''' Can either run this test as 1 node with expected answers, or two and compare them.
        Change the "outcome" variable from each TestInstance object to only do the comparison. '''

    def __init__(self):
        # One node: compare observed behavior against the expected outcomes
        # encoded in each TestInstance below.
        self.num_nodes = 1

    def run_test(self):
        """Wire up the p2p comparison harness and execute get_tests()."""
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        self.tip = None
        self.block_time = None
        NetworkThread().start() # Start up network handling in another thread
        test.run()

    def get_tests(self):
        """Yield TestInstances: valid chain growth, then two invalid blocks."""
        if self.tip is None:
            self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
        self.block_time = int(time.time())+1

        '''
        Create a new block with an anyone-can-spend coinbase
        '''
        block = create_block(self.tip, create_coinbase(), self.block_time)
        self.block_time += 1
        block.solve()
        # Save the coinbase for later
        self.block1 = block
        self.tip = block.sha256
        yield TestInstance([[block, True]])

        '''
        Now we need that block to mature so we can spend the coinbase.
        '''
        # 100 confirmations are required before a coinbase may be spent.
        test = TestInstance(sync_every_block=False)
        for i in xrange(100):
            block = create_block(self.tip, create_coinbase(), self.block_time)
            block.solve()
            self.tip = block.sha256
            self.block_time += 1
            test.blocks_and_transactions.append([block, True])
        yield test

        '''
        Now we use merkle-root malleability to generate an invalid block with
        same blockheader.
        Manufacture a block with 3 transactions (coinbase, spend of prior
        coinbase, spend of that spend).  Duplicate the 3rd transaction to
        leave merkle root and blockheader unchanged but invalidate the block.
        '''
        block2 = create_block(self.tip, create_coinbase(), self.block_time)
        self.block_time += 1
        # chr(81) is OP_TRUE
        tx1 = create_transaction(self.block1.vtx[0], 0, chr(81), 50*100000000)
        tx2 = create_transaction(tx1, 0, chr(81), 50*100000000)
        block2.vtx.extend([tx1, tx2])
        block2.hashMerkleRoot = block2.calc_merkle_root()
        block2.rehash()
        block2.solve()
        orig_hash = block2.sha256
        block2_orig = copy.deepcopy(block2)
        # Mutate block 2
        block2.vtx.append(tx2)
        # Duplicating the last tx keeps the merkle root and header hash the
        # same, so the mutated block must be rejected while the original
        # (same header) must still be accepted.
        assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
        assert_equal(orig_hash, block2.rehash())
        assert(block2_orig.vtx != block2.vtx)
        self.tip = block2.sha256
        yield TestInstance([[block2, False], [block2_orig, True]])

        '''
        Make sure that a totally screwed up block is not valid.
        '''
        block3 = create_block(self.tip, create_coinbase(), self.block_time)
        self.block_time += 1
        block3.vtx[0].vout[0].nValue = 100*100000000 # Too high!
        block3.vtx[0].sha256=None
        block3.vtx[0].calc_sha256()
        block3.hashMerkleRoot = block3.calc_merkle_root()
        block3.rehash()
        block3.solve()
        yield TestInstance([[block3, False]])
if __name__ == '__main__':
    # Run directly: executes the comparison test against the test binary.
    InvalidBlockRequestTest().main()
| mit |
kouaw/CouchPotatoServer | couchpotato/core/downloaders/transmission.py | 6 | 14359 | from base64 import b64encode
from datetime import timedelta
import httplib
import json
import os.path
import re
import urllib2
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, sp
from couchpotato.core.helpers.variable import tryInt, tryFloat, cleanHost
from couchpotato.core.logger import CPLog
# Module-level logger and the plugin name CouchPotato's loader auto-loads.
log = CPLog(__name__)
autoload = 'Transmission'
class Transmission(DownloaderBase):
    """CouchPotato downloader plugin for the Transmission BitTorrent client.

    Talks to a Transmission daemon over its JSON-RPC interface (see
    ``TransmissionRPC`` below): submits torrents/magnets, polls download
    status and removes finished or failed downloads.
    """

    # Release protocols this downloader accepts.
    protocol = ['torrent', 'torrent_magnet']
    log = CPLog(__name__)
    trpc = None  # lazily created TransmissionRPC client, see connect()

    def connect(self):
        """Build the RPC client from config; returns it, or False on bad config."""
        # Load host from config and split out port.
        # NOTE(review): a host value without an explicit ':port' makes
        # host[1] raise IndexError before the friendly error below fires --
        # confirm the settings UI always stores a port.
        host = cleanHost(self.conf('host')).rstrip('/').rsplit(':', 1)
        if not isInt(host[1]):
            log.error('Config properties are not filled in correctly, port is missing.')
            return False

        self.trpc = TransmissionRPC(host[0], port = host[1], rpc_url = self.conf('rpc_url').strip('/ '), username = self.conf('username'), password = self.conf('password'))
        return self.trpc

    def download(self, data = None, media = None, filedata = None):
        """Send a torrent file or magnet link to Transmission.

        data -- release info dict (name, url, protocol, seed settings).
        media -- media info dict (only used for defaulting here).
        filedata -- raw .torrent contents, required when protocol == 'torrent'.
        Returns a download id via downloadReturnId(), or False on failure.
        """
        if not media: media = {}
        if not data: data = {}

        log.info('Sending "%s" (%s) to Transmission.', (data.get('name'), data.get('protocol')))

        if not self.connect():
            return False

        if not filedata and data.get('protocol') == 'torrent':
            log.error('Failed sending torrent, no data')
            return False

        # Set parameters for adding torrent
        params = {
            'paused': self.conf('paused', default = False)
        }

        if self.conf('directory'):
            if os.path.isdir(self.conf('directory')):
                params['download-dir'] = self.conf('directory')
            else:
                log.error('Download directory from Transmission settings: %s doesn\'t exist', self.conf('directory'))

        # Change parameters of torrent (per-torrent seed limits; mode 1 means
        # "use the torrent-local limit" in the Transmission RPC spec).
        torrent_params = {}
        if data.get('seed_ratio'):
            torrent_params['seedRatioLimit'] = tryFloat(data.get('seed_ratio'))
            torrent_params['seedRatioMode'] = 1

        if data.get('seed_time'):
            torrent_params['seedIdleLimit'] = tryInt(data.get('seed_time')) * 60  # minutes -> seconds
            torrent_params['seedIdleMode'] = 1

        # Send request to Transmission
        if data.get('protocol') == 'torrent_magnet':
            remote_torrent = self.trpc.add_torrent_uri(data.get('url'), arguments = params)
            torrent_params['trackerAdd'] = self.torrent_trackers
        else:
            remote_torrent = self.trpc.add_torrent_file(b64encode(filedata), arguments = params)

        if not remote_torrent:
            log.error('Failed sending torrent to Transmission')
            return False

        # The daemon answers under 'torrent-added', or 'torrent-duplicate'
        # when the torrent was already known.
        data = remote_torrent.get('torrent-added') or remote_torrent.get('torrent-duplicate')

        # Change settings of added torrents
        if torrent_params:
            self.trpc.set_torrent(data['hashString'], torrent_params)

        log.info('Torrent sent to Transmission successfully.')
        return self.downloadReturnId(data['hashString'])

    def test(self):
        """Connectivity check used by the settings UI; True when RPC responds."""
        if self.connect() and self.trpc.get_session():
            return True
        return False

    def getAllDownloadStatus(self, ids):
        """Return a ReleaseDownloadList describing each torrent hash in *ids*."""
        log.debug('Checking Transmission download status.')

        if not self.connect():
            return []

        release_downloads = ReleaseDownloadList(self)

        return_params = {
            'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isStalled', 'isFinished', 'downloadDir', 'uploadRatio', 'secondsSeeding', 'seedIdleLimit', 'files']
        }

        session = self.trpc.get_session()
        queue = self.trpc.get_alltorrents(return_params)
        if not (queue and queue.get('torrents')):
            log.debug('Nothing in queue or error')
            return []

        for torrent in queue['torrents']:
            if torrent['hashString'] in ids:
                log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / isStalled=%s / eta=%s / uploadRatio=%s / isFinished=%s / incomplete-dir-enabled=%s / incomplete-dir=%s',
                          (torrent['name'], torrent['id'], torrent['downloadDir'], torrent['hashString'], torrent['percentDone'], torrent['status'], torrent.get('isStalled', 'N/A'), torrent['eta'], torrent['uploadRatio'], torrent['isFinished'], session['incomplete-dir-enabled'], session['incomplete-dir']))

                # Map Transmission's numeric status to CouchPotato's states:
                # stalled (optionally) -> failed; stopped+done -> completed;
                # status 5/6 -> seeding; anything else stays 'busy'.
                status = 'busy'
                if torrent.get('isStalled') and not torrent['percentDone'] == 1 and self.conf('stalled_as_failed'):
                    status = 'failed'
                elif torrent['status'] == 0 and torrent['percentDone'] == 1:
                    status = 'completed'
                elif torrent['status'] in [5, 6]:
                    status = 'seeding'

                # While still busy, files live in the incomplete dir if the
                # daemon has one configured.
                if session['incomplete-dir-enabled'] and status == 'busy':
                    torrent_folder = session['incomplete-dir']
                else:
                    torrent_folder = torrent['downloadDir']

                torrent_files = []
                for file_item in torrent['files']:
                    torrent_files.append(sp(os.path.join(torrent_folder, file_item['name'])))

                release_downloads.append({
                    'id': torrent['hashString'],
                    'name': torrent['name'],
                    'status': status,
                    'original_status': torrent['status'],
                    'seed_ratio': torrent['uploadRatio'],
                    'timeleft': str(timedelta(seconds = torrent['eta'])),
                    'folder': sp(torrent_folder if len(torrent_files) == 1 else os.path.join(torrent_folder, torrent['name'])),
                    'files': torrent_files
                })

        return release_downloads

    def pause(self, release_download, pause = True):
        """Pause (default) or resume the given download."""
        if pause:
            return self.trpc.stop_torrent(release_download['id'])
        else:
            return self.trpc.start_torrent(release_download['id'])

    def removeFailed(self, release_download):
        """Remove a failed download, deleting its local data too."""
        log.info('%s failed downloading, deleting...', release_download['name'])
        return self.trpc.remove_torrent(release_download['id'], True)

    def processComplete(self, release_download, delete_files = False):
        """Remove a finished torrent, optionally cleaning up downloaded files."""
        log.debug('Requesting Transmission to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
        return self.trpc.remove_torrent(release_download['id'], delete_files)
class TransmissionRPC(object):

    """TransmissionRPC lite library.

    Minimal JSON-RPC client for the Transmission daemon: optional HTTP
    basic auth and transparent handling of the daemon's CSRF session id
    (HTTP 409 + X-Transmission-Session-Id header).
    """

    def __init__(self, host = 'http://localhost', port = 9091, rpc_url = 'transmission', username = None, password = None):
        super(TransmissionRPC, self).__init__()

        self.url = host + ':' + str(port) + '/' + rpc_url + '/rpc'
        self.tag = 0            # incrementing request tag
        self.session_id = 0     # CSRF token, refreshed on HTTP 409
        self.session = {}
        if username and password:
            # Install a global opener with basic auth for this RPC url.
            password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
            password_manager.add_password(realm = 'Transmission', uri = self.url, user = username, passwd = password)
            opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager))
            opener.addheaders = [('User-agent', 'couchpotato-transmission-client/1.0')]
            urllib2.install_opener(opener)
        elif username or password:
            log.debug('User or password missing, not using authentication.')
        self.session = self.get_session()

    def _request(self, ojson):
        """POST one RPC payload; return its 'arguments' dict, or False/None.

        Returns the response 'arguments' on success, False on a reported
        failure or auth/url errors, and None when the daemon is unreachable.
        """
        self.tag += 1
        headers = {'x-transmission-session-id': str(self.session_id)}
        request = urllib2.Request(self.url, json.dumps(ojson).encode('utf-8'), headers)
        try:
            open_request = urllib2.urlopen(request)
            response = json.loads(open_request.read())
            log.debug('request: %s', json.dumps(ojson))
            log.debug('response: %s', json.dumps(response))
            if response['result'] == 'success':
                log.debug('Transmission action successful')
                return response['arguments']
            else:
                log.debug('Unknown failure sending command to Transmission. Return text is: %s', response['result'])
                return False
        except httplib.InvalidURL as err:
            log.error('Invalid Transmission host, check your config %s', err)
            return False
        except urllib2.HTTPError as err:
            if err.code == 401:
                log.error('Invalid Transmission Username or Password, check your config')
                return False
            elif err.code == 409:
                # 409 means our CSRF session id is stale: parse the new one
                # out of the response and retry the same request once.
                msg = str(err.read())
                try:
                    self.session_id = \
                        re.search('X-Transmission-Session-Id:\s*(\w+)', msg).group(1)
                    log.debug('X-Transmission-Session-Id: %s', self.session_id)

                    # #resend request with the updated header
                    return self._request(ojson)
                except:
                    log.error('Unable to get Transmission Session-Id %s', err)
            else:
                log.error('TransmissionRPC HTTPError: %s', err)
        except urllib2.URLError as err:
            log.error('Unable to connect to Transmission %s', err)

    def get_session(self):
        """Fetch the daemon's session settings (session-get)."""
        post_data = {'method': 'session-get', 'tag': self.tag}
        return self._request(post_data)

    def add_torrent_uri(self, torrent, arguments):
        """Add a torrent by magnet/URL (torrent-add with 'filename')."""
        arguments['filename'] = torrent
        post_data = {'arguments': arguments, 'method': 'torrent-add', 'tag': self.tag}
        return self._request(post_data)

    def add_torrent_file(self, torrent, arguments):
        """Add a torrent from base64-encoded metainfo (torrent-add)."""
        arguments['metainfo'] = torrent
        post_data = {'arguments': arguments, 'method': 'torrent-add', 'tag': self.tag}
        return self._request(post_data)

    def set_torrent(self, torrent_id, arguments):
        """Change per-torrent settings (torrent-set)."""
        arguments['ids'] = torrent_id
        post_data = {'arguments': arguments, 'method': 'torrent-set', 'tag': self.tag}
        return self._request(post_data)

    def get_alltorrents(self, arguments):
        """List torrents with the requested fields (torrent-get)."""
        post_data = {'arguments': arguments, 'method': 'torrent-get', 'tag': self.tag}
        return self._request(post_data)

    def stop_torrent(self, torrent_id):
        """Pause a torrent (torrent-stop)."""
        post_data = {'arguments': {'ids': torrent_id}, 'method': 'torrent-stop', 'tag': self.tag}
        return self._request(post_data)

    def start_torrent(self, torrent_id):
        """Resume a torrent (torrent-start)."""
        post_data = {'arguments': {'ids': torrent_id}, 'method': 'torrent-start', 'tag': self.tag}
        return self._request(post_data)

    def remove_torrent(self, torrent_id, delete_local_data):
        """Remove a torrent, optionally deleting its data (torrent-remove)."""
        post_data = {'arguments': {'ids': torrent_id, 'delete-local-data': delete_local_data}, 'method': 'torrent-remove', 'tag': self.tag}
        return self._request(post_data)
# Settings metadata consumed by CouchPotato's settings UI: one 'transmission'
# group under the Downloaders tab describing every option read via
# self.conf(...) in the plugin above.
config = [{
    'name': 'transmission',
    'groups': [
        {
            'tab': 'downloaders',
            'list': 'download_providers',
            'name': 'transmission',
            'label': 'Transmission',
            'description': 'Use <a href="http://www.transmissionbt.com/" target="_blank">Transmission</a> to download torrents.',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                    'radio_group': 'torrent',
                },
                {
                    'name': 'host',
                    'default': 'http://localhost:9091',
                    'description': 'Hostname with port. Usually <strong>http://localhost:9091</strong>',
                },
                {
                    'name': 'rpc_url',
                    'type': 'string',
                    'default': 'transmission',
                    'advanced': True,
                    'description': 'Change if you don\'t run Transmission RPC at the default url.',
                },
                {
                    'name': 'username',
                },
                {
                    'name': 'password',
                    'type': 'password',
                },
                {
                    'name': 'directory',
                    'type': 'directory',
                    'description': 'Download to this directory. Keep empty for default Transmission download directory.',
                },
                {
                    'name': 'remove_complete',
                    'label': 'Remove torrent',
                    'default': True,
                    'advanced': True,
                    'type': 'bool',
                    'description': 'Remove the torrent from Transmission after it finished seeding.',
                },
                {
                    'name': 'delete_files',
                    'label': 'Remove files',
                    'default': True,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also remove the leftover files.',
                },
                {
                    'name': 'paused',
                    'type': 'bool',
                    'advanced': True,
                    'default': False,
                    'description': 'Add the torrent paused.',
                },
                {
                    'name': 'manual',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
                },
                {
                    'name': 'stalled_as_failed',
                    'default': True,
                    'advanced': True,
                    'type': 'bool',
                    'description': 'Consider a stalled torrent as failed',
                },
                {
                    'name': 'delete_failed',
                    'default': True,
                    'advanced': True,
                    'type': 'bool',
                    'description': 'Delete a release after the download has failed.',
                },
            ],
        }
    ],
}]
| gpl-3.0 |
praba230890/PYPOWER | pypower/t/t_opf_dc_pips.py | 2 | 5674 | # Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Tests for DC optimal power flow using PIPS solver.
"""
from os.path import dirname, join
from numpy import array, ones, Inf, arange, r_
from scipy.io import loadmat
from scipy.sparse import csr_matrix as sparse
from pypower.ppoption import ppoption
from pypower.rundcopf import rundcopf
from pypower.loadcase import loadcase
from pypower.idx_bus import \
BUS_AREA, BASE_KV, VMIN, VM, VA, LAM_P, LAM_Q, MU_VMIN, MU_VMAX
from pypower.idx_gen import \
GEN_BUS, QMAX, QMIN, MBASE, APF, PG, QG, VG, MU_PMAX, MU_QMIN
from pypower.idx_brch import \
ANGMAX, PF, QT, MU_SF, MU_ST
from pypower.t.t_begin import t_begin
from pypower.t.t_is import t_is
from pypower.t.t_ok import t_ok
from pypower.t.t_end import t_end
def t_opf_dc_pips(quiet=False):
    """Tests for DC optimal power flow using PIPS solver.

    Solves the 9-bus DC OPF case and compares bus/gen/branch results and
    multipliers against a pre-solved MAT-file, then re-runs with extra
    linear user constraints/costs and finally an infeasible variant.

    @author: Ray Zimmerman (PSERC Cornell)
    """
    num_tests = 23

    t_begin(num_tests, quiet)

    tdir = dirname(__file__)
    casefile = join(tdir, 't_case9_opf')
    verbose = 0  # originally "not quiet"; forced off to keep test output clean

    t0 = 'DC OPF (PIPS): '

    # OPF_ALG_DC=200 selects the PIPS solver.
    ppopt = ppoption(VERBOSE=verbose, OUT_ALL=0, OPF_ALG_DC=200)

    ## run DC OPF

    ## set up indices (column groups of the bus/gen/branch matrices)
    ib_data = r_[arange(BUS_AREA + 1), arange(BASE_KV, VMIN + 1)]
    ib_voltage = arange(VM, VA + 1)
    ib_lam = arange(LAM_P, LAM_Q + 1)
    ib_mu = arange(MU_VMAX, MU_VMIN + 1)
    ig_data = r_[[GEN_BUS, QMAX, QMIN], arange(MBASE, APF + 1)]
    ig_disp = array([PG, QG, VG])
    ig_mu = arange(MU_PMAX, MU_QMIN + 1)
    ibr_data = arange(ANGMAX + 1)
    ibr_flow = arange(PF, QT + 1)
    ibr_mu = array([MU_SF, MU_ST])
    #ibr_angmu = array([MU_ANGMIN, MU_ANGMAX])

    ## get solved DC power flow case from MAT-file
    soln9_dcopf = loadmat(join(tdir, 'soln9_dcopf.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_dcopf['bus_soln']
    gen_soln = soln9_dcopf['gen_soln']
    branch_soln = soln9_dcopf['branch_soln']
    f_soln = soln9_dcopf['f_soln'][0]

    ## run OPF
    t = t0
    r = rundcopf(casefile, ppopt)
    bus, gen, branch, f, success = \
        r['bus'], r['gen'], r['branch'], r['f'], r['success']

    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is( bus[:, ib_data ], bus_soln[:, ib_data ], 10, [t, 'bus data'])
    t_is( bus[:, ib_voltage], bus_soln[:, ib_voltage], 3, [t, 'bus voltage'])
    t_is( bus[:, ib_lam ], bus_soln[:, ib_lam ], 3, [t, 'bus lambda'])
    t_is( bus[:, ib_mu ], bus_soln[:, ib_mu ], 2, [t, 'bus mu'])
    t_is( gen[:, ig_data ], gen_soln[:, ig_data ], 10, [t, 'gen data'])
    t_is( gen[:, ig_disp ], gen_soln[:, ig_disp ], 3, [t, 'gen dispatch'])
    t_is( gen[:, ig_mu ], gen_soln[:, ig_mu ], 3, [t, 'gen mu'])
    t_is(branch[:, ibr_data ], branch_soln[:, ibr_data ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow ], branch_soln[:, ibr_flow ], 3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu ], branch_soln[:, ibr_mu ], 2, [t, 'branch mu'])

    ##-----  run OPF with extra linear user constraints & costs  -----
    ## two new z variables
    ##      0 <= z1, P2 - P1 <= z1
    ##      0 <= z2, P2 - P3 <= z2
    ## with A and N sized for DC opf
    ppc = loadcase(casefile)
    row = [0, 0, 0, 1, 1, 1]
    col = [9, 10, 12, 10, 11, 13]
    ppc['A'] = sparse(([-1, 1, -1, 1, -1, -1], (row, col)), (2, 14))
    ppc['u'] = array([0, 0])
    ppc['l'] = array([-Inf, -Inf])
    ppc['zl'] = array([0, 0])

    ppc['N'] = sparse(([1, 1], ([0, 1], [12, 13])), (2, 14)) ## new z variables only
    ppc['fparm'] = ones((2, 1)) * array([[1, 0, 0, 1]]) ## w = r = z
    ppc['H'] = sparse((2, 2)) ## no quadratic term
    ppc['Cw'] = array([1000, 1])

    t = ''.join([t0, 'w/extra constraints & costs 1 : '])
    r = rundcopf(ppc, ppopt)
    t_ok(r['success'], [t, 'success'])
    t_is(r['gen'][0, PG], 116.15974, 4, [t, 'Pg1 = 116.15974'])
    t_is(r['gen'][1, PG], 116.15974, 4, [t, 'Pg2 = 116.15974'])
    t_is(r['var']['val']['z'], [0, 0.3348], 4, [t, 'user vars'])
    t_is(r['cost']['usr'], 0.3348, 3, [t, 'user costs'])

    ## with A and N sized for AC opf (indices shifted for the AC variable set)
    ppc = loadcase(casefile)
    row = [0, 0, 0, 1, 1, 1]
    col = [18, 19, 24, 19, 20, 25]
    ppc['A'] = sparse(([-1, 1, -1, 1, -1, -1], (row, col)), (2, 26))
    ppc['u'] = array([0, 0])
    ppc['l'] = array([-Inf, -Inf])
    ppc['zl'] = array([0, 0])

    ppc['N'] = sparse(([1, 1], ([0, 1], [24, 25])), (2, 26)) ## new z variables only
    ppc['fparm'] = ones((2, 1)) * array([[1, 0, 0, 1]]) ## w = r = z
    ppc['H'] = sparse((2, 2)) ## no quadratic term
    ppc['Cw'] = array([1000, 1])

    t = ''.join([t0, 'w/extra constraints & costs 2 : '])
    r = rundcopf(ppc, ppopt)
    t_ok(r['success'], [t, 'success'])
    t_is(r['gen'][0, PG], 116.15974, 4, [t, 'Pg1 = 116.15974'])
    t_is(r['gen'][1, PG], 116.15974, 4, [t, 'Pg2 = 116.15974'])
    t_is(r['var']['val']['z'], [0, 0.3348], 4, [t, 'user vars'])
    t_is(r['cost']['usr'], 0.3348, 3, [t, 'user costs'])

    ## infeasible: require Pg1 + Pg2 >= 600, which the case cannot satisfy
    t = ''.join([t0, 'infeasible : '])
    ## with A and N sized for DC opf
    ppc = loadcase(casefile)
    ppc['A'] = sparse(([1, 1], ([0, 0], [9, 10])), (1, 14)) ## Pg1 + Pg2
    ppc['u'] = array([Inf])
    ppc['l'] = array([600])
    r = rundcopf(ppc, ppopt)
    t_ok(not r['success'], [t, 'no success'])

    t_end()
if __name__ == '__main__':
    # Run the DC OPF test suite directly with output enabled.
    t_opf_dc_pips(quiet=False)
| bsd-3-clause |
rajpushkar83/cloudmesh | cloudmesh/iaas/openstack/queue/tasks.py | 1 | 1498 | from __future__ import absolute_import
from celery import current_task
from celery.utils.log import get_task_logger
from cloudmesh.iaas.openstack.queue.celery import celery_openstack_queue
from cloudmesh.cm_mongo import cm_mongo
import time
#
# logger = get_task_logger(__name__)
#
@celery_openstack_queue.task(track_started=True)
def refresh(cm_user_id=None, names=None, types=None):
    """Activate the user's clouds and refresh their cached data in MongoDB.

    ``names`` and ``types`` may each be given as a single string; they are
    normalised to one-element lists before use.
    """
    if isinstance(names, str):
        names = [names]
    if isinstance(types, str):
        types = [types]

    store = cm_mongo()
    store.activate(cm_user_id=cm_user_id, names=names)
    store.refresh(cm_user_id=cm_user_id, names=names, types=types)
@celery_openstack_queue.task(track_started=True)
def vm_create(name, flavor_name, image_id, security_groups=None, key_name=None,
              meta=None, userdata=None, manager=None):
    """Create a VM through the given cloud *manager*.

    :param meta: optional metadata dict attached to the instance.  The
        default used to be a shared mutable ``{}``; it is now ``None`` and
        replaced with a fresh dict per call, so a mutation by one task can
        no longer leak into later invocations.
    """
    # Guard against the mutable-default-argument pitfall: default values are
    # evaluated once at definition time and shared across calls.
    if meta is None:
        meta = {}
    manager.vm_create(name, flavor_name, image_id, security_groups, key_name,
                      meta, userdata)
@celery_openstack_queue.task(track_started=True)
def vm_delete(cloud, server, cm_user_id):
    """Delete *server* on *cloud* for the given user via the MongoDB helper."""
    store = cm_mongo()
    store.activate(cm_user_id=cm_user_id, names=[cloud])
    store.vm_delete(cloud, server, cm_user_id)
@celery_openstack_queue.task(track_started=True)
def release_unused_public_ips(cloud, cm_user_id):
    """Release floating IPs on *cloud* that are no longer attached to a VM."""
    store = cm_mongo()
    store.activate(cm_user_id=cm_user_id, names=[cloud])
    store.release_unused_public_ips(cloud, cm_user_id)
@celery_openstack_queue.task(track_started=True)
def wait(t=0):
    # Debug/testing helper: blocks the worker for *t* seconds.
    time.sleep(t)
| apache-2.0 |
Ms2ger/servo | tests/wpt/web-platform-tests/old-tests/webdriver/command_contexts/window_size_test.py | 141 | 1050 | import os
import sys
import random
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
repo_root = os.path.abspath(os.path.join(__file__, "../../.."))
sys.path.insert(1, os.path.join(repo_root, "tools", "webdriver"))
from webdriver import exceptions
class WindowSizeTest(base_test.WebDriverBaseTest):
    """Checks that a window size set through the driver is reported back."""

    def test_set_and_get_window_size(self):
        self.driver.get(self.webserver.where_is("command_contexts/res/first-page.html"))
        initial_dimensions = self.driver.get_window_size()
        # Shrink both dimensions so the requested size is always achievable.
        new_dimensions = {
            "height": initial_dimensions["height"] - 100,
            "width": initial_dimensions["width"] - 100}

        try:
            # NOTE(review): arguments are passed as (height, width); confirm
            # this matches the local webdriver client's set_window_size
            # signature -- Selenium's equivalent takes (width, height).
            self.driver.set_window_size(new_dimensions["height"], new_dimensions["width"])

            actual_dimensions = self.driver.get_window_size()
            self.assertDictEqual(new_dimensions, actual_dimensions)
        except exceptions.UnsupportedOperationException:
            # Drivers may legitimately not support resizing; treat as a pass.
            pass
if __name__ == "__main__":
    # Run this test module standalone.
    unittest.main()
| mpl-2.0 |
rosmo/ansible | lib/ansible/modules/cloud/openstack/os_nova_host_aggregate.py | 31 | 6134 | #!/usr/bin/python
# Copyright 2016 Jakub Jursa <jakub.jursa1@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_nova_host_aggregate
short_description: Manage OpenStack host aggregates
extends_documentation_fragment: openstack
author: "Jakub Jursa (@kuboj)"
version_added: "2.3"
description:
- Create, update, or delete OpenStack host aggregates. If a aggregate
with the supplied name already exists, it will be updated with the
new name, new availability zone, new metadata and new list of hosts.
options:
name:
description: Name of the aggregate.
required: true
metadata:
description: Metadata dict.
availability_zone:
description: Availability zone to create aggregate into.
hosts:
description: List of hosts to set for an aggregate.
state:
description: Should the resource be present or absent.
choices: [present, absent]
default: present
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Create a host aggregate
- os_nova_host_aggregate:
cloud: mycloud
state: present
name: db_aggregate
hosts:
- host1
- host2
metadata:
type: dbcluster
# Delete an aggregate
- os_nova_host_aggregate:
cloud: mycloud
state: absent
name: db_aggregate
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _needs_update(module, aggregate):
new_metadata = (module.params['metadata'] or {})
if module.params['availability_zone'] is not None:
new_metadata['availability_zone'] = module.params['availability_zone']
if ((module.params['name'] != aggregate.name) or
(module.params['hosts'] is not None and set(module.params['hosts']) != set(aggregate.hosts)) or
(module.params['availability_zone'] is not None and module.params['availability_zone'] != aggregate.availability_zone) or
(module.params['metadata'] is not None and new_metadata != aggregate.metadata)):
return True
return False
def _system_state_change(module, aggregate):
state = module.params['state']
if state == 'absent' and aggregate:
return True
if state == 'present':
if aggregate is None:
return True
return _needs_update(module, aggregate)
return False
def main():
    """Ansible module entry point: converge an OpenStack host aggregate.

    Looks up the aggregate by name, then creates / updates / deletes it to
    match the requested state, exiting via module.exit_json (changed flag)
    or module.fail_json on SDK errors.
    """
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        metadata=dict(required=False, default=None, type='dict'),
        availability_zone=dict(required=False, default=None),
        hosts=dict(required=False, default=None, type='list'),
        state=dict(default='present', choices=['absent', 'present']),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    name = module.params['name']
    metadata = module.params['metadata']
    availability_zone = module.params['availability_zone']
    hosts = module.params['hosts']
    state = module.params['state']

    if metadata is not None:
        # availability_zone is managed through its own parameter, never via
        # the metadata dict.
        metadata.pop('availability_zone', None)

    sdk, cloud = openstack_cloud_from_module(module)
    try:
        aggregates = cloud.search_aggregates(name_or_id=name)

        if len(aggregates) == 1:
            aggregate = aggregates[0]
        elif len(aggregates) == 0:
            aggregate = None
        else:
            # Aggregate names are expected to be unique on the cloud.
            raise Exception("Should not happen")

        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, aggregate))

        if state == 'present':
            if aggregate is None:
                # Create from scratch, then attach hosts and metadata.
                aggregate = cloud.create_aggregate(name=name,
                                                   availability_zone=availability_zone)
                if hosts:
                    for h in hosts:
                        cloud.add_host_to_aggregate(aggregate.id, h)
                if metadata:
                    cloud.set_aggregate_metadata(aggregate.id, metadata)
                changed = True
            else:
                if _needs_update(module, aggregate):
                    if availability_zone is not None:
                        aggregate = cloud.update_aggregate(aggregate.id, name=name,
                                                           availability_zone=availability_zone)
                    if metadata is not None:
                        metas = metadata
                        # Keys present remotely but not requested are set to
                        # None, which removes them server-side.
                        for i in (set(aggregate.metadata.keys()) - set(metadata.keys())):
                            if i != 'availability_zone':
                                metas[i] = None
                        cloud.set_aggregate_metadata(aggregate.id, metas)
                    if hosts is not None:
                        # Reconcile host membership in both directions.
                        for i in (set(aggregate.hosts) - set(hosts)):
                            cloud.remove_host_from_aggregate(aggregate.id, i)
                        for i in (set(hosts) - set(aggregate.hosts)):
                            cloud.add_host_to_aggregate(aggregate.id, i)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed)

        elif state == 'absent':
            if aggregate is None:
                changed = False
            else:
                if hosts:
                    # Hosts must be detached before an aggregate can be deleted.
                    for h in hosts:
                        cloud.remove_host_from_aggregate(aggregate.id, h)
                cloud.delete_aggregate(aggregate.id)
                changed = True
            module.exit_json(changed=changed)

    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
| gpl-3.0 |
waheedahmed/edx-platform | lms/djangoapps/bulk_email/migrations/0001_initial.py | 49 | 2975 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
import xmodule_django.models
class Migration(migrations.Migration):
    # Initial schema for the bulk_email app: course-level email authorization,
    # queued course emails, reusable email templates and per-user opt-outs.
    # Auto-generated; field definitions must not be edited once applied.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='CourseAuthorization',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('course_id', xmodule_django.models.CourseKeyField(unique=True, max_length=255, db_index=True)),
                ('email_enabled', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='CourseEmail',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('slug', models.CharField(max_length=128, db_index=True)),
                ('subject', models.CharField(max_length=128, blank=True)),
                ('html_message', models.TextField(null=True, blank=True)),
                ('text_message', models.TextField(null=True, blank=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('course_id', xmodule_django.models.CourseKeyField(max_length=255, db_index=True)),
                ('to_option', models.CharField(default=b'myself', max_length=64, choices=[(b'myself', b'Myself'), (b'staff', b'Staff and instructors'), (b'all', b'All')])),
                ('template_name', models.CharField(max_length=255, null=True)),
                ('from_addr', models.CharField(max_length=255, null=True)),
                ('sender', models.ForeignKey(default=1, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='CourseEmailTemplate',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('html_template', models.TextField(null=True, blank=True)),
                ('plain_template', models.TextField(null=True, blank=True)),
                ('name', models.CharField(max_length=255, unique=True, null=True, blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='Optout',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('course_id', xmodule_django.models.CourseKeyField(max_length=255, db_index=True)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
            ],
        ),
        # A user may opt out of a given course's emails at most once.
        migrations.AlterUniqueTogether(
            name='optout',
            unique_together=set([('user', 'course_id')]),
        ),
    ]
| agpl-3.0 |
hashworks/CouchPotatoServer | libs/cache/posixemulation.py | 193 | 3541 | # -*- coding: utf-8 -*-
r"""
werkzeug.posixemulation
~~~~~~~~~~~~~~~~~~~~~~~
Provides a POSIX emulation for some features that are relevant to
web applications. The main purpose is to simplify support for
systems such as Windows NT that are not 100% POSIX compatible.
Currently this only implements a :func:`rename` function that
follows POSIX semantics. Eg: if the target file already exists it
will be replaced without asking.
This module was introduced in 0.6.1 and is not a public interface.
It might become one in later versions of Werkzeug.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import errno
import time
import random
can_rename_open_file = False
# On Windows, os.rename() refuses to replace an existing target, so emulate
# the POSIX overwrite-on-rename semantics via the Win32 API.  (Python 2 code:
# uses `unicode`, `sys.maxint` and `except OSError, e` syntax.)
if os.name == 'nt': # pragma: no cover
# Fallback stubs; replaced below when ctypes and the kernel entry points load.
_rename = lambda src, dst: False
_rename_atomic = lambda src, dst: False
try:
import ctypes
# MoveFileExW flags (see Win32 MoveFileEx documentation).
_MOVEFILE_REPLACE_EXISTING = 0x1
_MOVEFILE_WRITE_THROUGH = 0x8
_MoveFileEx = ctypes.windll.kernel32.MoveFileExW
def _rename(src, dst):
# MoveFileExW expects wide-character (unicode) paths.
if not isinstance(src, unicode):
src = unicode(src, sys.getfilesystemencoding())
if not isinstance(dst, unicode):
dst = unicode(dst, sys.getfilesystemencoding())
if _rename_atomic(src, dst):
return True
# Retry for up to ~100ms; antivirus/indexing can hold the file briefly.
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING |
_MOVEFILE_WRITE_THROUGH)
if not rv:
time.sleep(0.001)
retry += 1
return rv
# new in Vista and Windows Server 2008
_CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
_CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
_MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
_CloseHandle = ctypes.windll.kernel32.CloseHandle
can_rename_open_file = True
def _rename_atomic(src, dst):
# Transacted NTFS move; returns False if a transaction cannot be created.
ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Werkzeug rename')
if ta == -1:
return False
try:
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileTransacted(src, dst, None, None,
_MOVEFILE_REPLACE_EXISTING |
_MOVEFILE_WRITE_THROUGH, ta)
if rv:
rv = _CommitTransaction(ta)
break
else:
time.sleep(0.001)
retry += 1
return rv
finally:
# Always release the transaction handle, success or not.
_CloseHandle(ta)
except Exception:
# ctypes or the required kernel32/ktmw32 entry points are unavailable;
# keep the always-False stubs so rename() falls through to os.rename.
pass
def rename(src, dst):
# Try atomic or pseudo-atomic rename
if _rename(src, dst):
return
# Fall back to "move away and replace"
try:
os.rename(src, dst)
except OSError, e:
if e.errno != errno.EEXIST:
raise
# Target exists: move it aside under a random suffix, rename into
# place, then best-effort unlink of the displaced file.
old = "%s-%08x" % (dst, random.randint(0, sys.maxint))
os.rename(dst, old)
os.rename(src, dst)
try:
os.unlink(old)
except Exception:
pass
else:
# POSIX rename() already replaces an existing target atomically.
rename = os.rename
can_rename_open_file = True
| gpl-3.0 |
mgedmin/ansible | test/integration/cleanup_rax.py | 229 | 6516 | #!/usr/bin/env python
import os
import re
import yaml
import argparse
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_list_iterator(svc, *args, **kwargs):
    """Exhaustively page through a Rackspace list-style API call.

    Repeatedly invokes ``svc.<method>`` (default ``'list'``), passing the id
    of the last item seen as ``marker`` to fetch the next page, until a page
    of fewer than two items signals the end of pagination.

    Returns the accumulated list of items.
    """
    method = kwargs.pop('method', 'list')
    collected = getattr(svc, method)(*args, **kwargs)
    while collected:
        page = getattr(svc, method)(*args, marker=collected[-1].id, **kwargs)
        # The marker item is typically echoed back as the first entry of the
        # next page; drop our copy so it is not duplicated.
        if collected and page and collected[-1].id == page[0].id:
            collected.pop()
        collected.extend(page)
        if len(page) < 2:
            # Zero or one items returned: pagination is exhausted.
            break
    return collected
def parse_args():
    """Build and evaluate the command-line interface.

    Returns an argparse.Namespace with ``assumeyes`` (bool, from -y/--yes)
    and ``match_re`` (str, from --match, default '^ansible-testing').
    """
    cli = argparse.ArgumentParser()
    cli.add_argument(
        '-y', '--yes',
        dest='assumeyes', action='store_true', default=False,
        help="Don't prompt for confirmation")
    cli.add_argument(
        '--match',
        dest='match_re', default='^ansible-testing',
        help='Regular expression used to find resources '
             '(default: %(default)s)')
    return cli.parse_args()
def authenticate():
# Read Rackspace credentials from ./credentials.yml and log in via pyrax.
# Any failure (missing file, bad YAML, rejected credentials) terminates the
# program via SystemExit with the underlying error as the message.
try:
with open(os.path.realpath('./credentials.yml')) as f:
# NOTE(review): yaml.load() without an explicit Loader can construct
# arbitrary objects; acceptable for a local credentials file, but do
# not point this at untrusted input.
credentials = yaml.load(f)
except Exception as e:
raise SystemExit(e)
try:
pyrax.set_credentials(credentials.get('rackspace_username'),
credentials.get('rackspace_api_key'))
except Exception as e:
raise SystemExit(e)
def prompt_and_delete(item, prompt, assumeyes):
    """Delete or terminate *item*, optionally asking the operator first.

    When ``assumeyes`` is false the user is prompted and must answer 'y'
    (case-insensitive) for the deletion to proceed.  The item must expose a
    ``delete`` or ``terminate`` method; whichever is present is invoked and
    a confirmation line is printed.
    """
    if not assumeyes:
        answer = raw_input(prompt)
        assumeyes = answer.lower() == 'y'
    assert hasattr(item, 'delete') or hasattr(item, 'terminate'), \
        "Class <%s> has no delete or terminate attribute" % item.__class__
    if not assumeyes:
        return
    if hasattr(item, 'delete'):
        item.delete()
        print ("Deleted %s" % item)
    if hasattr(item, 'terminate'):
        item.terminate()
        print ("Terminated %s" % item)
def delete_rax(args):
"""Delete CloudServers whose names match ``args.match_re``, in all regions."""
print ("--- Cleaning CloudServers matching '%s'" % args.match_re)
# Server-side name filter; rax_list_iterator pages through all results.
search_opts = dict(name='^%s' % args.match_re)
for region in pyrax.identity.services.compute.regions:
cs = pyrax.connect_to_cloudservers(region=region)
servers = rax_list_iterator(cs.servers, search_opts=search_opts)
for server in servers:
prompt_and_delete(server,
'Delete matching %s? [y/n]: ' % server,
args.assumeyes)
def delete_rax_clb(args):
"""Delete Cloud Load Balancers whose names match ``args.match_re``."""
print ("--- Cleaning Cloud Load Balancers matching '%s'" % args.match_re)
for region in pyrax.identity.services.load_balancer.regions:
clb = pyrax.connect_to_cloud_loadbalancers(region=region)
# No server-side filter available here; match names client-side.
for lb in rax_list_iterator(clb):
if re.search(args.match_re, lb.name):
prompt_and_delete(lb,
'Delete matching %s? [y/n]: ' % lb,
args.assumeyes)
def delete_rax_keypair(args):
"""Delete compute key pairs whose names match ``args.match_re``."""
print ("--- Cleaning Key Pairs matching '%s'" % args.match_re)
for region in pyrax.identity.services.compute.regions:
cs = pyrax.connect_to_cloudservers(region=region)
# Key pair listings are small; a single non-paginated list() suffices.
for keypair in cs.keypairs.list():
if re.search(args.match_re, keypair.name):
prompt_and_delete(keypair,
'Delete matching %s? [y/n]: ' % keypair,
args.assumeyes)
def delete_rax_network(args):
"""Delete Cloud Networks whose names match ``args.match_re``."""
print ("--- Cleaning Cloud Networks matching '%s'" % args.match_re)
for region in pyrax.identity.services.network.regions:
cnw = pyrax.connect_to_cloud_networks(region=region)
for network in cnw.list():
if re.search(args.match_re, network.name):
prompt_and_delete(network,
'Delete matching %s? [y/n]: ' % network,
args.assumeyes)
def delete_rax_cbs(args):
"""Delete Cloud Block Storage volumes whose names match ``args.match_re``."""
print ("--- Cleaning Cloud Block Storage matching '%s'" % args.match_re)
# NOTE(review): this iterates the cloud-networks region list rather than a
# block-storage-specific one — presumably the region sets coincide; confirm.
for region in pyrax.identity.services.network.regions:
cbs = pyrax.connect_to_cloud_blockstorage(region=region)
for volume in cbs.list():
if re.search(args.match_re, volume.name):
prompt_and_delete(volume,
'Delete matching %s? [y/n]: ' % volume,
args.assumeyes)
def delete_rax_cdb(args):
"""Delete Cloud Databases instances whose names match ``args.match_re``."""
print ("--- Cleaning Cloud Databases matching '%s'" % args.match_re)
for region in pyrax.identity.services.database.regions:
cdb = pyrax.connect_to_cloud_databases(region=region)
for db in rax_list_iterator(cdb):
if re.search(args.match_re, db.name):
prompt_and_delete(db,
'Delete matching %s? [y/n]: ' % db,
args.assumeyes)
def _force_delete_rax_scaling_group(manager):
def wrapped(uri):
manager.api.method_delete('%s?force=true' % uri)
return wrapped
def delete_rax_scaling_group(args):
"""Delete Autoscale Groups whose names match ``args.match_re``."""
print ("--- Cleaning Autoscale Groups matching '%s'" % args.match_re)
for region in pyrax.identity.services.autoscale.regions:
asg = pyrax.connect_to_autoscale(region=region)
for group in rax_list_iterator(asg):
if re.search(args.match_re, group.name):
# Patch the manager so delete() forces removal of non-empty groups.
group.manager._delete = \
_force_delete_rax_scaling_group(group.manager)
prompt_and_delete(group,
'Delete matching %s? [y/n]: ' % group,
args.assumeyes)
def main():
# Entry point: require pyrax, authenticate, then run every delete_rax*
# cleanup function defined in this module, in alphabetical order.
if not HAS_PYRAX:
raise SystemExit('The pyrax python module is required for this script')
args = parse_args()
authenticate()
# Discover cleanup functions by naming convention.
funcs = [f for n, f in globals().items() if n.startswith('delete_rax')]
for func in sorted(funcs, key=lambda f: f.__name__):
try:
func(args)
except Exception as e:
# Keep going when one resource type fails.
# NOTE(review): e.message is Python-2-only; str(e) would be needed
# under Python 3 — confirm the intended interpreter.
print ("---- %s failed (%s)" % (func.__name__, e.message))
if __name__ == '__main__':
# Ctrl-C is an expected way to abort a long cleanup run; exit quietly.
try:
main()
except KeyboardInterrupt:
print ('\nExiting...')
| gpl-3.0 |
frouty/odoogoeen | openerp/addons/base/res/report/__init__.py | 79 | 1201 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#from report import report_sxw
#report_sxw.report_sxw('report.partner.list', 'res.partner', 'addons/base/res/partner/report/partner_list.rml')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zrhans/python | exemplos/diveintopythonzh-cn-5.4b/py/plural/stage3/pluraltest3.py | 4 | 1615 | """Unit test for plural3.py
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim (mark@diveintopython.org)"
__version__ = "$Revision: 1.2 $"
__date__ = "$Date: 2004/03/17 14:34:40 $"
__copyright__ = "Copyright (c) 2004 Mark Pilgrim"
__license__ = "Python"
from plural3 import plural
import unittest, new
class KnownValues(unittest.TestCase):
"""Table of noun -> expected-plural pairs used to drive generated tests."""
# One test method per entry is attached to this class by the module-level
# loop that follows the class definition.
nouns = {'bass': 'basses',
'bus': 'buses',
'walrus': 'walruses',
'box': 'boxes',
'fax': 'faxes',
'suffix': 'suffixes',
'mailbox': 'mailboxes',
'buzz': 'buzzes',
'waltz': 'waltzes',
'coach': 'coaches',
'glitch': 'glitches',
'rash': 'rashes',
'watch': 'watches',
'cheetah': 'cheetahs',
'cough': 'coughs',
'utility': 'utilities',
'vacancy': 'vacancies',
'boy': 'boys',
'day': 'days',
'computer': 'computers',
'rock': 'rocks',
'paper': 'papers',
}
# Dynamically attach one test method per noun so each pair shows up as an
# individual unittest case.  Keyword defaults in the lambda bind the current
# loop values, avoiding the late-binding-closure pitfall.
for noun, pluralnoun in KnownValues.nouns.items():
func = lambda self, noun=noun, pluralnoun=pluralnoun: \
KnownValues.failUnlessEqual(self, plural(noun), pluralnoun)
func.__doc__ = "%s --> %s" % (noun, pluralnoun)
# NOTE(review): the `new` module is Python-2-only (removed in Python 3);
# a plain setattr of `func` would behave the same — confirm before changing.
instanceMethod = new.instancemethod(func, None, KnownValues)
setattr(KnownValues, "test_%s" % noun, instanceMethod)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
luci/luci-py | client/third_party/infra_libs/utils.py | 3 | 8205 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Miscellaneous utility functions."""
import contextlib
import datetime
import errno
import json
import os
import shutil
import subprocess
import sys
import tempfile
import time
# UTC datetime corresponding to zero Unix timestamp.
EPOCH = datetime.datetime.utcfromtimestamp(0)


def parse_rfc3339_epoch(value):
    """Convert an RFC 3339 datetime string to a Unix epoch timestamp.

    This mirrors the Timestamp proto JSON encoding (e.g.
    ``2017-08-17T04:21:32.722952943Z`` or ``1972-01-01T10:00:20.021-05:00``),
    keeping only second precision — micro/nanoseconds are dropped.

    Returns:
      int epoch timestamp.
    Raises:
      ValueError on malformed input (bad datetime, more than 9 fractional
      digits, missing/invalid timezone designator, or trailing data).
    """
    # Adapted from protobuf/internal/well_known_types.py
    # Timestamp.FromJsonString (that implementation is marked internal).
    # Locate the timezone designator: 'Z', '+HH:MM', or '-HH:MM'.
    tz_start = value.find('Z')
    if tz_start < 0:
        tz_start = value.find('+')
    if tz_start < 0:
        tz_start = value.rfind('-')
    if tz_start < 0:
        raise ValueError('Failed to parse timestamp: missing valid timezone offset')
    # Split "seconds part" from the optional fractional digits.
    clock_part, _sep, nanos = value[:tz_start].partition('.')
    when = datetime.datetime.strptime(clock_part, '%Y-%m-%dT%H:%M:%S')
    delta = when - EPOCH
    seconds = delta.days * 86400 + delta.seconds
    if len(nanos) > 9:
        raise ValueError(
            'Failed to parse timestamp: nanos %r more than 9 fractional digits'
            % nanos)
    if value[tz_start] == 'Z':
        if len(value) != tz_start + 1:
            raise ValueError('Failed to parse timestamp: invalid trailing data %r'
                             % value)
        return seconds
    # Numeric offset: '+HH:MM' subtracts from local time, '-HH:MM' adds.
    offset = value[tz_start:]
    colon = offset.find(':')
    if colon < 0:
        raise ValueError('Invalid timezone offset value: %r' % offset)
    shift = (int(offset[1:colon]) * 60 + int(offset[colon + 1:])) * 60
    return seconds - shift if offset[0] == '+' else seconds + shift
def read_json_as_utf8(filename=None, text=None):
    """Parse JSON from a file or a string, returning utf-8 encoded strings.

    Unlike json.load / json.loads, every key and value that would come back
    as ``unicode`` is encoded to a utf-8 byte string (Python 2 semantics).

    Args:
      filename (str): path of a file to parse.
      text (str): json string to parse.

    Exactly one of ``filename`` and ``text`` must be given; ValueError is
    raised otherwise.
    """
    if filename is not None and text is not None:
        raise ValueError('Only one of "filename" and "text" can be provided at '
                         'the same time')
    if filename is None and text is None:
        raise ValueError('One of "filename" and "text" must be provided')

    def _encode(node):
        # Recursively rebuild containers, encoding unicode leaves to utf-8.
        if isinstance(node, dict):
            return {_encode(k): _encode(v) for k, v in node.iteritems()}
        if isinstance(node, list):
            return [_encode(item) for item in node]
        if isinstance(node, unicode):
            return node.encode('utf-8')
        return node

    if filename:
        with open(filename, 'rb') as fh:
            parsed = json.load(fh)
    else:
        parsed = json.loads(text)
    return _encode(parsed)
# TODO(hinoka): Add tests crbug.com/500781
def rmtree(file_path): # pragma: no cover
"""Recursively removes a directory, even if it's marked read-only.
Remove the directory located at file_path, if it exists.
shutil.rmtree() doesn't work on Windows if any of the files or directories
are read-only, which svn repositories and some .svn files are. We need to
be able to force the files to be writable (i.e., deletable) as we traverse
the tree.
Even with all this, Windows still sometimes fails to delete a file, citing
a permission error (maybe something to do with antivirus scans or disk
indexing). The best suggestion any of the user forums had was to wait a
bit and try again, so we do that too. It's hand-waving, but sometimes it
works. :/
"""
if not os.path.exists(file_path):
return
# A plain file: up to three removal attempts, 3 seconds apart.
if os.path.isfile(file_path):
for i in xrange(3):
try:
os.remove(file_path)
return
except OSError:
if i == 2:
raise
time.sleep(3)
if sys.platform == 'win32':
# Give up and use cmd.exe's rd command.
file_path = os.path.normcase(file_path)
for i in xrange(3):
try:
subprocess.check_call(['cmd.exe', '/c', 'rd', '/q', '/s', file_path])
return
except subprocess.CalledProcessError:
if i == 2:
raise
time.sleep(3)
# Remove a symlink itself rather than recursing into its target.
def remove_with_retry(rmfunc, path):
if os.path.islink(path):
return os.remove(path)
else:
return rmfunc(path)
def rmtree_on_error(function, _, excinfo):
"""This works around a problem whereby python 2.x on Windows has no ability
to check for symbolic links. os.path.islink always returns False. But
shutil.rmtree will fail if invoked on a symbolic link whose target was
deleted before the link. E.g., reproduce like this:
> mkdir test
> mkdir test\1
> mklink /D test\current test\1
> python -c "import infra_libs; infra_libs.rmtree('test')"
To avoid this issue, we pass this error-handling function to rmtree. If
we see the exact sort of failure, we ignore it. All other failures we re-
raise.
"""
exception_type = excinfo[0]
exception_value = excinfo[1]
# If shutil.rmtree encounters a symbolic link on Windows, os.listdir will
# fail with a WindowsError exception with an ENOENT errno (i.e., file not
# found). We'll ignore that error. Note that WindowsError is not defined
# for non-Windows platforms, so we use OSError (of which it is a subclass)
# to avoid lint complaints about an undefined global on non-Windows
# platforms.
if (function is os.listdir) and issubclass(exception_type, OSError):
if exception_value.errno != errno.ENOENT:
raise
else:
raise
# Bottom-up walk so children are removed before their parent directories.
for root, dirs, files in os.walk(file_path, topdown=False):
# For POSIX: making the directory writable guarantees removability.
# Windows will ignore the non-read-only bits in the chmod value.
os.chmod(root, 0o770)
for name in files:
remove_with_retry(os.remove, os.path.join(root, name))
for name in dirs:
remove_with_retry(lambda p: shutil.rmtree(p, onerror=rmtree_on_error),
os.path.join(root, name))
remove_with_retry(os.rmdir, file_path)
# We're trying to be compatible with Python3 tempfile.TemporaryDirectory
# context manager here. And they used 'dir' as a keyword argument.
# pylint: disable=redefined-builtin
@contextlib.contextmanager
def temporary_directory(suffix="", prefix="tmp", dir=None,
keep_directory=False):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with temporary_directory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
Args:
suffix, prefix, dir: same arguments as for tempfile.mkdtemp.
keep_directory (bool): if True, do not delete the temporary directory
when exiting. Useful for debugging.
Returns:
tempdir (str): full path to the temporary directory.
"""
tempdir = None # Handle mkdtemp raising an exception
try:
tempdir = tempfile.mkdtemp(suffix, prefix, dir)
yield tempdir
finally:
# Cleanup is best-effort; a failure is reported but never propagated.
if tempdir and not keep_directory: # pragma: no branch
try:
# TODO(pgervais,496347) Make this work reliably on Windows.
shutil.rmtree(tempdir, ignore_errors=True)
except OSError as ex: # pragma: no cover
# NOTE(review): `print >> sys.stderr` is Python-2-only syntax.
print >> sys.stderr, (
"ERROR: {!r} while cleaning up {!r}".format(ex, tempdir))
| apache-2.0 |
dschwoerer/espresso | testsuite/python/nonBondedInteractions.py | 3 | 5356 | #
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
import unittest as ut
import espressomd
import numpy as np
from espressomd.interactions import LennardJonesInteraction
if "LENNARD_JONES_GENERIC" in espressomd.features():
from espressomd.interactions import GenericLennardJonesInteraction
class Non_bonded_interactionsTests(ut.TestCase):
"""Round-trip tests: non-bonded interaction parameters set on the ESPResSo
system must be read back unchanged."""
# def __init__(self,particleId):
# self.pid=particleId
# Handle to espresso system
es = espressomd.System()
def intersMatch(self, inType, outType, inParams, outParams):
"""Check, if the interaction type set and gotten back as well as the bond
parameters set and gotten back match. Only check keys present in
inParams.
"""
if inType != outType:
print("Type mismatch:", inType, outType)
return False
for k in inParams.keys():
if k not in outParams:
print(k, "missing from returned parameters")
return False
if outParams[k] != inParams[k]:
print("Mismatch in parameter ", k, inParams[k], outParams[k])
return False
return True
# Note: this is a factory invoked at class-definition time (no `self`),
# not a test method itself.
def generateTestForNon_bonded_interaction(_partType1, _partType2, _interClass, _params, _interName):
"""Generates test cases for checking interaction parameters set and gotten back
from Es actually match. Only keys which are present in _params are checked
1st and 2nd arg: Particle type ids to check on
3rd: Class of the interaction to test, ie.e, FeneBond, HarmonicBond
4th: Interaction parameters as dictionary, i.e., {"k"=1.,"r_0"=0.
5th: Name of the interaction property to set (i.e. "lennardJones")
"""
partType1 = _partType1
partType2 = _partType2
interClass = _interClass
params = _params
interName = _interName
def func(self):
# This code is run at the execution of the generated function.
# It will use the state of the variables in the outer function,
# which was there, when the outer function was called
# Set parameters
getattr(self.es.non_bonded_inter[partType1, partType2], interName).set_params(
**params)
# Read them out again
outInter = getattr(
self.es.non_bonded_inter[partType1, partType2], interName)
outParams = outInter.get_params()
self.assertTrue(self.intersMatch(interClass, type(outInter), params, outParams), interClass(
**params).type_name() + ": value set and value gotten back differ for particle types " + str(partType1) + " and " + str(partType2) + ": " + params.__str__() + " vs. " + outParams.__str__())
return func
# Generated Lennard-Jones round-trip tests.
# NOTE(review): test_lj2 and test_lj3 use identical parameter sets —
# presumably test_lj3 was meant to vary; confirm.
test_lj1 = generateTestForNon_bonded_interaction(
0, 0, LennardJonesInteraction,
{"epsilon": 1., "sigma": 2., "cutoff": 3.,
"shift": 4., "offset": 5., "min": 7.},
"lennard_jones")
test_lj2 = generateTestForNon_bonded_interaction(
0, 0, LennardJonesInteraction,
{"epsilon": 1.3, "sigma": 2.2, "cutoff": 3.4,
"shift": 4.1, "offset": 5.1, "min": 7.1},
"lennard_jones")
test_lj3 = generateTestForNon_bonded_interaction(
0, 0, LennardJonesInteraction,
{"epsilon": 1.3, "sigma": 2.2, "cutoff": 3.4,
"shift": 4.1, "offset": 5.1, "min": 7.1},
"lennard_jones")
# Generic LJ tests only exist when the feature was compiled in.
if "LENNARD_JONES_GENERIC" in espressomd.features():
test_ljgen1 = generateTestForNon_bonded_interaction(
0, 0, GenericLennardJonesInteraction,
{"epsilon": 1., "sigma": 2., "cutoff": 3., "shift": 4., "offset": 5.,
"e1": 7, "e2": 8, "b1": 9., "b2": 10.},
"generic_lennard_jones")
test_ljgen2 = generateTestForNon_bonded_interaction(
0, 0, GenericLennardJonesInteraction,
{"epsilon": 1.1, "sigma": 2.1, "cutoff": 3.1, "shift": 4.1, "offset": 5.1,
"e1": 71, "e2": 81, "b1": 9.1, "b2": 10.1},
"generic_lennard_jones")
test_ljgen3 = generateTestForNon_bonded_interaction(
0, 0, GenericLennardJonesInteraction,
{"epsilon": 1.2, "sigma": 2.2, "cutoff": 3.2, "shift": 4.2, "offset": 5.2,
"e1": 72, "e2": 82, "b1": 9.2, "b2": 10.2},
"generic_lennard_jones")
def test_forcecap(self):
"""Force cap value must round-trip through the setter/getter."""
self.es.non_bonded_inter.set_force_cap(17.5)
self.assertEqual(self.es.non_bonded_inter.get_force_cap(), 17.5)
if __name__ == "__main__":
# Print the compiled-in feature list before handing off to unittest.
print("Features: ", espressomd.features())
ut.main()
| gpl-3.0 |
dcroc16/skunk_works | google_appengine/lib/django-1.3/django/contrib/admindocs/urls.py | 336 | 1089 | from django.conf.urls.defaults import *
from django.contrib.admindocs import views
# URLconf for the admindocs documentation browser.  Uses the old
# patterns('', ...) style; each entry maps a documentation page to its view
# plus a reversible URL name.  Order matters for URL resolution — keep it.
urlpatterns = patterns('',
url('^$',
views.doc_index,
name='django-admindocs-docroot'
),
url('^bookmarklets/$',
views.bookmarklets,
name='django-admindocs-bookmarklets'
),
url('^tags/$',
views.template_tag_index,
name='django-admindocs-tags'
),
url('^filters/$',
views.template_filter_index,
name='django-admindocs-filters'
),
url('^views/$',
views.view_index,
name='django-admindocs-views-index'
),
url('^views/(?P<view>[^/]+)/$',
views.view_detail,
name='django-admindocs-views-detail'
),
url('^models/$',
views.model_index,
name='django-admindocs-models-index'
),
url('^models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$',
views.model_detail,
name='django-admindocs-models-detail'
),
url('^templates/(?P<template>.*)/$',
views.template_detail,
name='django-admindocs-templates'
),
)
| mit |
timabbott/zulip | zerver/tests/test_thumbnail.py | 2 | 17251 | import base64
import urllib
from io import StringIO
import ujson
from django.conf import settings
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import (
create_s3_buckets,
get_test_image_file,
override_settings,
use_s3_backend,
)
from zerver.lib.upload import upload_backend, upload_emoji_image
from zerver.lib.users import get_api_key
class ThumbnailTest(ZulipTestCase):
@use_s3_backend
def test_s3_source_type(self) -> None:
"""S3-backed uploads: /thumbnail redirects to a thumbor URL carrying
source_type/s3 and enforces per-user access to uploaded files."""
def get_file_path_urlpart(uri: str, size: str='') -> str:
# Expected thumbor URL fragment for `uri`; a non-empty `size`
# prepends the WxH segment and adds the sharpen filter.
url_in_result = 'smart/filters:no_upscale()%s/%s/source_type/s3'
sharpen_filter = ''
if size:
url_in_result = f'/{size}/{url_in_result}'
sharpen_filter = ':sharpen(0.5,0.2,true)'
hex_uri = base64.urlsafe_b64encode(uri.encode()).decode('utf-8')
return url_in_result % (sharpen_filter, hex_uri)
create_s3_buckets(
settings.S3_AUTH_UPLOADS_BUCKET,
settings.S3_AVATAR_BUCKET)
hamlet = self.example_user('hamlet')
self.login_user(hamlet)
fp = StringIO("zulip!")
fp.name = "zulip.jpeg"
result = self.client_post("/json/user_uploads", {'file': fp})
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("uri", json)
uri = json["uri"]
base = '/user_uploads/'
self.assertEqual(base, uri[:len(base)])
quoted_uri = urllib.parse.quote(uri[1:], safe='')
# Test full size image.
result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
self.assertEqual(result.status_code, 302, result)
expected_part_url = get_file_path_urlpart(uri)
self.assertIn(expected_part_url, result.url)
# Test thumbnail size.
result = self.client_get(f"/thumbnail?url={quoted_uri}&size=thumbnail")
self.assertEqual(result.status_code, 302, result)
expected_part_url = get_file_path_urlpart(uri, '0x300')
self.assertIn(expected_part_url, result.url)
# Test custom emoji urls in Zulip messages.
user_profile = self.example_user("hamlet")
image_file = get_test_image_file("img.png")
file_name = "emoji.png"
upload_emoji_image(image_file, file_name, user_profile)
custom_emoji_url = upload_backend.get_emoji_url(file_name, user_profile.realm_id)
emoji_url_base = '/user_avatars/'
self.assertEqual(emoji_url_base, custom_emoji_url[:len(emoji_url_base)])
quoted_emoji_url = urllib.parse.quote(custom_emoji_url[1:], safe='')
# Test full size custom emoji image (for emoji link in messages case).
result = self.client_get(f"/thumbnail?url={quoted_emoji_url}&size=full")
self.assertEqual(result.status_code, 302, result)
self.assertIn(custom_emoji_url, result.url)
# Tests the /api/v1/thumbnail api endpoint with standard API auth
self.logout()
result = self.api_get(
hamlet,
f'/thumbnail?url={quoted_uri}&size=full')
self.assertEqual(result.status_code, 302, result)
expected_part_url = get_file_path_urlpart(uri)
self.assertIn(expected_part_url, result.url)
# Test with another user trying to access image using thumbor.
self.login('iago')
result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
self.assertEqual(result.status_code, 403, result)
self.assert_in_response("You are not authorized to view this file.", result)
def test_external_source_type(self) -> None:
"""External image URLs: /thumbnail redirects to thumbor with
source_type/external, for https, http, and protocol-relative URLs;
external images are visible to any logged-in user."""
def run_test_with_image_url(image_url: str) -> None:
# Test full size image.
self.login('hamlet')
quoted_url = urllib.parse.quote(image_url, safe='')
encoded_url = base64.urlsafe_b64encode(image_url.encode()).decode('utf-8')
result = self.client_get(f"/thumbnail?url={quoted_url}&size=full")
self.assertEqual(result.status_code, 302, result)
expected_part_url = '/smart/filters:no_upscale()/' + encoded_url + '/source_type/external'
self.assertIn(expected_part_url, result.url)
# Test thumbnail size.
result = self.client_get(f"/thumbnail?url={quoted_url}&size=thumbnail")
self.assertEqual(result.status_code, 302, result)
expected_part_url = '/0x300/smart/filters:no_upscale():sharpen(0.5,0.2,true)/' + encoded_url + '/source_type/external'
self.assertIn(expected_part_url, result.url)
# Test api endpoint with standard API authentication.
self.logout()
user_profile = self.example_user("hamlet")
result = self.api_get(user_profile,
f"/thumbnail?url={quoted_url}&size=thumbnail")
self.assertEqual(result.status_code, 302, result)
expected_part_url = '/0x300/smart/filters:no_upscale():sharpen(0.5,0.2,true)/' + encoded_url + '/source_type/external'
self.assertIn(expected_part_url, result.url)
# Test api endpoint with legacy API authentication.
user_profile = self.example_user("hamlet")
result = self.client_get(f"/thumbnail?url={quoted_url}&size=thumbnail&api_key={get_api_key(user_profile)}")
self.assertEqual(result.status_code, 302, result)
expected_part_url = '/0x300/smart/filters:no_upscale():sharpen(0.5,0.2,true)/' + encoded_url + '/source_type/external'
self.assertIn(expected_part_url, result.url)
# Test a second logged-in user; they should also be able to access it
user_profile = self.example_user("iago")
result = self.client_get(f"/thumbnail?url={quoted_url}&size=thumbnail&api_key={get_api_key(user_profile)}")
self.assertEqual(result.status_code, 302, result)
expected_part_url = '/0x300/smart/filters:no_upscale():sharpen(0.5,0.2,true)/' + encoded_url + '/source_type/external'
self.assertIn(expected_part_url, result.url)
# Test with another user trying to access image using thumbor.
# File should be always accessible to user in case of external source
self.login('iago')
result = self.client_get(f"/thumbnail?url={quoted_url}&size=full")
self.assertEqual(result.status_code, 302, result)
expected_part_url = '/smart/filters:no_upscale()/' + encoded_url + '/source_type/external'
self.assertIn(expected_part_url, result.url)
image_url = 'https://images.foobar.com/12345'
run_test_with_image_url(image_url)
image_url = 'http://images.foobar.com/12345'
run_test_with_image_url(image_url)
image_url = '//images.foobar.com/12345'
run_test_with_image_url(image_url)
def test_local_file_type(self) -> None:
"""Locally stored uploads: /thumbnail redirects to thumbor with
source_type/local_file, handles unicode filenames and emoji URLs, and
denies access to other users' uploads."""
def get_file_path_urlpart(uri: str, size: str='') -> str:
# Expected thumbor URL fragment for `uri`; a non-empty `size`
# prepends the WxH segment and adds the sharpen filter.
url_in_result = 'smart/filters:no_upscale()%s/%s/source_type/local_file'
sharpen_filter = ''
if size:
url_in_result = f'/{size}/{url_in_result}'
sharpen_filter = ':sharpen(0.5,0.2,true)'
hex_uri = base64.urlsafe_b64encode(uri.encode()).decode('utf-8')
return url_in_result % (sharpen_filter, hex_uri)
self.login('hamlet')
fp = StringIO("zulip!")
fp.name = "zulip.jpeg"
result = self.client_post("/json/user_uploads", {'file': fp})
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("uri", json)
uri = json["uri"]
base = '/user_uploads/'
self.assertEqual(base, uri[:len(base)])
# Test full size image.
# We remove the forward slash infront of the `/user_uploads/` to match
# bugdown behaviour.
quoted_uri = urllib.parse.quote(uri[1:], safe='')
result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
self.assertEqual(result.status_code, 302, result)
expected_part_url = get_file_path_urlpart(uri)
self.assertIn(expected_part_url, result.url)
# Test thumbnail size.
result = self.client_get(f"/thumbnail?url={quoted_uri}&size=thumbnail")
self.assertEqual(result.status_code, 302, result)
expected_part_url = get_file_path_urlpart(uri, '0x300')
self.assertIn(expected_part_url, result.url)
# Test with a unicode filename.
fp = StringIO("zulip!")
fp.name = "μένει.jpg"
result = self.client_post("/json/user_uploads", {'file': fp})
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("uri", json)
uri = json["uri"]
# We remove the forward slash infront of the `/user_uploads/` to match
# bugdown behaviour.
quoted_uri = urllib.parse.quote(uri[1:], safe='')
result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
self.assertEqual(result.status_code, 302, result)
expected_part_url = get_file_path_urlpart(uri)
self.assertIn(expected_part_url, result.url)
# Test custom emoji urls in Zulip messages.
user_profile = self.example_user("hamlet")
image_file = get_test_image_file("img.png")
file_name = "emoji.png"
upload_emoji_image(image_file, file_name, user_profile)
custom_emoji_url = upload_backend.get_emoji_url(file_name, user_profile.realm_id)
emoji_url_base = '/user_avatars/'
self.assertEqual(emoji_url_base, custom_emoji_url[:len(emoji_url_base)])
quoted_emoji_url = urllib.parse.quote(custom_emoji_url[1:], safe='')
# Test full size custom emoji image (for emoji link in messages case).
result = self.client_get(f"/thumbnail?url={quoted_emoji_url}&size=full")
self.assertEqual(result.status_code, 302, result)
self.assertIn(custom_emoji_url, result.url)
# Tests the /api/v1/thumbnail api endpoint with HTTP basic auth.
self.logout()
user_profile = self.example_user("hamlet")
result = self.api_get(
user_profile,
f'/thumbnail?url={quoted_uri}&size=full')
self.assertEqual(result.status_code, 302, result)
expected_part_url = get_file_path_urlpart(uri)
self.assertIn(expected_part_url, result.url)
# Tests the /api/v1/thumbnail api endpoint with ?api_key
# auth.
user_profile = self.example_user("hamlet")
result = self.client_get(
f'/thumbnail?url={quoted_uri}&size=full&api_key={get_api_key(user_profile)}')
self.assertEqual(result.status_code, 302, result)
expected_part_url = get_file_path_urlpart(uri)
self.assertIn(expected_part_url, result.url)
# Test with another user trying to access image using thumbor.
self.login('iago')
result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
self.assertEqual(result.status_code, 403, result)
self.assert_in_response("You are not authorized to view this file.", result)
    @override_settings(THUMBOR_URL='127.0.0.1:9995')
    def test_with_static_files(self) -> None:
        """Static assets must bypass thumbor entirely: /thumbnail should
        302-redirect straight back to the original /static/ URL even when
        THUMBOR_URL is configured."""
        self.login('hamlet')
        uri = '/static/images/cute/turtle.png'
        # Drop the leading '/' (matches bugdown's URL form) and
        # percent-encode the whole path for use as a query parameter.
        quoted_uri = urllib.parse.quote(uri[1:], safe='')
        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        # Redirect target is the untouched static URI.
        self.assertEqual(uri, result.url)
    def test_with_thumbor_disabled(self) -> None:
        """With THUMBOR_URL set to '', /thumbnail must redirect to the raw
        image: local uploads go back to their /user_uploads/ URI, and
        external URLs go through the external-content camo-style proxy."""
        self.login('hamlet')
        fp = StringIO("zulip!")
        fp.name = "zulip.jpeg"

        result = self.client_post("/json/user_uploads", {'file': fp})
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        self.assertIn("uri", json)
        uri = json["uri"]
        base = '/user_uploads/'
        self.assertEqual(base, uri[:len(base)])

        # Local upload: redirect is the original upload URI, no thumbor.
        quoted_uri = urllib.parse.quote(uri[1:], safe='')
        with self.settings(THUMBOR_URL=''):
            result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        self.assertEqual(uri, result.url)

        # External https URL: redirect goes to the external-content CDN;
        # the long hex suffix is presumably digest + hex-encoded source URL
        # (exact constants pinned here as golden values).
        uri = 'https://www.google.com/images/srpr/logo4w.png'
        quoted_uri = urllib.parse.quote(uri, safe='')
        with self.settings(THUMBOR_URL=''):
            result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        base = 'https://external-content.zulipcdn.net/external_content/56c362a24201593891955ff526b3b412c0f9fcd2/68747470733a2f2f7777772e676f6f676c652e636f6d2f696d616765732f737270722f6c6f676f34772e706e67'
        self.assertEqual(base, result.url)

        # Same for plain http.
        uri = 'http://www.google.com/images/srpr/logo4w.png'
        quoted_uri = urllib.parse.quote(uri, safe='')
        with self.settings(THUMBOR_URL=''):
            result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        base = 'https://external-content.zulipcdn.net/external_content/7b6552b60c635e41e8f6daeb36d88afc4eabde79/687474703a2f2f7777772e676f6f676c652e636f6d2f696d616765732f737270722f6c6f676f34772e706e67'
        self.assertEqual(base, result.url)

        # And for protocol-relative ('//') URLs.
        uri = '//www.google.com/images/srpr/logo4w.png'
        quoted_uri = urllib.parse.quote(uri, safe='')
        with self.settings(THUMBOR_URL=''):
            result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        base = 'https://external-content.zulipcdn.net/external_content/676530cf4b101d56f56cc4a37c6ef4d4fd9b0c03/2f2f7777772e676f6f676c652e636f6d2f696d616765732f737270722f6c6f676f34772e706e67'
        self.assertEqual(base, result.url)
    def test_with_different_THUMBOR_URL(self) -> None:
        """A custom THUMBOR_URL must become the host of the redirect URL,
        with the usual thumbor path components appended."""
        self.login('hamlet')
        fp = StringIO("zulip!")
        fp.name = "zulip.jpeg"

        result = self.client_post("/json/user_uploads", {'file': fp})
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        self.assertIn("uri", json)
        uri = json["uri"]
        base = '/user_uploads/'
        self.assertEqual(base, uri[:len(base)])

        quoted_uri = urllib.parse.quote(uri[1:], safe='')
        # Despite the name, this is the urlsafe-base64 encoding of the URI,
        # which is how the source path is embedded in the thumbor URL.
        hex_uri = base64.urlsafe_b64encode(uri.encode()).decode('utf-8')
        with self.settings(THUMBOR_URL='http://test-thumborhost.com'):
            result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        # Redirect host matches the configured THUMBOR_URL...
        base = 'http://test-thumborhost.com/'
        self.assertEqual(base, result.url[:len(base)])
        # ...and the path carries the encoded source plus the local marker.
        expected_part_url = '/smart/filters:no_upscale()/' + hex_uri + '/source_type/local_file'
        self.assertIn(expected_part_url, result.url)
    def test_with_different_sizes(self) -> None:
        """The size= query parameter controls the thumbor resize segment:
        'thumbnail' yields a 0x300 image, 'full' the original resolution;
        any other value (or a missing size) is rejected."""
        def get_file_path_urlpart(uri: str, size: str='') -> str:
            # Builds the expected thumbor path fragment for *uri*; when a
            # size is given, a resize segment and a sharpen filter appear.
            url_in_result = 'smart/filters:no_upscale()%s/%s/source_type/local_file'
            sharpen_filter = ''
            if size:
                url_in_result = f'/{size}/{url_in_result}'
                sharpen_filter = ':sharpen(0.5,0.2,true)'
            hex_uri = base64.urlsafe_b64encode(uri.encode()).decode('utf-8')
            return url_in_result % (sharpen_filter, hex_uri)

        self.login('hamlet')
        fp = StringIO("zulip!")
        fp.name = "zulip.jpeg"

        result = self.client_post("/json/user_uploads", {'file': fp})
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        self.assertIn("uri", json)
        uri = json["uri"]
        base = '/user_uploads/'
        self.assertEqual(base, uri[:len(base)])

        # Test with size supplied as a query parameter.
        # size=thumbnail should return a 0x300 sized image.
        # size=full should return the original resolution image.
        quoted_uri = urllib.parse.quote(uri[1:], safe='')

        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=thumbnail")
        self.assertEqual(result.status_code, 302, result)
        expected_part_url = get_file_path_urlpart(uri, '0x300')
        self.assertIn(expected_part_url, result.url)

        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        expected_part_url = get_file_path_urlpart(uri)
        self.assertIn(expected_part_url, result.url)

        # Any size other than 'full' or 'thumbnail' is rejected.
        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=480x360")
        self.assertEqual(result.status_code, 403, result)
        self.assert_in_response("Invalid size.", result)

        # A missing size parameter is also an error.
        result = self.client_get(f"/thumbnail?url={quoted_uri}")
        self.assertEqual(result.status_code, 400, "Missing 'size' argument")
| apache-2.0 |
cgvarela/grpc | tools/buildgen/bunch.py | 27 | 2417 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Allows dot-accessible dictionaries."""
class Bunch(dict):
    """Dictionary subclass whose keys are also reachable as attributes."""

    def __init__(self, d):
        """Seed both the mapping and the attribute namespace from *d*."""
        super(Bunch, self).__init__(d)
        vars(self).update(d)
def to_bunch(var):
    """Recursively convert every dict inside *var* into a Bunch.

    Lists are walked element by element; dicts become Bunch objects with
    their list/dict values converted first; anything else is returned
    unchanged.
    """
    if isinstance(var, list):
        return [to_bunch(item) for item in var]
    if isinstance(var, dict):
        converted = {
            key: to_bunch(value) if isinstance(value, (list, dict)) else value
            for key, value in var.items()
        }
        return Bunch(converted)
    return var
def merge_json(dst, add):
    """Recursively merge JSON-like object *add* into *dst*, in place.

    New dict keys are copied over; keys present in both are merged
    recursively (except the special '#' comment key, which is left alone);
    lists are concatenated. Mixing container kinds raises Exception.
    """
    if isinstance(dst, dict) and isinstance(add, dict):
        for key, value in add.items():
            if key not in dst:
                dst[key] = value
            elif key != '#':
                merge_json(dst[key], value)
    elif isinstance(dst, list) and isinstance(add, list):
        dst.extend(add)
    else:
        raise Exception('Tried to merge incompatible objects %r, %r' % (dst, add))
| bsd-3-clause |
julien78910/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/hark.py | 167 | 1344 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .common import InfoExtractor
class HarkIE(InfoExtractor):
    # Extractor for audio clips hosted on hark.com; metadata comes from the
    # site's per-clip JSON endpoint.
    _VALID_URL = r'https?://www\.hark\.com/clips/(?P<id>.+?)-.+'
    _TEST = {
        'url': 'http://www.hark.com/clips/mmbzyhkgny-obama-beyond-the-afghan-theater-we-only-target-al-qaeda-on-may-23-2013',
        'md5': '6783a58491b47b92c7c1af5a77d4cbee',
        'info_dict': {
            'id': 'mmbzyhkgny',
            'ext': 'mp3',
            'title': 'Obama: \'Beyond The Afghan Theater, We Only Target Al Qaeda\' on May 23, 2013',
            'description': 'President Barack Obama addressed the nation live on May 23, 2013 in a speech aimed at addressing counter-terrorism policies including the use of drone strikes, detainees at Guantanamo Bay prison facility, and American citizens who are terrorists.',
            'duration': 11,
        }
    }

    def _real_extract(self, url):
        # Clip id is the first path segment captured by _VALID_URL.
        video_id = self._match_id(url)
        # hark.com exposes all clip metadata (media url, name, etc.) as JSON.
        data = self._download_json(
            'http://www.hark.com/clips/%s.json' % video_id, video_id)

        return {
            'id': video_id,
            'url': data['url'],
            'title': data['name'],
            # Optional fields: use .get() so missing keys become None.
            'description': data.get('description'),
            'thumbnail': data.get('image_original'),
            'duration': data.get('duration'),
        }
| gpl-3.0 |
sloanyang/aquantic | Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff_unittest.py | 122 | 2151 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.closebugforlanddiff import CloseBugForLandDiff
class CloseBugForLandDiffTest(unittest.TestCase):
    """Tests for the CloseBugForLandDiff commit-queue step."""

    def test_empty_state(self):
        # Running the step with only a commit_text (no bug id) should log
        # the commit and then note that no bug id was provided.
        capture = OutputCapture()
        step = CloseBugForLandDiff(MockTool(), MockOptions())
        expected_logs = "Committed r49824: <http://trac.webkit.org/changeset/49824>\nNo bug id provided.\n"
        capture.assert_outputs(self, step.run, [{"commit_text": "Mock commit text"}], expected_logs=expected_logs)
| gpl-2.0 |
Himon-SYNCRAFT/taskplus | tests/core/shared/test_action.py | 1 | 1595 | from unittest import mock
from taskplus.core.shared.request import RequestError
from taskplus.core.shared.response import ResponseFailure
from taskplus.core.shared.action import Action
def test_action_cannot_process_valid_requests():
    """The Action base class must turn its unimplemented process_request
    into a SYSTEM_ERROR response rather than raising."""
    valid_request = mock.MagicMock()
    valid_request.__bool__.return_value = True

    response = Action().execute(valid_request)

    expected_message = (
        'NotImplementedError: process_request() not implemented by '
        'Action class'
    )
    assert not response
    assert response.type == ResponseFailure.SYSTEM_ERROR
    assert response.message == expected_message
def test_action_can_process_invalid_requests_and_returns_response_failure():
    """An invalid request must produce a PARAMETER_ERROR response that
    echoes the request's error parameter and message."""
    param_name, error_text = 'parameter', 'message'
    invalid_request = mock.Mock()
    invalid_request.is_valid.return_value = False
    invalid_request.errors = [RequestError(param_name, error_text)]

    response = Action().execute(invalid_request)

    assert not response
    assert response.type == ResponseFailure.PARAMETER_ERROR
    assert response.message == '{}: {}'.format(param_name, error_text)
def test_action_can_manage_generic_exception_process_request():
    """Arbitrary exceptions raised inside process_request must be converted
    into a SYSTEM_ERROR response naming the exception type."""
    class TestException(Exception):
        pass

    failure_text = 'error'
    valid_request = mock.Mock()
    valid_request.is_valid.return_value = True

    action = Action()
    action.process_request = mock.Mock(side_effect=TestException(failure_text))
    response = action.execute(valid_request)

    assert not response
    assert response.type == ResponseFailure.SYSTEM_ERROR
    assert response.message == 'TestException: {}'.format(failure_text)
| bsd-3-clause |
arne-cl/rst_discourse_parser | tools/crfsuite/crfsuite-0.12/swig/python/setup.py | 2 | 1025 | #!/usr/bin/env python
"""
setup.py file for SWIG example
"""
import sys
import os.path
def get_rootdir():
    """Return the install prefix; empty means the system default layout."""
    # Historical alternative prefix: '/home/users/okazaki/local'
    return ''
def get_includedir():
    """Return the header directory under the install prefix."""
    root = get_rootdir()
    return os.path.join(root, 'include')
def get_librarydir():
    """Return the library directory under the install prefix."""
    root = get_rootdir()
    return os.path.join(root, 'lib')
# Force the C++ toolchain for every distutils build/link step: the SWIG
# wrapper is C++ code, so the C defaults would fail at link time.
import os; os.environ['CC'] = 'g++'; os.environ['CXX'] = 'g++';
os.environ['CPP'] = 'g++'; os.environ['LDSHARED'] = 'g++'

from distutils.core import setup, Extension

# SWIG-generated CRFsuite binding, linked against an already-installed
# libcrfsuite. The commented entries point at the in-tree headers/libs and
# can be re-enabled for a source-tree build.
crfsuite_module = Extension(
    '_crfsuite',
    sources = [
        'crfsuite.cpp',
        'export_wrap.cpp',
    ],
#    include_dirs=['../../include',],
    extra_link_args=['-shared'],
#    library_dirs=['../../lib/crf',],
    libraries=['crfsuite'],
#    extra_objects=['../../lib/crf/libcrfsuite.la'],
    language='c++',
    )

# '@PACKAGE@'/'@VERSION@' are placeholders substituted by the autoconf
# build at configure time.
setup(
    name = '@PACKAGE@',
    version = '@VERSION@',
    author = 'Naoaki Okazaki',
    description = """CRFSuite Python module""",
    ext_modules = [crfsuite_module],
    py_modules = ["crfsuite"],
    )
| bsd-2-clause |
Ayrx/pyDropSecure | src/aes.py | 1 | 2502 | import cPickle as pickle
import hashlib
import hmac
import os
from Crypto.Cipher import AES
class AuthenticationError(Exception):
    """Raised when the HMAC signature check on a message fails."""
class Crypticle(object):
    """Authenticated encryption class

    Encryption algorithm: AES-CBC
    Signing algorithm: HMAC-SHA256

    Wire format is ``IV + ciphertext + HMAC``; the HMAC covers IV and
    ciphertext and is verified (in constant time) before decryption.
    """

    PICKLE_PAD = "pickle::"                   # sanity-check header for dumps/loads
    AES_BLOCK_SIZE = 16                       # AES block (and IV) size, bytes
    SIG_SIZE = hashlib.sha256().digest_size   # HMAC-SHA256 tag size, bytes
    KEY_SIZE = 128                            # AES key size, bits

    def __init__(self, key_string, key_size=KEY_SIZE):
        self.keys = self.extract_keys(key_string, key_size)
        self.key_size = key_size

    @classmethod
    def generate_key_string(cls, key_size=KEY_SIZE):
        """Return a fresh random base64-encoded AES+HMAC key pair."""
        key = os.urandom(key_size / 8 + cls.SIG_SIZE)
        return key.encode("base64").replace("\n", "")

    @classmethod
    def extract_keys(cls, key_string, key_size):
        """Decode *key_string* and split it into (aes_key, hmac_key)."""
        key = key_string.decode("base64")
        assert len(key) == key_size / 8 + cls.SIG_SIZE, "invalid key"
        return key[:-cls.SIG_SIZE], key[-cls.SIG_SIZE:]

    def encrypt(self, data):
        """encrypt data with AES-CBC and sign it with HMAC-SHA256"""
        aes_key, hmac_key = self.keys
        # PKCS#7-style padding: always pad, even when already block-aligned.
        pad = self.AES_BLOCK_SIZE - len(data) % self.AES_BLOCK_SIZE
        data += pad * chr(pad)
        iv_bytes = os.urandom(self.AES_BLOCK_SIZE)
        cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes)
        data = iv_bytes + cypher.encrypt(data)
        sig = hmac.new(hmac_key, data, hashlib.sha256).digest()
        return data + sig

    def decrypt(self, data):
        """verify HMAC-SHA256 signature and decrypt data with AES-CBC

        Raises AuthenticationError if the signature does not match.
        """
        aes_key, hmac_key = self.keys
        sig = data[-self.SIG_SIZE:]
        data = data[:-self.SIG_SIZE]
        expected_sig = hmac.new(hmac_key, data, hashlib.sha256).digest()
        # Bug fix: use a constant-time comparison instead of ``!=`` so the
        # check does not leak how many leading bytes of the tag matched
        # (timing side channel). Requires Python 2.7.7+ / 3.3+.
        if not hmac.compare_digest(expected_sig, sig):
            raise AuthenticationError("message authentication failed")
        iv_bytes = data[:self.AES_BLOCK_SIZE]
        data = data[self.AES_BLOCK_SIZE:]
        cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes)
        data = cypher.decrypt(data)
        # Strip the padding: the last byte encodes the pad length.
        return data[:-ord(data[-1])]

    def dumps(self, obj, pickler=pickle):
        """pickle and encrypt a python object"""
        return self.encrypt(self.PICKLE_PAD + pickler.dumps(obj))

    def loads(self, data, pickler=pickle):
        """decrypt and unpickle a python object"""
        data = self.decrypt(data)
        # simple integrity check to verify that we got meaningful data
        assert data.startswith(self.PICKLE_PAD), "unexpected header"
        return pickler.loads(data[len(self.PICKLE_PAD):])
eric-stanley/NewsBlur | vendor/paypal/standard/pdt/models.py | 2 | 3434 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from urllib import unquote_plus
import urllib2
from django.db import models
from django.conf import settings
from django.http import QueryDict
from django.utils.http import urlencode
from vendor.paypal.standard.models import PayPalStandardBase
from vendor.paypal.standard.conf import POSTBACK_ENDPOINT, SANDBOX_POSTBACK_ENDPOINT
from vendor.paypal.standard.pdt.signals import pdt_successful, pdt_failed
# ### Todo: Move this logic to conf.py:
# if paypal.standard.pdt is in installed apps
# ... then check for this setting in conf.py
class PayPalSettingsError(Exception):
    """Raised when required PayPal settings are missing or incorrect."""
# Read the PDT identity token once at import time; fail fast with a clear
# error if the setting is missing or Django settings are misconfigured.
try:
    IDENTITY_TOKEN = settings.PAYPAL_IDENTITY_TOKEN
except Exception:
    # Narrowed from a bare ``except:``, which would also swallow
    # KeyboardInterrupt/SystemExit; any settings problem still maps to the
    # same descriptive error.
    raise PayPalSettingsError("You must set PAYPAL_IDENTITY_TOKEN in settings.py. Get this token by enabling PDT in your PayPal account.")
class PayPalPDT(PayPalStandardBase):
    # PDT-specific fields on top of the shared PayPal transaction model.
    format = u"<PDT: %s %s>"

    amt = models.DecimalField(max_digits=64, decimal_places=2, default=0, blank=True, null=True)  # amount
    cm = models.CharField(max_length=255, blank=True)   # custom field passthrough
    sig = models.CharField(max_length=255, blank=True)  # signature
    tx = models.CharField(max_length=255, blank=True)   # transaction id
    st = models.CharField(max_length=32, blank=True)    # status (SUCCESS/FAIL)

    class Meta:
        db_table = "paypal_pdt"
        verbose_name = "PayPal PDT"

    def _postback(self):
        """
        Perform PayPal PDT Postback validation.
        Sends the transaction ID and business token to PayPal which responses with
        SUCCESS or FAILED.
        """
        postback_dict = dict(cmd="_notify-synch", at=IDENTITY_TOKEN, tx=self.tx)
        postback_params = urlencode(postback_dict)
        return urllib2.urlopen(self.get_endpoint(), postback_params).read()

    def get_endpoint(self):
        """Use the sandbox when in DEBUG mode as we don't have a test_ipn variable in pdt."""
        if settings.DEBUG:
            return SANDBOX_POSTBACK_ENDPOINT
        else:
            return POSTBACK_ENDPOINT

    def _verify_postback(self):
        # Parse PayPal's line-oriented postback response: first line is the
        # overall status, the rest are url-quoted key=value pairs. We only
        # care whether a flag gets set, not the parsed result itself.
        from vendor.paypal.standard.pdt.forms import PayPalPDTForm
        result = False
        response_list = self.response.split('\n')
        response_dict = {}
        for i, line in enumerate(response_list):
            unquoted_line = unquote_plus(line).strip()
            if i == 0:
                self.st = unquoted_line
                if self.st == "SUCCESS":
                    result = True
            else:
                if self.st != "SUCCESS":
                    # On failure, record the offending line and stop parsing.
                    self.set_flag(line)
                    break
                try:
                    if not unquoted_line.startswith(' -'):
                        k, v = unquoted_line.split('=')
                        response_dict[k.strip()] = v.strip()
                except ValueError, e:
                    # Malformed lines (no '=') are deliberately skipped.
                    pass

        # Feed the parsed fields back through the model form so they are
        # validated and copied onto this instance (without saving).
        qd = QueryDict('', mutable=True)
        qd.update(response_dict)
        qd.update(dict(ipaddress=self.ipaddress, st=self.st, flag_info=self.flag_info))
        pdt_form = PayPalPDTForm(qd, instance=self)
        pdt_form.save(commit=False)

    def send_signals(self):
        # Notify listeners of the outcome: flagged transactions failed.
        if self.flag:
            pdt_failed.send(sender=self)
        else:
            pdt_successful.send(sender=self)
pdt_successful.send(sender=self) | mit |
pulse-vadc/neutron-lbaas-device-driver | setup.py | 2 | 1997 | #!/usr/bin/env python
from distutils.core import setup
import os
from subprocess import check_output
driver_path = None

# Best-effort auto-discovery: ask `locate` where the in-tree Brocade driver
# lives and let the user confirm that directory as the install target.
try:
    cmd_output = check_output(
        ["/usr/bin/locate", "neutron_lbaas/drivers/brocade/driver_v2.py"]
    )
    if len(cmd_output) > 0:
        path_guess = os.path.dirname(cmd_output.split("\n")[0])
        confirm = raw_input(
            "Path located: %s\n"
            "Is this the correct installation path for your Brocade "
            "Neutron LBaaS plugin driver? [y/n]" % path_guess
        )
        if confirm.strip().lower() == "y":
            driver_path = path_guess
except Exception:
    # Discovery is optional: `locate` may be absent or its database stale,
    # so we fall through to prompting the user. (Narrowed from a bare
    # ``except:`` so Ctrl-C/SystemExit are no longer swallowed.)
    pass

if driver_path is None:
    driver_path = raw_input(
        "Please enter the full path of the directory where the Brocade "
        "Neutron LBaaS plugin driver should be installed: "
    )
# Package definition: installs the Python packages plus the driver shim
# (driver_v2.py) into the neutron_lbaas path chosen above, and the sample
# config under /etc/neutron.
setup(
    name="brocade_neutron_lbaas",
    author="Matthew Geldert",
    author_email="mgeldert@brocade.com",
    description="Brocade vADC OpenStack Neutron LBaaS Device Driver",
    long_description=open("README.md").read(),
    version="mitaka",
    url="http://www.brocade.com",
    packages=[
        "brocade_neutron_lbaas",
        "brocade_neutron_lbaas_tenant_customizations_db",
        "brocade_neutron_lbaas.vtm"
    ],
    scripts=[
        "scripts/brocade_lbaas_config_generator",
        "scripts/brocade_lbaas_tenant_customization"
    ],
    data_files=[
        ("/etc/neutron/services/loadbalancer", ["conf/brocade.conf"]),
        (driver_path, ["driver_v2.py"])
    ],
    license="Apache Software License",
    platforms=["Linux"],
    classifiers=[
        "Intended Audience :: Information Technology",
        "Intended Audience :: System Administrators",
        "Environment :: OpenStack",
        "License :: OSI Approved :: Apache Software License"
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7"
    ]
)
| apache-2.0 |
Charlotte-Morgan/inasafe | safe/report/test/hello_world_report.py | 3 | 2707 | # coding=utf-8
"""Reporting 101."""
import os
import unittest
from safe.common.utilities import safe_dir
from safe.definitions.constants import INASAFE_TEST
from safe.definitions.reports import (
jinja2_component_type,
final_product_tag,
table_product_tag,
html_product_tag)
from safe.impact_function.impact_function import ImpactFunction
from safe.report.impact_report import ImpactReport
from safe.report.processors.default import jinja2_renderer
from safe.report.report_metadata import (
Jinja2ComponentsMetadata,
ReportMetadata)
from safe.test.utilities import get_qgis_app
from safe.utilities.i18n import tr
# Step 1: the extractor builds the rendering context for the component.
def hello_world_extractor(impact_report, component):
    """Return the Jinja2 context for the hello-world component.

    *impact_report* is unused here; *component* only contributes its key,
    which is echoed to stdout for tracing.
    """
    print('Component key: {component_key}'.format(component_key=component.key))
    return {'hello_world': "Hello World!"}
# Step 2: declare the report component. The extractor above feeds the
# 'hello-world.html' Jinja2 template; the renderer writes the result to
# 'hello-world-output.html'.
hello_world_component = {
    'key': 'hello-world',
    'type': jinja2_component_type,
    'processor': jinja2_renderer,
    'extractor': hello_world_extractor,
    'output_format': Jinja2ComponentsMetadata.OutputFormat.File,
    'output_path': 'hello-world-output.html',
    'template': 'hello-world.html',
    'tags': [
        final_product_tag,
        table_product_tag,
        html_product_tag
    ],
    'extra_args': {
        'header': tr('Hello World!')
    }
}

# Step 3: report-level metadata listing the components to render and the
# folder where their templates live.
hello_world_metadata_html = {
    'key': 'hello-world-result-html',
    'name': 'hello-world-result-html',
    'template_folder': safe_dir('report/test/fixtures'),
    'components': [hello_world_component]
}
class TestHelloWorldReport(unittest.TestCase):
    """Test about report generation, from scratch."""

    @classmethod
    def fixtures_dir(cls, path):
        """Helper to return fixture path."""
        directory_name = os.path.dirname(__file__)
        return os.path.join(directory_name, 'fixtures', path)

    def test_hello_world_report(self):
        """Test for creating hello world report.

        .. versionadded:: 4.1
        """
        QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app(qsetting=INASAFE_TEST)
        output_folder = self.fixtures_dir('../output/hello_world_report')

        # ImpactReport requires an impact function with outputs; stub the
        # class attribute so a bare ImpactFunction() is accepted.
        ImpactFunction.outputs = ['Not implemented']
        impact_function = ImpactFunction()
        template_metadata = ReportMetadata(
            metadata_dict=hello_world_metadata_html)
        # Render every component declared in hello_world_metadata_html into
        # output_folder.
        impact_report = ImpactReport(
            iface=IFACE,
            template_metadata=template_metadata,
            impact_function=impact_function)
        impact_report.output_folder = output_folder
        impact_report.process_components()
| gpl-3.0 |
GuillaumeGomez/servo | tests/wpt/web-platform-tests/cors/resources/cors-makeheader.py | 122 | 2224 | import json
def main(request, response):
    """wptserve handler that echoes request headers as JSON while setting
    CORS response headers driven by query parameters (origin, headers,
    credentials, methods, code, preflight, location, token, check)."""
    origin = request.GET.first("origin", request.headers.get('origin'))

    if "check" in request.GET:
        # Query mode: report whether a preflight for `token` was recorded.
        # stash.take() removes the entry; check=keep puts it back.
        token = request.GET.first("token")
        value = request.server.stash.take(token)
        if value is not None:
            if request.GET.first("check", None) == "keep":
                request.server.stash.put(token, value)
            body = "1"
        else:
            body = "0"
        return [("Content-Type", "text/plain")], body

    if origin != 'none':
        response.headers.set("Access-Control-Allow-Origin", origin)
    if 'origin2' in request.GET:
        # Deliberately emit a second A-C-A-O header for multi-origin tests.
        response.headers.append("Access-Control-Allow-Origin", request.GET.first('origin2'))

    # Preflight-related headers, each opt-in via a query parameter.
    if 'headers' in request.GET:
        response.headers.set("Access-Control-Allow-Headers", request.GET.first('headers'))
    if 'credentials' in request.GET:
        response.headers.set("Access-Control-Allow-Credentials", request.GET.first('credentials'))
    if 'methods' in request.GET:
        response.headers.set("Access-Control-Allow-Methods", request.GET.first('methods'))

    code_raw = request.GET.first('code', None)
    if code_raw:
        code = int(code_raw)
    else:
        code = None
    if request.method == 'OPTIONS':
        # Override the response code if we're in a preflight and it's asked
        if 'preflight' in request.GET:
            code = int(request.GET.first('preflight'))
        # Log that the preflight actually happened if we have an ident
        if 'token' in request.GET:
            request.server.stash.put(request.GET['token'], True)
    if 'location' in request.GET:
        # Redirect support: default to 302, only set Location for 3xx codes.
        if code is None:
            code = 302
        if code >= 300 and code < 400:
            response.headers.set("Location", request.GET.first('location'))

    # Echo every request header; multi-valued headers stay as lists.
    headers = {}
    for name, values in request.headers.iteritems():
        if len(values) == 1:
            headers[name] = values[0]
        else:
            headers[name] = values

    headers['get_value'] = request.GET.first('get_value', '')

    body = json.dumps(headers)

    if code:
        return (code, "StatusText"), [], body
    else:
        return body
| mpl-2.0 |
xiaojunwu/crosswalk-test-suite | webapi/webapi-nativefilesystem-xwalk-tests/inst.xpk.py | 187 | 5919 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
    """Run *cmd* through the shell, streaming its output to stdout.

    Returns (exit_code, list_of_output_lines). Blocks until the process
    exits; no timeout is applied (the calling tool handles that).
    """
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)

    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # An empty line once the process has an exit code means EOF.
        if output_line == '' and cmd_return_code != None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)

    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Wrap xwalkctl invocations so they run as the 'app' user with the
    Crosswalk DBus environment; other commands pass through untouched."""
    if "xwalkctl" not in cmd:
        return cmd
    return "su - app -c '%s;%s'" % (XW_ENV, cmd)
def getPKGID(pkg_name=None):
    """Return the installed package id matching *pkg_name*, or None.

    Runs `xwalkctl` on the device (via sdb shell or ssh, depending on
    PARAMETERS.mode) and scans its "<id> <name>" listing.
    """
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('xwalkctl'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('xwalkctl'))

    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None

    test_app_id = None
    for line in output:
        pkg_infos = line.split()
        # Lines with a single token are headers/noise, not packages.
        if len(pkg_infos) == 1:
            continue
        name = pkg_infos[1]
        if pkg_name == name:
            test_app_id = pkg_infos[0]
            print test_app_id
            break

    return test_app_id
def doRemoteCMD(cmd=None):
    """Execute *cmd* on the target device over SDB or SSH and return
    doCMD's (exit_code, output_lines) tuple."""
    wrapped = updateCMD(cmd)
    if PARAMETERS.mode == "SDB":
        remote_cmd = "sdb -s %s shell %s" % (PARAMETERS.device, wrapped)
    else:
        remote_cmd = "ssh %s \"%s\"" % (PARAMETERS.device, wrapped)
    return doCMD(remote_cmd)
def doRemoteCopy(src=None, dest=None):
    """Copy *src* to *dest* on the device (sdb push or scp) and sync.

    Returns True on success, False on failure, matching how callers use it
    (`if not doRemoteCopy(...): action_status = False`).
    """
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)

    (return_code, output) = doCMD(cmd)
    doRemoteCMD("sync")

    # Bug fix: the original returned True on a NON-zero exit code, so a
    # successful copy was reported as a failure to callers and vice versa.
    # A zero exit code from the copy command means success.
    if return_code != 0:
        return False
    return True
def uninstPKGs():
    """Uninstall every .xpk package found under SCRIPT_DIR from the device
    and remove the deployed source directory. Returns True on success."""
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # Media fixtures are not packages; skip them.
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".xpk"):
                pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
                if not pkg_id:
                    # Package not installed (or lookup failed).
                    action_status = False
                    continue
                (return_code, output) = doRemoteCMD(
                    "xwalkctl -u %s" % pkg_id)
                # xwalkctl reports problems in its output, not its exit code.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break

    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False

    return action_status
def instPKGs():
    """Push every .xpk under SCRIPT_DIR to the device, install it with
    xwalkctl, and clean up the pushed file. Returns True on success."""
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # Media fixtures are not packages; skip them.
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".xpk"):
                if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
                    action_status = False
                (return_code, output) = doRemoteCMD(
                    "xwalkctl -i %s/%s" % (SRC_DIR, file))
                # Remove the pushed .xpk once installed.
                doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
                # xwalkctl reports problems in its output, not its exit code.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break

    # Do some special copy/delete... steps (template kept for suites that
    # need extra fixtures deployed):
    '''
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s/tests" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False

    if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
        action_status = False
    '''

    return action_status
def main():
    """Parse CLI options (-m mode, -s device, -i install, -u uninstall),
    auto-detect an SDB device when none is given, then run the requested
    install/uninstall and exit non-zero on failure."""
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)

    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"

    if PARAMETERS.mode == "SDB":
        if not PARAMETERS.device:
            # Pick the first attached device from `sdb devices`.
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        PARAMETERS.mode = "SSH"

    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)

    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)

    # Default action (no -u) is install.
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
main()
sys.exit(0)
| bsd-3-clause |
gregswift/ansible-modules-extras | system/ohai.py | 86 | 1695 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: ohai
short_description: Returns inventory data from I(Ohai)
description:
- Similar to the M(facter) module, this runs the I(Ohai) discovery program
(U(http://wiki.opscode.com/display/chef/Ohai)) on the remote host and
returns JSON inventory data.
I(Ohai) data is a bit more verbose and nested than I(facter).
version_added: "0.6"
options: {}
notes: []
requirements: [ "ohai" ]
author:
- "Ansible Core Team"
- "Michael DeHaan (@mpdehaan)"
'''
EXAMPLES = '''
# Retrieve (ohai) data from all Web servers and store in one-file per host
ansible webservers -m ohai --tree=/tmp/ohaidata
'''
def main():
    """Run the `ohai` binary on the remote host and return its JSON output
    as the module result.

    AnsibleModule and json come from the module_utils star-import at the
    bottom of the file. check_rc=True makes a failing ohai abort the module.
    """
    module = AnsibleModule(
        argument_spec = dict()  # this module takes no options
    )
    cmd = ["/usr/bin/env", "ohai"]
    rc, out, err = module.run_command(cmd, check_rc=True)
    module.exit_json(**json.loads(out))
# import module snippets
# NOTE: Ansible replaces/expands this wildcard import with the shared
# module_utils code when shipping the module to the target host, which is
# why it sits at the bottom rather than the top of the file.
from ansible.module_utils.basic import *

main()
| gpl-3.0 |
vlegoff/tsunami | src/primaires/pnj/commandes/chemin/supprimer.py | 1 | 2940 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'supprimer' de la commande 'chemin'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmSupprimer(Parametre):

    """Parameter 'supprimer' of the 'chemin' command.

    Deletes an NPC (PNJ) path identified by its key.

    """

    def __init__(self):
        """Configure the parameter: name, alias, schema and help texts."""
        Parametre.__init__(self, "supprimer", "del")
        self.schema = "<cle>"
        self.aide_courte = "supprime un chemin pour PNJ"
        self.aide_longue = (
            "Cette commande permet simplement de supprimer un "
            "chemin pour PNJ. Vous devez préciser sa clé. Soyez "
            "prudent cependant : si un PNJ emprunte actuellement le "
            "chemin que vous voulez supprimer, le PNJ continuera de "
            "parcourir le chemin, même si il n'existe plus.")

    def ajouter(self):
        """Hook called when the command is added to the interpreter."""
        masque = self.noeud.get_masque("cle")
        masque.proprietes["regex"] = r"'[a-z0-9_:]{3,}'"

    def interpreter(self, personnage, dic_masques):
        """Interpret the parameter: delete the path if its key exists."""
        cle = dic_masques["cle"].cle
        if cle not in importeur.pnj.chemins:
            personnage << "|err|Ce chemin n'existe pas.|ff|"
            return

        importeur.pnj.supprimer_chemin(cle)
        personnage << "Le chemin {} a bien été supprimé.".format(cle)
| bsd-3-clause |
nichung/wwwflaskBlogrevA | env/lib/python2.7/site-packages/pymongo/database.py | 19 | 44900 | # Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Database level operations."""
import warnings
from bson.code import Code
from bson.codec_options import CodecOptions
from bson.dbref import DBRef
from bson.objectid import ObjectId
from bson.py3compat import iteritems, string_type, _unicode
from bson.son import SON
from pymongo import auth, common
from pymongo.collection import Collection
from pymongo.command_cursor import CommandCursor
from pymongo.errors import (CollectionInvalid,
ConfigurationError,
InvalidName,
OperationFailure)
from pymongo.helpers import _first_batch
from pymongo.read_preferences import ReadPreference
from pymongo.son_manipulator import SONManipulator
from pymongo.write_concern import WriteConcern
def _check_name(name):
    """Raise InvalidName if ``name`` is not a usable database name."""
    if not name:
        raise InvalidName("database name cannot be the empty string")

    forbidden = (" ", ".", "$", "/", "\\", "\x00")
    for invalid_char in forbidden:
        if invalid_char in name:
            raise InvalidName("database names cannot contain the "
                              "character %r" % invalid_char)
class Database(common.BaseObject):
"""A Mongo database.
"""
def __init__(self, client, name, codec_options=None,
read_preference=None, write_concern=None):
"""Get a database by client and name.
Raises :class:`TypeError` if `name` is not an instance of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~pymongo.errors.InvalidName` if `name` is not a valid
database name.
:Parameters:
- `client`: A :class:`~pymongo.mongo_client.MongoClient` instance.
- `name`: The database name.
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) client.codec_options is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) client.read_preference is used.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) client.write_concern is used.
.. mongodoc:: databases
.. versionchanged:: 3.0
Added the codec_options, read_preference, and write_concern options.
:class:`~pymongo.database.Database` no longer returns an instance
of :class:`~pymongo.collection.Collection` for attribute names
with leading underscores. You must use dict-style lookups instead::
db['__my_collection__']
Not:
db.__my_collection__
"""
super(Database, self).__init__(
codec_options or client.codec_options,
read_preference or client.read_preference,
write_concern or client.write_concern)
if not isinstance(name, string_type):
raise TypeError("name must be an instance "
"of %s" % (string_type.__name__,))
if name != '$external':
_check_name(name)
self.__name = _unicode(name)
self.__client = client
self.__incoming_manipulators = []
self.__incoming_copying_manipulators = []
self.__outgoing_manipulators = []
self.__outgoing_copying_manipulators = []
def add_son_manipulator(self, manipulator):
"""Add a new son manipulator to this database.
**DEPRECATED** - `add_son_manipulator` is deprecated.
.. versionchanged:: 3.0
Deprecated add_son_manipulator.
"""
warnings.warn("add_son_manipulator is deprecated",
DeprecationWarning, stacklevel=2)
base = SONManipulator()
def method_overwritten(instance, method):
"""Test if this method has been overridden."""
return (getattr(
instance, method).__func__ != getattr(base, method).__func__)
if manipulator.will_copy():
if method_overwritten(manipulator, "transform_incoming"):
self.__incoming_copying_manipulators.insert(0, manipulator)
if method_overwritten(manipulator, "transform_outgoing"):
self.__outgoing_copying_manipulators.insert(0, manipulator)
else:
if method_overwritten(manipulator, "transform_incoming"):
self.__incoming_manipulators.insert(0, manipulator)
if method_overwritten(manipulator, "transform_outgoing"):
self.__outgoing_manipulators.insert(0, manipulator)
@property
def system_js(self):
"""A :class:`SystemJS` helper for this :class:`Database`.
See the documentation for :class:`SystemJS` for more details.
"""
return SystemJS(self)
@property
def client(self):
"""The client instance for this :class:`Database`."""
return self.__client
@property
def name(self):
"""The name of this :class:`Database`."""
return self.__name
@property
def incoming_manipulators(self):
"""All incoming SON manipulators installed on this instance.
.. versionadded:: 2.0
"""
return [manipulator.__class__.__name__
for manipulator in self.__incoming_manipulators]
@property
def incoming_copying_manipulators(self):
"""All incoming SON copying manipulators installed on this instance.
.. versionadded:: 2.0
"""
return [manipulator.__class__.__name__
for manipulator in self.__incoming_copying_manipulators]
@property
def outgoing_manipulators(self):
"""List all outgoing SON manipulators
installed on this instance.
.. versionadded:: 2.0
"""
return [manipulator.__class__.__name__
for manipulator in self.__outgoing_manipulators]
@property
def outgoing_copying_manipulators(self):
"""List all outgoing SON copying manipulators
installed on this instance.
.. versionadded:: 2.0
"""
return [manipulator.__class__.__name__
for manipulator in self.__outgoing_copying_manipulators]
def __eq__(self, other):
if isinstance(other, Database):
return (self.__client == other.client and
self.__name == other.name)
return NotImplemented
def __ne__(self, other):
return not self == other
def __repr__(self):
return "Database(%r, %r)" % (self.__client, self.__name)
def __getattr__(self, name):
"""Get a collection of this database by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
if name.startswith('_'):
raise AttributeError(
"Database has no attribute %r. To access the %s"
" collection, use database[%r]." % (name, name, name))
return self.__getitem__(name)
def __getitem__(self, name):
"""Get a collection of this database by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
return Collection(self, name)
def get_collection(self, name, codec_options=None,
read_preference=None, write_concern=None):
"""Get a :class:`~pymongo.collection.Collection` with the given name
and options.
Useful for creating a :class:`~pymongo.collection.Collection` with
different codec options, read preference, and/or write concern from
this :class:`Database`.
>>> db.read_preference
Primary()
>>> coll1 = db.test
>>> coll1.read_preference
Primary()
>>> from pymongo import ReadPreference
>>> coll2 = db.get_collection(
... 'test', read_preference=ReadPreference.SECONDARY)
>>> coll2.read_preference
Secondary(tag_sets=None)
:Parameters:
- `name`: The name of the collection - a string.
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`Database` is
used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`Database` is used. See :mod:`~pymongo.read_preferences`
for options.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`Database` is
used.
"""
return Collection(
self, name, False, codec_options, read_preference, write_concern)
def create_collection(self, name, codec_options=None,
read_preference=None, write_concern=None, **kwargs):
"""Create a new :class:`~pymongo.collection.Collection` in this
database.
Normally collection creation is automatic. This method should
only be used to specify options on
creation. :class:`~pymongo.errors.CollectionInvalid` will be
raised if the collection already exists.
Options should be passed as keyword arguments to this method. Supported
options vary with MongoDB release. Some examples include:
- "size": desired initial size for the collection (in
bytes). For capped collections this size is the max
size of the collection.
- "capped": if True, this is a capped collection
- "max": maximum number of objects if capped (optional)
See the MongoDB documentation for a full list of supported options by
server version.
:Parameters:
- `name`: the name of the collection to create
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`Database` is
used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`Database` is used.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`Database` is
used.
- `**kwargs` (optional): additional keyword arguments will
be passed as options for the create collection command
.. versionchanged:: 3.0
Added the codec_options, read_preference, and write_concern options.
.. versionchanged:: 2.2
Removed deprecated argument: options
"""
if name in self.collection_names():
raise CollectionInvalid("collection %s already exists" % name)
return Collection(self, name, True, codec_options,
read_preference, write_concern, **kwargs)
def _apply_incoming_manipulators(self, son, collection):
"""Apply incoming manipulators to `son`."""
for manipulator in self.__incoming_manipulators:
son = manipulator.transform_incoming(son, collection)
return son
def _apply_incoming_copying_manipulators(self, son, collection):
"""Apply incoming copying manipulators to `son`."""
for manipulator in self.__incoming_copying_manipulators:
son = manipulator.transform_incoming(son, collection)
return son
def _fix_incoming(self, son, collection):
"""Apply manipulators to an incoming SON object before it gets stored.
:Parameters:
- `son`: the son object going into the database
- `collection`: the collection the son object is being saved in
"""
son = self._apply_incoming_manipulators(son, collection)
son = self._apply_incoming_copying_manipulators(son, collection)
return son
def _fix_outgoing(self, son, collection):
"""Apply manipulators to a SON object as it comes out of the database.
:Parameters:
- `son`: the son object coming out of the database
- `collection`: the collection the son object was saved in
"""
for manipulator in reversed(self.__outgoing_manipulators):
son = manipulator.transform_outgoing(son, collection)
for manipulator in reversed(self.__outgoing_copying_manipulators):
son = manipulator.transform_outgoing(son, collection)
return son
def _command(self, sock_info, command, slave_ok=False, value=1, check=True,
allowable_errors=None, read_preference=ReadPreference.PRIMARY,
codec_options=CodecOptions(), **kwargs):
"""Internal command helper."""
if isinstance(command, string_type):
command = SON([(command, value)])
command.update(kwargs)
return sock_info.command(self.__name,
command,
slave_ok,
read_preference,
codec_options,
check,
allowable_errors)
def command(self, command, value=1, check=True,
allowable_errors=None, read_preference=ReadPreference.PRIMARY,
codec_options=CodecOptions(), **kwargs):
"""Issue a MongoDB command.
Send command `command` to the database and return the
response. If `command` is an instance of :class:`basestring`
(:class:`str` in python 3) then the command {`command`: `value`}
will be sent. Otherwise, `command` must be an instance of
:class:`dict` and will be sent as is.
Any additional keyword arguments will be added to the final
command document before it is sent.
For example, a command like ``{buildinfo: 1}`` can be sent
using:
>>> db.command("buildinfo")
For a command where the value matters, like ``{collstats:
collection_name}`` we can do:
>>> db.command("collstats", collection_name)
For commands that take additional arguments we can use
kwargs. So ``{filemd5: object_id, root: file_root}`` becomes:
>>> db.command("filemd5", object_id, root=file_root)
:Parameters:
- `command`: document representing the command to be issued,
or the name of the command (for simple commands only).
.. note:: the order of keys in the `command` document is
significant (the "verb" must come first), so commands
which require multiple keys (e.g. `findandmodify`)
should use an instance of :class:`~bson.son.SON` or
a string and kwargs instead of a Python `dict`.
- `value` (optional): value to use for the command verb when
`command` is passed as a string
- `check` (optional): check the response for errors, raising
:class:`~pymongo.errors.OperationFailure` if there are any
- `allowable_errors`: if `check` is ``True``, error messages
in this list will be ignored by error-checking
- `read_preference`: The read preference for this operation.
See :mod:`~pymongo.read_preferences` for options.
- `codec_options`: A :class:`~bson.codec_options.CodecOptions`
instance.
- `**kwargs` (optional): additional keyword arguments will
be added to the command document before it is sent
.. note:: :meth:`command` does **not** obey :attr:`read_preference`
or :attr:`codec_options`. You must use the `read_preference` and
`codec_options` parameters instead.
.. versionchanged:: 3.0
Removed the `as_class`, `fields`, `uuid_subtype`, `tag_sets`,
and `secondary_acceptable_latency_ms` option.
Removed `compile_re` option: PyMongo now always represents BSON
regular expressions as :class:`~bson.regex.Regex` objects. Use
:meth:`~bson.regex.Regex.try_compile` to attempt to convert from a
BSON regular expression to a Python regular expression object.
Added the `codec_options` parameter.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. versionchanged:: 2.3
Added `tag_sets` and `secondary_acceptable_latency_ms` options.
.. versionchanged:: 2.2
Added support for `as_class` - the class you want to use for
the resulting documents
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
.. mongodoc:: commands
"""
client = self.__client
with client._socket_for_reads(read_preference) as (sock_info, slave_ok):
return self._command(sock_info, command, slave_ok, value,
check, allowable_errors, read_preference,
codec_options, **kwargs)
    def _list_collections(self, sock_info, slave_okay, criteria=None):
        """Internal listCollections helper.

        Returns a :class:`~pymongo.command_cursor.CommandCursor` over
        collection description documents, using the ``listCollections``
        command on newer servers (wire version > 2) and falling back to
        querying the ``system.namespaces`` collection on older ones.

        :Parameters:
          - `sock_info`: the socket/connection to use
          - `slave_okay`: whether the slaveOk wire flag may be set
          - `criteria` (optional): filter document applied to the listing
        """
        criteria = criteria or {}
        if sock_info.max_wire_version > 2:
            # Modern path: the server implements the listCollections command.
            cmd = SON([("listCollections", 1), ("cursor", {})])
            if criteria:
                cmd["filter"] = criteria
            coll = self["$cmd"]
            cursor = self._command(sock_info, cmd, slave_okay)["cursor"]
            return CommandCursor(coll, cursor, sock_info.address)
        else:
            # Legacy path: read system.namespaces directly and wrap the
            # reply in a synthetic cursor document so callers see a
            # uniform CommandCursor either way.
            coll = self["system.namespaces"]
            res = _first_batch(sock_info, coll.full_name,
                               criteria, 0, slave_okay,
                               CodecOptions(), ReadPreference.PRIMARY)
            data = res["data"]
            cursor = {
                "id": res["cursor_id"],
                "firstBatch": data,
                "ns": coll.full_name,
            }
            # Need to tell the cursor how many docs were in the first batch.
            return CommandCursor(coll, cursor, sock_info.address, len(data))
def collection_names(self, include_system_collections=True):
"""Get a list of all the collection names in this database.
:Parameters:
- `include_system_collections` (optional): if ``False`` list
will not include system collections (e.g ``system.indexes``)
"""
with self.__client._socket_for_reads(
ReadPreference.PRIMARY) as (sock_info, slave_okay):
results = self._list_collections(sock_info, slave_okay)
names = [result["name"] for result in results]
if sock_info.max_wire_version <= 2:
# MongoDB 2.4 and older return index namespaces and collection
# namespaces prefixed with the database name.
names = [n[len(self.__name) + 1:] for n in names
if n.startswith(self.__name + ".") and "$" not in n]
if not include_system_collections:
names = [name for name in names if not name.startswith("system.")]
return names
def drop_collection(self, name_or_collection):
"""Drop a collection.
:Parameters:
- `name_or_collection`: the name of a collection to drop or the
collection object itself
"""
name = name_or_collection
if isinstance(name, Collection):
name = name.name
if not isinstance(name, string_type):
raise TypeError("name_or_collection must be an "
"instance of %s" % (string_type.__name__,))
self.__client._purge_index(self.__name, name)
self.command("drop", _unicode(name), allowable_errors=["ns not found"])
def validate_collection(self, name_or_collection,
scandata=False, full=False):
"""Validate a collection.
Returns a dict of validation info. Raises CollectionInvalid if
validation fails.
With MongoDB < 1.9 the result dict will include a `result` key
with a string value that represents the validation results. With
MongoDB >= 1.9 the `result` key no longer exists and the results
are split into individual fields in the result dict.
:Parameters:
- `name_or_collection`: A Collection object or the name of a
collection to validate.
- `scandata`: Do extra checks beyond checking the overall
structure of the collection.
- `full`: Have the server do a more thorough scan of the
collection. Use with `scandata` for a thorough scan
of the structure of the collection and the individual
documents. Ignored in MongoDB versions before 1.9.
"""
name = name_or_collection
if isinstance(name, Collection):
name = name.name
if not isinstance(name, string_type):
raise TypeError("name_or_collection must be an instance of "
"%s or Collection" % (string_type.__name__,))
result = self.command("validate", _unicode(name),
scandata=scandata, full=full)
valid = True
# Pre 1.9 results
if "result" in result:
info = result["result"]
if info.find("exception") != -1 or info.find("corrupt") != -1:
raise CollectionInvalid("%s invalid: %s" % (name, info))
# Sharded results
elif "raw" in result:
for _, res in iteritems(result["raw"]):
if "result" in res:
info = res["result"]
if (info.find("exception") != -1 or
info.find("corrupt") != -1):
raise CollectionInvalid("%s invalid: "
"%s" % (name, info))
elif not res.get("valid", False):
valid = False
break
# Post 1.9 non-sharded results.
elif not result.get("valid", False):
valid = False
if not valid:
raise CollectionInvalid("%s invalid: %r" % (name, result))
return result
def current_op(self, include_all=False):
"""Get information on operations currently running.
:Parameters:
- `include_all` (optional): if ``True`` also list currently
idle operations in the result
"""
coll = self.get_collection(
"$cmd.sys.inprog", read_preference=ReadPreference.PRIMARY)
if include_all:
return coll.find_one({"$all": True})
else:
return coll.find_one()
def profiling_level(self):
"""Get the database's current profiling level.
Returns one of (:data:`~pymongo.OFF`,
:data:`~pymongo.SLOW_ONLY`, :data:`~pymongo.ALL`).
.. mongodoc:: profiling
"""
result = self.command("profile", -1)
assert result["was"] >= 0 and result["was"] <= 2
return result["was"]
def set_profiling_level(self, level, slow_ms=None):
"""Set the database's profiling level.
:Parameters:
- `level`: Specifies a profiling level, see list of possible values
below.
- `slow_ms`: Optionally modify the threshold for the profile to
consider a query or operation. Even if the profiler is off queries
slower than the `slow_ms` level will get written to the logs.
Possible `level` values:
+----------------------------+------------------------------------+
| Level | Setting |
+============================+====================================+
| :data:`~pymongo.OFF` | Off. No profiling. |
+----------------------------+------------------------------------+
| :data:`~pymongo.SLOW_ONLY` | On. Only includes slow operations. |
+----------------------------+------------------------------------+
| :data:`~pymongo.ALL` | On. Includes all operations. |
+----------------------------+------------------------------------+
Raises :class:`ValueError` if level is not one of
(:data:`~pymongo.OFF`, :data:`~pymongo.SLOW_ONLY`,
:data:`~pymongo.ALL`).
.. mongodoc:: profiling
"""
if not isinstance(level, int) or level < 0 or level > 2:
raise ValueError("level must be one of (OFF, SLOW_ONLY, ALL)")
if slow_ms is not None and not isinstance(slow_ms, int):
raise TypeError("slow_ms must be an integer")
if slow_ms is not None:
self.command("profile", level, slowms=slow_ms)
else:
self.command("profile", level)
def profiling_info(self):
"""Returns a list containing current profiling information.
.. mongodoc:: profiling
"""
return list(self["system.profile"].find())
    def error(self):
        """**DEPRECATED**: Get the error if one occurred on the last operation.

        This method is obsolete: all MongoDB write operations (insert, update,
        remove, and so on) use the write concern ``w=1`` and report their
        errors by default.

        Returns ``None`` when the last operation did not fail.

        .. versionchanged:: 2.8
          Deprecated.
        """
        warnings.warn("Database.error() is deprecated",
                      DeprecationWarning, stacklevel=2)
        error = self.command("getlasterror")
        # A null "err" field means the last operation succeeded.
        error_msg = error.get("err", "")
        if error_msg is None:
            return None
        if error_msg.startswith("not master"):
            # The server we talked to is no longer primary.
            # Reset primary server and request check, if another thread isn't
            # doing so already.
            primary = self.__client.primary
            if primary:
                self.__client._reset_server_and_request_check(primary)
        return error
def last_status(self):
"""**DEPRECATED**: Get status information from the last operation.
This method is obsolete: all MongoDB write operations (insert, update,
remove, and so on) use the write concern ``w=1`` and report their
errors by default.
Returns a SON object with status information.
.. versionchanged:: 2.8
Deprecated.
"""
warnings.warn("last_status() is deprecated",
DeprecationWarning, stacklevel=2)
return self.command("getlasterror")
def previous_error(self):
"""**DEPRECATED**: Get the most recent error on this database.
This method is obsolete: all MongoDB write operations (insert, update,
remove, and so on) use the write concern ``w=1`` and report their
errors by default.
Only returns errors that have occurred since the last call to
:meth:`reset_error_history`. Returns None if no such errors have
occurred.
.. versionchanged:: 2.8
Deprecated.
"""
warnings.warn("previous_error() is deprecated",
DeprecationWarning, stacklevel=2)
error = self.command("getpreverror")
if error.get("err", 0) is None:
return None
return error
def reset_error_history(self):
"""**DEPRECATED**: Reset the error history of this database.
This method is obsolete: all MongoDB write operations (insert, update,
remove, and so on) use the write concern ``w=1`` and report their
errors by default.
Calls to :meth:`previous_error` will only return errors that have
occurred since the most recent call to this method.
.. versionchanged:: 2.8
Deprecated.
"""
warnings.warn("reset_error_history() is deprecated",
DeprecationWarning, stacklevel=2)
self.command("reseterror")
def __iter__(self):
return self
def __next__(self):
raise TypeError("'Database' object is not iterable")
next = __next__
def _default_role(self, read_only):
"""Return the default user role for this database."""
if self.name == "admin":
if read_only:
return "readAnyDatabase"
else:
return "root"
else:
if read_only:
return "read"
else:
return "dbOwner"
def _create_or_update_user(
self, create, name, password, read_only, **kwargs):
"""Use a command to create (if create=True) or modify a user.
"""
opts = {}
if read_only or (create and "roles" not in kwargs):
warnings.warn("Creating a user with the read_only option "
"or without roles is deprecated in MongoDB "
">= 2.6", DeprecationWarning)
opts["roles"] = [self._default_role(read_only)]
elif read_only:
warnings.warn("The read_only option is deprecated in MongoDB "
">= 2.6, use 'roles' instead", DeprecationWarning)
if password is not None:
# We always salt and hash client side.
if "digestPassword" in kwargs:
raise ConfigurationError("The digestPassword option is not "
"supported via add_user. Please use "
"db.command('createUser', ...) "
"instead for this option.")
opts["pwd"] = auth._password_digest(name, password)
opts["digestPassword"] = False
# Don't send {} as writeConcern.
if self.write_concern.acknowledged and self.write_concern.document:
opts["writeConcern"] = self.write_concern.document
opts.update(kwargs)
if create:
command_name = "createUser"
else:
command_name = "updateUser"
self.command(command_name, name, **opts)
    def _legacy_add_user(self, name, password, read_only, **kwargs):
        """Uses v1 system to add users, i.e. saving to system.users.

        Fallback used when the server does not support the user
        management commands (see add_user).

        :Parameters:
          - `name`: the user name.
          - `password`: plain-text password, or ``None`` to keep the
            stored hash. Hashed client side before storage.
          - `read_only`: stored in the user document when not ``None``.
          - `**kwargs`: extra fields merged into the user document.
        """
        # Start from the existing user document, if any, so fields we do
        # not explicitly set are preserved on update.
        user = self.system.users.find_one({"user": name}) or {"user": name}
        if password is not None:
            user["pwd"] = auth._password_digest(name, password)
        if read_only is not None:
            user["readOnly"] = read_only
        user.update(kwargs)

        # We don't care what the _id is, only that it has one
        # for the replace_one call below.
        user.setdefault("_id", ObjectId())
        coll = self.system.users
        if not self.write_concern.acknowledged:
            coll = coll.with_options(write_concern=WriteConcern())
        try:
            # Upsert so this works for both create and update.
            coll.replace_one({"_id": user["_id"]}, user, True)
        except OperationFailure as exc:
            # First admin user add fails gle in MongoDB >= 2.1.2
            # See SERVER-4225 for more information.
            if 'login' in str(exc):
                pass
            # First admin user add fails gle from mongos 2.0.x
            # and 2.2.x.
            elif (exc.details and
                  'getlasterror' in exc.details.get('note', '')):
                pass
            else:
                raise
    def add_user(self, name, password=None, read_only=None, **kwargs):
        """Create user `name` with password `password`.

        Add a new user with permissions for this :class:`Database`.

        .. note:: Will change the password if user `name` already exists.

        :Parameters:
          - `name`: the name of the user to create
          - `password` (optional): the password of the user to create. Can not
            be used with the ``userSource`` argument.
          - `read_only` (optional): if ``True`` the user will be read only
          - `**kwargs` (optional): optional fields for the user document
            (e.g. ``userSource``, ``otherDBRoles``, or ``roles``). See
            `<http://docs.mongodb.org/manual/reference/privilege-documents>`_
            for more information.

        .. note:: The use of optional keyword arguments like ``userSource``,
           ``otherDBRoles``, or ``roles`` requires MongoDB >= 2.4.0

        .. versionchanged:: 2.5
           Added kwargs support for optional fields introduced in MongoDB 2.4

        .. versionchanged:: 2.2
           Added support for read only users
        """
        if not isinstance(name, string_type):
            raise TypeError("name must be an "
                            "instance of %s" % (string_type.__name__,))
        if password is not None:
            if not isinstance(password, string_type):
                raise TypeError("password must be an "
                                "instance of %s" % (string_type.__name__,))
            if len(password) == 0:
                raise ValueError("password can't be empty")
        if read_only is not None:
            read_only = common.validate_boolean('read_only', read_only)
            if 'roles' in kwargs:
                raise ConfigurationError("Can not use "
                                         "read_only and roles together")
        try:
            # Probe for the user first; if the server lacks usersInfo this
            # raises and we fall back below.
            uinfo = self.command("usersInfo", name)
            # Create the user if not found in uinfo, otherwise update one.
            self._create_or_update_user(
                (not uinfo["users"]), name, password, read_only, **kwargs)
        except OperationFailure as exc:
            # MongoDB >= 2.5.3 requires the use of commands to manage
            # users.
            if exc.code in common.COMMAND_NOT_FOUND_CODES:
                self._legacy_add_user(name, password, read_only, **kwargs)
                return
            # Unauthorized. Attempt to create the user in case of
            # localhost exception.
            elif exc.code == 13:
                self._create_or_update_user(
                    True, name, password, read_only, **kwargs)
            else:
                raise
def remove_user(self, name):
"""Remove user `name` from this :class:`Database`.
User `name` will no longer have permissions to access this
:class:`Database`.
:Parameters:
- `name`: the name of the user to remove
"""
try:
cmd = SON([("dropUser", name)])
# Don't send {} as writeConcern.
if self.write_concern.acknowledged and self.write_concern.document:
cmd["writeConcern"] = self.write_concern.document
self.command(cmd)
except OperationFailure as exc:
# See comment in add_user try / except above.
if exc.code in common.COMMAND_NOT_FOUND_CODES:
coll = self.system.users
if not self.write_concern.acknowledged:
coll = coll.with_options(write_concern=WriteConcern())
coll.delete_one({"user": name})
return
raise
def authenticate(self, name, password=None,
source=None, mechanism='DEFAULT', **kwargs):
"""Authenticate to use this database.
Authentication lasts for the life of the underlying client
instance, or until :meth:`logout` is called.
Raises :class:`TypeError` if (required) `name`, (optional) `password`,
or (optional) `source` is not an instance of :class:`basestring`
(:class:`str` in python 3).
.. note::
- This method authenticates the current connection, and
will also cause all new :class:`~socket.socket` connections
in the underlying client instance to be authenticated automatically.
- Authenticating more than once on the same database with different
credentials is not supported. You must call :meth:`logout` before
authenticating with new credentials.
- When sharing a client instance between multiple threads, all
threads will share the authentication. If you need different
authentication profiles for different purposes you must use
distinct client instances.
:Parameters:
- `name`: the name of the user to authenticate.
- `password` (optional): the password of the user to authenticate.
Not used with GSSAPI or MONGODB-X509 authentication.
- `source` (optional): the database to authenticate on. If not
specified the current database is used.
- `mechanism` (optional): See
:data:`~pymongo.auth.MECHANISMS` for options.
By default, use SCRAM-SHA-1 with MongoDB 3.0 and later,
MONGODB-CR (MongoDB Challenge Response protocol) for older servers.
- `authMechanismProperties` (optional): Used to specify
authentication mechanism specific options. To specify the service
name for GSSAPI authentication pass
authMechanismProperties='SERVICE_NAME:<service name>'
.. versionadded:: 2.8
Use SCRAM-SHA-1 with MongoDB 3.0 and later.
.. versionchanged:: 2.5
Added the `source` and `mechanism` parameters. :meth:`authenticate`
now raises a subclass of :class:`~pymongo.errors.PyMongoError` if
authentication fails due to invalid credentials or configuration
issues.
.. mongodoc:: authenticate
"""
if not isinstance(name, string_type):
raise TypeError("name must be an "
"instance of %s" % (string_type.__name__,))
if password is not None and not isinstance(password, string_type):
raise TypeError("password must be an "
"instance of %s" % (string_type.__name__,))
if source is not None and not isinstance(source, string_type):
raise TypeError("source must be an "
"instance of %s" % (string_type.__name__,))
common.validate_auth_mechanism('mechanism', mechanism)
validated_options = {}
for option, value in iteritems(kwargs):
normalized, val = common.validate_auth_option(option, value)
validated_options[normalized] = val
credentials = auth._build_credentials_tuple(
mechanism,
source or self.name,
name,
password,
validated_options)
self.client._cache_credentials(
self.name,
credentials,
connect=True)
return True
    def logout(self):
        """Deauthorize use of this database for this client instance.

        Drops this database's cached credentials from the shared client;
        individual sockets are deauthenticated lazily as they are used.
        """
        # Sockets will be deauthenticated as they are used.
        self.client._purge_credentials(self.name)
def dereference(self, dbref, **kwargs):
"""Dereference a :class:`~bson.dbref.DBRef`, getting the
document it points to.
Raises :class:`TypeError` if `dbref` is not an instance of
:class:`~bson.dbref.DBRef`. Returns a document, or ``None`` if
the reference does not point to a valid document. Raises
:class:`ValueError` if `dbref` has a database specified that
is different from the current database.
:Parameters:
- `dbref`: the reference
- `**kwargs` (optional): any additional keyword arguments
are the same as the arguments to
:meth:`~pymongo.collection.Collection.find`.
"""
if not isinstance(dbref, DBRef):
raise TypeError("cannot dereference a %s" % type(dbref))
if dbref.database is not None and dbref.database != self.__name:
raise ValueError("trying to dereference a DBRef that points to "
"another database (%r not %r)" % (dbref.database,
self.__name))
return self[dbref.collection].find_one({"_id": dbref.id}, **kwargs)
def eval(self, code, *args):
"""Evaluate a JavaScript expression in MongoDB.
Useful if you need to touch a lot of data lightly; in such a
scenario the network transfer of the data could be a
bottleneck. The `code` argument must be a JavaScript
function. Additional positional arguments will be passed to
that function when it is run on the server.
Raises :class:`TypeError` if `code` is not an instance of
:class:`basestring` (:class:`str` in python 3) or `Code`.
Raises :class:`~pymongo.errors.OperationFailure` if the eval
fails. Returns the result of the evaluation.
:Parameters:
- `code`: string representation of JavaScript code to be
evaluated
- `args` (optional): additional positional arguments are
passed to the `code` being evaluated
"""
if not isinstance(code, Code):
code = Code(code)
result = self.command("$eval", code, args=args)
return result.get("retval", None)
def __call__(self, *args, **kwargs):
"""This is only here so that some API misusages are easier to debug.
"""
raise TypeError("'Database' object is not callable. If you meant to "
"call the '%s' method on a '%s' object it is "
"failing because no such method exists." % (
self.__name, self.__client.__class__.__name__))
class SystemJS(object):
    """Helper class for dealing with stored JavaScript.

    Maps Python attribute/item access on this object onto documents in
    the server-side ``system.js`` collection of the wrapped database.
    """
    def __init__(self, database):
        """Get a system js helper for the database `database`.

        An instance of :class:`SystemJS` can be created with an instance
        of :class:`Database` through :attr:`Database.system_js`,
        manual instantiation of this class should not be necessary.

        :class:`SystemJS` instances allow for easy manipulation and
        access to server-side JavaScript:

        .. doctest::

          >>> db.system_js.add1 = "function (x) { return x + 1; }"
          >>> db.system.js.find({"_id": "add1"}).count()
          1
          >>> db.system_js.add1(5)
          6.0
          >>> del db.system_js.add1
          >>> db.system.js.find({"_id": "add1"}).count()
          0
        """
        # Writes to system.js must be acknowledged; if the database uses an
        # unacknowledged write concern, swap in a copy with the default one.
        if not database.write_concern.acknowledged:
            database = database.client.get_database(
                database.name, write_concern=WriteConcern())
        # can't just assign it since we've overridden __setattr__
        object.__setattr__(self, "_db", database)
    def __setattr__(self, name, code):
        # Upsert (replace_one(..., True)) the named stored-function document.
        self._db.system.js.replace_one(
            {"_id": name}, {"_id": name, "value": Code(code)}, True)
    def __setitem__(self, name, code):
        # db.system_js["name"] = code behaves like attribute assignment.
        self.__setattr__(name, code)
    def __delattr__(self, name):
        # Remove the stored function; silently a no-op if it does not exist.
        self._db.system.js.delete_one({"_id": name})
    def __delitem__(self, name):
        self.__delattr__(name)
    def __getattr__(self, name):
        # Return a proxy callable that invokes the stored function via a
        # server-side eval; `name` is passed through the Code scope so the
        # lambda does not rely on closure binding at call time.
        return lambda *args: self._db.eval(Code("function() { "
                                                "return this[name].apply("
                                                "this, arguments); }",
                                                scope={'name': name}), *args)
    def __getitem__(self, name):
        return self.__getattr__(name)
    def list(self):
        """Get a list of the names of the functions stored in this database."""
        return [x["_id"] for x in self._db.system.js.find(projection=["_id"])]
| mit |
rsteca/python-social-auth | social/tests/backends/test_soundcloud.py | 92 | 1631 | import json
from social.tests.backends.oauth import OAuth2Test
class SoundcloudOAuth2Test(OAuth2Test):
    """Exercises the Soundcloud OAuth2 backend against canned HTTP bodies.

    The OAuth2Test base class mocks the HTTP layer with the JSON payloads
    declared below and drives the login/pipeline flows.
    """
    backend_path = 'social.backends.soundcloud.SoundcloudOAuth2'
    user_data_url = 'https://api.soundcloud.com/me.json'
    expected_username = 'foobar'
    # Canned token-endpoint response.
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer'
    })
    # Canned /me.json profile; 'username' must match expected_username above.
    user_data_body = json.dumps({
        'website': None,
        'myspace_name': None,
        'public_favorites_count': 0,
        'followings_count': 0,
        'full_name': 'Foo Bar',
        'id': 10101010,
        'city': None,
        'track_count': 0,
        'playlist_count': 0,
        'discogs_name': None,
        'private_tracks_count': 0,
        'followers_count': 0,
        'online': True,
        'username': 'foobar',
        'description': None,
        'subscriptions': [],
        'kind': 'user',
        'quota': {
            'unlimited_upload_quota': False,
            'upload_seconds_left': 7200,
            'upload_seconds_used': 0
        },
        'website_title': None,
        'primary_email_confirmed': False,
        'permalink_url': 'http://soundcloud.com/foobar',
        'private_playlists_count': 0,
        'permalink': 'foobar',
        'upload_seconds_left': 7200,
        'country': None,
        'uri': 'https://api.soundcloud.com/users/10101010',
        'avatar_url': 'https://a1.sndcdn.com/images/'
                      'default_avatar_large.png?ca77017',
        'plan': 'Free'
    })
    def test_login(self):
        """Full login flow using the canned responses."""
        self.do_login()
    def test_partial_pipeline(self):
        """Login flow resumed from a partially-completed pipeline."""
        self.do_partial_pipeline()
| bsd-3-clause |
jchevin/MissionPlanner-master | Lib/wsgiref/headers.py | 229 | 5879 | """Manage HTTP Response Headers
Much of this module is red-handedly pilfered from email.message in the stdlib,
so portions are Copyright (C) 2001,2002 Python Software Foundation, and were
written by Barry Warsaw.
"""
from types import ListType, TupleType
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
import re
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _formatparam(param, value=None, quote=1):
    """Format a key=value pair for use in an HTTP header parameter.

    The value is wrapped in double quotes (with backslashes and embedded
    quotes escaped) when `quote` is true or the value contains special
    characters per `tspecials`; a missing or empty value yields just the
    bare key.
    """
    if value is None or len(value) == 0:
        return param
    if not quote and not tspecials.search(value):
        return '%s=%s' % (param, value)
    escaped = value.replace('\\', '\\\\').replace('"', r'\"')
    return '%s="%s"' % (param, escaped)
class Headers:
    """Manage a collection of HTTP response headers.

    Wraps a caller-supplied list of (name, value) tuples; all mutations
    operate on that same list object. Header-name comparisons are
    case-insensitive. (Python 2 era code: uses types.ListType and
    has_key.)
    """
    def __init__(self,headers):
        # The list is kept by reference, not copied: callers see updates.
        if type(headers) is not ListType:
            raise TypeError("Headers must be a list of name/value tuples")
        self._headers = headers
    def __len__(self):
        """Return the total number of headers, including duplicates."""
        return len(self._headers)
    def __setitem__(self, name, val):
        """Set the value of a header."""
        # Replaces every existing occurrence; the new header is appended,
        # so assignment moves the field to the end of the list.
        del self[name]
        self._headers.append((name, val))
    def __delitem__(self,name):
        """Delete all occurrences of a header, if present.

        Does *not* raise an exception if the header is missing.
        """
        name = name.lower()
        self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name]
    def __getitem__(self,name):
        """Get the first header value for 'name'.

        Return None if the header is missing instead of raising an
        exception.  Note that if the header appears multiple times,
        exactly which occurrence gets returned is undefined.  Use
        get_all() to retrieve all the values for a header field name.
        """
        return self.get(name)
    def has_key(self, name):
        """Return true if the message contains the header."""
        return self.get(name) is not None
    # 'name in headers' shares the has_key implementation.
    __contains__ = has_key
    def get_all(self, name):
        """Return a list of all the values for the named field.

        These will be sorted in the order they appeared in the original header
        list or were added to this instance, and may contain duplicates. Any
        fields deleted and re-inserted are always appended to the header list.
        If no fields exist with the given name, returns an empty list.
        """
        name = name.lower()
        return [kv[1] for kv in self._headers if kv[0].lower()==name]
    def get(self,name,default=None):
        """Get the first header value for 'name', or return 'default'"""
        name = name.lower()
        for k,v in self._headers:
            if k.lower()==name:
                return v
        return default
    def keys(self):
        """Return a list of all the header field names.

        These will be sorted in the order they appeared in the original header
        list, or were added to this instance, and may contain duplicates.
        Any fields deleted and re-inserted are always appended to the header
        list.
        """
        return [k for k, v in self._headers]
    def values(self):
        """Return a list of all header values.

        These will be sorted in the order they appeared in the original header
        list, or were added to this instance, and may contain duplicates.
        Any fields deleted and re-inserted are always appended to the header
        list.
        """
        return [v for k, v in self._headers]
    def items(self):
        """Get all the header fields and values.

        These will be sorted in the order they were in the original header
        list, or were added to this instance, and may contain duplicates.
        Any fields deleted and re-inserted are always appended to the header
        list.
        """
        return self._headers[:]
    def __repr__(self):
        return "Headers(%r)" % self._headers
    def __str__(self):
        """str() returns the formatted headers, complete with end line,
        suitable for direct HTTP transmission."""
        # Trailing ['',''] yields the blank line terminating the header block.
        return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['',''])
    def setdefault(self,name,value):
        """Return first matching header value for 'name', or 'value'

        If there is no header named 'name', add a new header with name 'name'
        and value 'value'."""
        result = self.get(name)
        if result is None:
            self._headers.append((name,value))
            return value
        else:
            return result
    def add_header(self, _name, _value, **_params):
        """Extended header setting.

        _name is the header field to add. keyword arguments can be used to set
        additional parameters for the header field, with underscores converted
        to dashes. Normally the parameter will be added as key="value" unless
        value is None, in which case only the key will be added.

        Example:
        h.add_header('content-disposition', 'attachment', filename='bud.gif')

        Note that unlike the corresponding 'email.message' method, this does
        *not* handle '(charset, language, value)' tuples: all values must be
        strings or None.
        """
        parts = []
        if _value is not None:
            parts.append(_value)
        for k, v in _params.items():
            if v is None:
                parts.append(k.replace('_', '-'))
            else:
                parts.append(_formatparam(k.replace('_', '-'), v))
        self._headers.append((_name, "; ".join(parts)))
| gpl-3.0 |
richardvida/googletest | test/gtest_catch_exceptions_test.py | 2139 | 9901 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = 'vladl@google.com (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_no_ex_test_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
TEST_LIST = gtest_test_utils.Subprocess(
[EXE_PATH, LIST_TESTS_FLAG], env=environ).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
[EX_EXE_PATH], env=environ).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
  # pylint:disable-msg=C6302
  class CatchSehExceptionsTest(gtest_test_utils.TestCase):
    """Tests SEH exception-catching behavior (Windows-only test binaries)."""
    def TestSehExceptions(self, test_output):
      # Shared assertions: the binary must have caught and reported an SEH
      # exception from every fixture/test phase. Capitalized name keeps the
      # unittest runner from picking this helper up as a test case.
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s constructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s destructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUp()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDown()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in the test body'
                   in test_output)
    def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
      """SEH exceptions are caught when C++ exceptions are enabled."""
      self.TestSehExceptions(EX_BINARY_OUTPUT)
    def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
      """SEH exceptions are caught when C++ exceptions are disabled."""
      self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
  """Tests C++ exception-catching behavior.

  Tests in this test case verify that:
  * C++ exceptions are caught and logged as C++ (not SEH) exceptions
  * Exceptions thrown affect the remainder of the test work flow in the
    expected manner.

  All assertions scan EX_BINARY_OUTPUT, the captured output of the
  exception-enabled test binary run at module load time.
  """
  def testCatchesCxxExceptionsInFixtureConstructor(self):
    """An exception from the fixture constructor is caught and reported."""
    self.assert_('C++ exception with description '
                 '"Standard C++ exception" thrown '
                 'in the test fixture\'s constructor'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInConstructorTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')
  # The destructor test only exists in builds where throwing destructors
  # do not terminate the program, so define it conditionally.
  if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
      EX_BINARY_OUTPUT):
    def testCatchesCxxExceptionsInFixtureDestructor(self):
      """An exception from the fixture destructor is caught and reported."""
      self.assert_('C++ exception with description '
                   '"Standard C++ exception" thrown '
                   'in the test fixture\'s destructor'
                   in EX_BINARY_OUTPUT)
      self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
                   'called as expected.'
                   in EX_BINARY_OUTPUT)
  def testCatchesCxxExceptionsInSetUpTestCase(self):
    """An exception in SetUpTestCase() is caught; later phases still run."""
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUpTestCase()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest test body '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
  def testCatchesCxxExceptionsInTearDownTestCase(self):
    """An exception in TearDownTestCase() is caught and reported."""
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDownTestCase()'
                 in EX_BINARY_OUTPUT)
  def testCatchesCxxExceptionsInSetUp(self):
    """An exception in SetUp() is caught; teardown still runs."""
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUp()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInSetUpTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')
  def testCatchesCxxExceptionsInTearDown(self):
    """An exception in TearDown() is caught; class teardown still runs."""
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDown()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
  def testCatchesCxxExceptionsInTestBody(self):
    """An exception in the test body is caught; teardown still runs."""
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in the test body'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
  def testCatchesNonStdCxxExceptions(self):
    """Exceptions not derived from std::exception are also caught."""
    self.assert_('Unknown C++ exception thrown in the test body'
                 in EX_BINARY_OUTPUT)
  def testUnhandledCxxExceptionsAbortTheProgram(self):
    """With catching disabled, an unhandled exception terminates the run."""
    # Filters out SEH exception tests on Windows. Unhandled SEH exceptions
    # cause tests to show pop-up windows there.
    # (Local name fixed from the original misspelling "FITLER_...".)
    FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
    # By default, Google Test doesn't catch the exceptions.
    uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
        [EX_EXE_PATH,
         NO_CATCH_EXCEPTIONS_FLAG,
         FILTER_OUT_SEH_TESTS_FLAG],
        env=environ).output
    self.assert_('Unhandled C++ exception terminating the program'
                 in uncaught_exceptions_ex_binary_output)
    self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
neutralio/nio-cli | tests/test_buildrelease.py | 2 | 1172 | import unittest
from unittest import skipIf
try:
from nio_cli.commands.buildrelease import BuildRelease
niocore_installed = True
except:
niocore_installed = False
class TestCLI(unittest.TestCase):
    @skipIf(not niocore_installed, 'niocore required for buildrelease')
    def test_buildrelease_git_remote_url_parse(self):
        """Support multiple repo url formats from `git remote -v`

        Covers ssh (with and without '.git' / extra slash), https, and
        bare-IP remotes; all must normalize to a git:// URL ending in
        '.git'. BuildRelease is constructed with empty docopt-style
        arguments since only the parser helper is exercised here.
        """
        buildrelease = BuildRelease({
            '<repo-name>': '', '--ip': '', '--port': '',
        })
        parse = buildrelease.parse_url_from_git_remote_command
        self.assertEqual(
            parse(b"origin git@github.com:nio-blocks/repo.git (fetch)"),
            "git://github.com/nio-blocks/repo.git")
        self.assertEqual(
            parse(b"origin git@github.com:/nio-blocks/repo (fetch)"),
            "git://github.com/nio-blocks/repo.git")
        self.assertEqual(
            parse(b"origin https://github.com/nio-blocks/repo (fetch)"),
            "git://github.com/nio-blocks/repo.git")
        self.assertEqual(
            parse(b"origin https://1.2.3.4/nio-blocks/repo (fetch)"),
            "git://1.2.3.4/nio-blocks/repo.git")
| apache-2.0 |
x4dr/NossiNet | NossiInterface/reminder.py | 1 | 5119 | import pathlib
import re
import sqlite3
import time
from datetime import datetime, timedelta, tzinfo
from typing import List, Tuple
import discord
import pytz
from dateutil.tz import gettz
from pytz import reference
# Module-level scratch state. Not referenced elsewhere in this module --
# presumably consumed by the bot's message loop; confirm before removing.
last = {}
delete = []
# Accepted absolute date/time layouts, tried in order by extract_time_delta():
# day-first European styles ("%d.%m..."), bare times, and ISO styles, each
# with and without a time component.
date_formats = [
    "%d.%m.%y %H:%M:%S",
    "%d.%m.%Y %H:%M:%S",
    "%d.%m.%Y",
    "%d.%m %H:%M:%S",
    "%d.%m.%Y %H:%M",
    "%d.%m %H:%M",
    "%d.%m",
    "%H:%M:%S",
    "%H:%M",
    "%y-%m-%d %H:%M:%S",
    "%Y-%m-%d %H:%M:%S",
    "%Y-%m-%d",
    "%y-%m-%d %H:%M",
    "%Y-%m-%d %H:%M",
]
# Regex fragments for relative durations ("2d 3h 15m 10s"); the named groups
# are read back by extract_time_delta() via groupdict().
days = r"(?P<days>\d+)\s*([d:]|days?)"
hours = r"(?P<hours>\d+)\s*([h:]|hours?)"
mins = r"(?P<minutes>\d+)\s*([m:]|min(utes?)?)"
seconds = r"(?P<seconds>\d+)\s*(s(ec(onds?))?)"
def setup_db():
    """Open the reminder SQLite database stored next to this module.

    Creates the `reminders` and `tz` tables on first use and returns the
    open :class:`sqlite3.Connection`.
    """
    db_file = pathlib.Path(__file__).parent / "remind.db"
    connection = sqlite3.connect(db_file)
    schema = (
        "CREATE TABLE IF NOT EXISTS reminders ("
        "id integer PRIMARY KEY,"
        "channel int NOT NULL,"
        "executiondate DATE,"
        "message TEXT);"
        "CREATE TABLE IF NOT EXISTS tz ("
        "id integer PRIMARY KEY,"
        "user int NOT NULL UNIQUE,"
        "tzname TEXT);"
    )
    connection.executescript(schema)
    return connection
reminddb = setup_db()
def next_reminders():
    """Return the three soonest reminder rows, ordered by execution date."""
    query = "SELECT * FROM reminders ORDER BY executiondate LIMIT 3"
    rows = reminddb.cursor().execute(query)
    return rows.fetchall()
def save_reminder(date, channel, message):
    """Persist one reminder row.

    `date` is a unix timestamp (float), `channel` a Discord channel id,
    `message` the text to post when the reminder fires.
    """
    row = (date, channel, message)
    reminddb.cursor().execute(
        "INSERT INTO reminders(executiondate,channel,message) VALUES (?,?,?)",
        row,
    )
    reminddb.commit()
def set_user_tz(user: int, tzname: str):
    """Store `tzname` as `user`'s time zone, replacing any previous value.

    Raises :class:`ValueError` when dateutil cannot resolve `tzname`.
    """
    # Validate before touching the database.
    if not gettz(tzname):
        raise ValueError("Not a valid tz!")
    reminddb.cursor().execute(
        "INSERT OR REPLACE INTO tz(user,tzname) VALUES (?,?)",
        (user, tzname),
    )
    reminddb.commit()
def get_user_tz(user: int) -> tzinfo:
    """Look up the stored time zone for a Discord user id.

    Raises KeyError when the user has no stored time zone.

    NOTE(review): callers such as newreminder() truth-test the returned
    value as if None could come back, but this function raises instead of
    returning None -- confirm which contract is intended.
    """
    cur = reminddb.cursor()
    tzname = cur.execute("SELECT tzname FROM tz WHERE user = ?", (user,)).fetchone()
    if tzname:
        return gettz(tzname[0])
    raise KeyError("no tz!")
def extract_time_delta(inp: str, userid: int):
    """Parse a reminder time specification from the front of `inp`.

    Returns (seconds_from_now, remaining_message). Relative inputs
    ("in 2h 30m", "5d3h") are summed directly; absolute inputs
    ("at 17:00", "on 2021-05-04") are parsed against `date_formats` in
    the user's stored time zone and converted to a delta from now.
    Raises ValueError when an absolute spec matches no known format.
    """
    inp = inp.strip()
    # Relative branch: explicit "in ..." prefix, or a d/h/m/s unit letter
    # early in the string.
    if inp.startswith("in") or any(x in inp[:8] for x in "dhms"):
        inp = inp[2:].strip() if inp.startswith("in") else inp
        # Try progressively shorter unit combinations; the final fallback
        # pattern makes every unit optional (and, being all-optional,
        # always matches at position 0).
        rel = (
            re.match(r"\s*".join((days, hours, mins, seconds)) + r"?\s*", inp)
            or re.match(r"\s*".join((hours, mins, seconds)) + r"?\s*", inp)
            or re.match(r"\s*".join((hours, mins)) + r"?\s*", inp)
            or re.match(seconds + r"\s*", inp)  # seconds without optional ending
            or re.match(
                r"(?=\d)((?P<days>\d+)\s*d\s*)?"
                r"((?P<hours>\d+)\s*h\s*)?"
                r"((?P<minutes>\d+)\s*m?\s*)?"
                r"((?P<seconds>\d+)\s*s?)?\s*",
                inp,
            )
        )
        if rel:
            # Everything after the matched duration is the reminder text.
            msg = inp[rel.end() :]
            d = int(rel.groupdict().get("days") or 0)
            h = int(rel.groupdict().get("hours") or 0)
            m = int(rel.groupdict().get("minutes") or 0)
            s = int(rel.groupdict().get("seconds") or 0)
            return d * 24 * 3600 + h * 3600 + m * 60 + s, msg
    else:
        # Absolute branch: strip an optional "at"/"on" prefix, then split
        # the leading date-like characters from the message.
        inp = inp.removeprefix("at").removeprefix("on")
        date = re.match(r"^(?P<complete>[0-9.: -]*)", inp)
        msg = inp[len(date.group("complete")) :]
        tz = get_user_tz(userid)
        for fmt in date_formats:
            try:
                d = datetime.strptime(date.group("complete").strip(), fmt)
                # strptime defaults the year to 1900 for time-only formats:
                # treat that as "today", or tomorrow if already past.
                if d.year == 1900:
                    d = d.combine(datetime.now().date(), d.time())
                    if d < datetime.now():
                        d += timedelta(days=1)
                # Interpret in the user's zone, then normalize to UTC.
                d = d.replace(tzinfo=tz)
                d = d.astimezone(pytz.utc)
                return (
                    d.timestamp() - time.time(),
                    msg,
                )
            except Exception:
                continue
        # Bare number fallback: interpret as minutes.
        try:
            return int(date.group("complete")) * 60, msg  # minutes by default
        except ValueError:
            raise ValueError("unrecognizeable format:" + inp)
def newreminder(message: discord.Message, msg: str, userid: int):
    """Parse `msg`, store a reminder for the message's channel.

    Returns the scheduled execution time as an aware datetime in the
    user's time zone. A leading "me " makes the reminder mention the
    author; "that I ..." phrasing is rewritten to second person.

    NOTE(review): the time spec is parsed with message.author.id while
    the returned datetime uses `userid` -- confirm these are always the
    same user. Also, get_user_tz() raises KeyError rather than returning
    a falsy value, so the `tz if tz else ...` fallback below is dead code.
    """
    msg = msg.strip()
    if msg.startswith("me "):
        msg = msg[3:]
        mention = message.author.mention + " "
    else:
        mention = ""
    relatime, msg = extract_time_delta(msg, message.author.id)
    msg = msg.removeprefix("that").strip()
    if msg.lower().startswith("i "):
        msg = "You" + msg[1:]
    date = time.time() + relatime
    save_reminder(date, message.channel.id, mention + msg)
    tz = get_user_tz(userid)
    return datetime.fromtimestamp(int(date), tz if tz else reference.LocalTimezone())
def delreminder(reminder_id):
    """Delete the reminder row with the given primary key (no-op if absent)."""
    reminddb.cursor().execute(
        "DELETE FROM reminders WHERE id=?",
        (reminder_id,),
    )
    reminddb.commit()
def listreminder(message: discord.Message) -> List[Tuple[int, int, float, str]]:
    """Fetch all reminders stored for the channel `message` was sent in.

    Returns (id, channel, executiondate, message) tuples.
    """
    rows = reminddb.cursor().execute(
        "SELECT id, channel, executiondate, message FROM reminders WHERE channel=?",
        (message.channel.id,),
    )
    return rows.fetchall()
| gpl-2.0 |
gilneidp/TADD | lib/packet/packet_base.py | 46 | 6226 | # Copyright 2011 James McCauley
# Copyright 2008 (C) Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is derived from the packet library in NOX, which was
# developed by Nicira, Inc.
import logging
lg = logging.getLogger('packet')
from pox.lib.util import initHelper
class packet_base (object):
  """
  TODO: This description is somewhat outdated and should be fixed.
  Base class for packets.
  Classes that perform packet manipulation (parsing and contruction)
  should derive from this class.

  The general layout of such a subclass is as follows:
  class foo (packet_base):
    def __init__(data=None, prev=None):
      packet_base.__init__(self)
      # data: is the data for the packet as a "bytes" object.
      # prev: is a pointer to the previous header
      #       which is expected to be of type packet_base
      self.parsed = False
      self.prev = prev
      # define field variables here
      self.bar = 0
      if arr != None:
        self.data = data # Phasing out?
        self.parse(data)
    def parse(self, data):
      # parse packet here and set member variables
      self.parsed = True # signal that packet was succesfully parsed
    def hdr(self, payload):
      # return fields as a string
      return struct.pack('!I',self.bar)
    def __str__(self):
      # optionally convert to human readable string

  NOTE: Python 2 era code (__nonzero__, basestring, bare except).
  """
  def __init__ (self):
    # next: payload (a packet_base subclass or raw bytes); prev: the
    # enclosing header; raw: unparsed bytes kept for re-packing.
    self.next = None
    self.prev = None
    self.parsed = False
    self.raw = None
  def _init (self, kw):
    # Keyword-based field initialization helper; 'payload' is routed
    # through set_payload() so prev/next links stay consistent.
    if 'payload' in kw:
      self.set_payload(kw['payload'])
      del kw['payload']
    initHelper(self, kw)
  def msg(self, *args):
    """ Shortcut for logging """
    #TODO: Remove?
    lg.info(*args)
  def err(self, *args):
    """ Shortcut for logging """
    #TODO: Remove?
    lg.error(*args)
  def warn(self, *args):
    """ Shortcut for logging """
    #TODO: Remove?
    lg.warning(*args)
  def __nonzero__(self):
    # Truthiness == "was parsed successfully" (Python 2 protocol).
    return self.parsed is True
  def __len__(self):
    # Length of the fully packed representation (header + payload chain).
    return len(self.pack())
  def __str__(self):
    # Prefer a subclass-provided _to_str(); fall back to a generic
    # "[ClassName l:<len>]" form ("*" marks a missing payload).
    if hasattr(self, "_to_str"):
      try:
        return self._to_str()
      except Exception as e:
        #import traceback
        #traceback.print_exc()
        lg.debug("str(%s): %s" % (self.__class__.__name__, e))
        return "[%s:Bad representation]" % (self.__class__.__name__,)
    return "[%s l:%i%s]" % (self.__class__.__name__, len(self),
                            "" if self.next else " *")
  def dump(self):
    """Render this header and its whole payload chain as one string."""
    p = self
    m = []
    while p is not None:
      if not isinstance(p, packet_base):
        # Chain terminator that is not a packet: summarize raw bytes
        # (first 5 shown as hex) or any other object by class name.
        if isinstance(p, bytes):
          if len(p) == 0:
            m.append("[0 bytes]")
            break
          s = ''
          for t in range(min(len(p), 5)):
            s += "%02x " % (ord(p[t]),)
          if len(p) > 5: s += "..."
          s = s.rstrip()
          m.append("[%s bytes: " % (len(p),) + s + "]")
          break
        try:
          l = len(p)
          m.append("[%s l:%i]" % (p.__class__.__name__, l))
        except:
          m.append("[%s]" % (p.__class__.__name__,))
        break
      m.append(str(p))
      p = p.next
    return "".join(m)
  def find(self, proto):
    """
    Find the specified protocol layer based on its class type or name.
    """
    if not isinstance(proto, basestring):
      proto = proto.__name__
    if self.__class__.__name__ == proto and self.parsed:
      return self
    else:
      # Recurse down the payload chain; raw-bytes payloads end the search.
      if self.next and isinstance(self.next, packet_base):
        return self.next.find(proto)
      else:
        return None
  @property
  def payload (self):
    """
    The packet payload property.

    Reading this property is generally the same as the "next" field.
    Setting this generally sets this packet's "next" field, as well as
    setting the new payload's "prev" field to point back to its new
    container (the same as the set_payload() method).
    """
    return self.next
  @payload.setter
  def payload (self, new_payload):
    self.set_payload(new_payload)
  def set_payload(self, payload):
    '''
    Set the packet payload.  Expects bytes or a packet_base subclass.
    '''
    if isinstance(payload, packet_base):
      self.next = payload
      payload.prev = self
    elif type(payload) == bytes:
      self.next = payload
    else:
      raise TypeError("payload must be string or packet subclass")
  def parse(self, raw):
    '''Override me with packet parsing code'''
    raise NotImplementedError("parse() not implemented")
  def pre_hdr(self):
    '''Override to prepare before payload is packed'''
    pass
  def hdr(self, payload):
    '''Override me to return packet headers'''
    raise NotImplementedError("hdr() not implemented")
  @classmethod
  def unpack (cls, raw, prev=None):
    # Alternate constructor: build an instance directly from raw bytes.
    return cls(raw=raw, prev=prev)
  def pack(self):
    '''Convert header and payload to bytes'''
    # Fast path: never parsed and unmodified -- re-emit the original bytes.
    if self.parsed is False and self.raw is not None and self.next is None:
      return self.raw
    self.pre_hdr()
    if self.next == None:
      return self.hdr(b'')
    elif isinstance(self.next, packet_base):
      rest = self.next.pack()
    else:
      rest = self.next
    return self.hdr(rest) + rest
| apache-2.0 |
mradamcox/ead | ead/models/forms.py | 1 | 49635 | '''
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from arches.app.models.models import RelatedResource
from arches.app.models.entity import Entity
from arches.app.models.resource import Resource
from arches.app.models.forms import ResourceForm
from arches.app.utils.imageutils import generate_thumbnail
from arches.app.views.concept import get_preflabel_from_valueid
from arches.app.utils.betterJSONSerializer import JSONSerializer, JSONDeserializer
from arches.app.search.search_engine_factory import SearchEngineFactory
from django.forms.models import model_to_dict
from django.utils.translation import ugettext as _
from django.forms.models import model_to_dict
from datetime import datetime
from ead.models.concept import Concept
def datetime_nodes_to_dates(branch_list):
    """Convert any datetime node values in `branch_list` to plain dates.

    Mutates matching nodes in place (both `value` and `label`) and
    returns the same list for convenient chaining; non-datetime values
    are left untouched.
    """
    for branch in branch_list:
        for node in branch['nodes']:
            current = node.value
            if not isinstance(current, datetime):
                continue
            as_date = current.date()
            node.value = as_date
            node.label = as_date
    return branch_list
class SummaryForm(ResourceForm):
    """Resource Summary form: name, keyword and type-classification nodes."""
    @staticmethod
    def get_info():
        # Registration metadata consumed by the form framework.
        return {
            'id': 'summary',
            'icon': 'fa-tag',
            'name': _('Resource Summary'),
            'class': SummaryForm
        }
    def update(self, data, files):
        """Write submitted name/keyword (and, for heritage resources,
        type classification) branches back to the resource."""
        self.update_nodes('NAME.E41', data)
        self.update_nodes('KEYWORD.E55', data)
        # Type classification only applies to these two entity types.
        if self.resource.entitytypeid in ('HERITAGE_RESOURCE.E18', 'HERITAGE_RESOURCE_GROUP.E27'):
            self.update_nodes('RESOURCE_TYPE_CLASSIFICATION.E55', data)
        ## DATE NODES NOT IN USE 5-9-17
        # beginning_of_existence_nodes = []
        # end_of_existence_nodes = []
        # for branch_list in data['important_dates']:
        #     for node in branch_list['nodes']:
        #         if node['entitytypeid'] == 'BEGINNING_OF_EXISTENCE_TYPE.E55':
        #             beginning_of_existence_nodes.append(branch_list)
        #         if node['entitytypeid'] == 'END_OF_EXISTENCE_TYPE.E55':
        #             end_of_existence_nodes.append(branch_list)
        # for branch_list in beginning_of_existence_nodes:
        #     for node in branch_list['nodes']:
        #         if node['entitytypeid'] == 'START_DATE_OF_EXISTENCE.E49,END_DATE_OF_EXISTENCE.E49':
        #             node['entitytypeid'] = 'START_DATE_OF_EXISTENCE.E49'
        # for branch_list in end_of_existence_nodes:
        #     for node in branch_list['nodes']:
        #         if node['entitytypeid'] == 'START_DATE_OF_EXISTENCE.E49,END_DATE_OF_EXISTENCE.E49':
        #             node['entitytypeid'] = 'END_DATE_OF_EXISTENCE.E49'
        # self.update_nodes('BEGINNING_OF_EXISTENCE.E63', {'BEGINNING_OF_EXISTENCE.E63':beginning_of_existence_nodes})
        # self.update_nodes('END_OF_EXISTENCE.E64', {'END_OF_EXISTENCE.E64':end_of_existence_nodes})
    def load(self, lang):
        """Populate self.data with branch lists and concept domains for
        rendering this form."""
        ## DATE NODES NOT IN USE 5-9-17
        # self.data['important_dates'] = {
        #     'branch_lists': datetime_nodes_to_dates(self.get_nodes('BEGINNING_OF_EXISTENCE.E63') + self.get_nodes('END_OF_EXISTENCE.E64')),
        #     'domains': {'important_dates' : Concept().get_e55_domain('BEGINNING_OF_EXISTENCE_TYPE.E55') + Concept().get_e55_domain('END_OF_EXISTENCE_TYPE.E55')}
        # }
        if self.resource:
            if self.resource.entitytypeid in ('HERITAGE_RESOURCE.E18', 'HERITAGE_RESOURCE_GROUP.E27'):
                self.data['RESOURCE_TYPE_CLASSIFICATION.E55'] = {
                    'branch_lists': self.get_nodes('RESOURCE_TYPE_CLASSIFICATION.E55'),
                    'domains': {'RESOURCE_TYPE_CLASSIFICATION.E55' : Concept().get_e55_domain('RESOURCE_TYPE_CLASSIFICATION.E55')}
                }
            self.data['NAME.E41'] = {
                'branch_lists': self.get_nodes('NAME.E41'),
                'domains': {'NAME_TYPE.E55' : Concept().get_e55_domain('NAME_TYPE.E55')}
                # 'defaults': {
                #     'NAME_TYPE.E55': default_name_type['id'],
                #     'NAME.E41': ''
                # }
            }
            self.data['KEYWORD.E55'] = {
                'branch_lists': self.get_nodes('KEYWORD.E55'),
                'domains': {'KEYWORD.E55' : Concept().get_e55_domain('KEYWORD.E55')}
            }
            # Index [3] picks a specific concept from the NAME_TYPE domain as
            # the primary-name concept -- presumably ordering-dependent;
            # TODO(review): confirm the domain ordering guarantees this.
            try:
                self.data['primaryname_conceptid'] = self.data['NAME.E41']['domains']['NAME_TYPE.E55'][3]['id']
            except IndexError:
                pass
class ExternalReferenceForm(ResourceForm):
    """External-system references tab: links to records for this resource
    held in other systems."""

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'external-reference',
            'icon': 'fa-random',
            'name': _('External System References'),
            'class': ExternalReferenceForm
        }

    def update(self, data, files):
        # Only one branch to persist.
        self.update_nodes('EXTERNAL_RESOURCE.E1', data)

    def load(self, lang):
        if not self.resource:
            return
        self.data['EXTERNAL_RESOURCE.E1'] = {
            'branch_lists': self.get_nodes('EXTERNAL_RESOURCE.E1'),
            'domains': {
                'EXTERNAL_XREF_TYPE.E55': Concept().get_e55_domain('EXTERNAL_XREF_TYPE.E55'),
            },
        }
class ActivityActionsForm(ResourceForm):
    """Actions tab for activities: phase/type assignment branches with an
    activity-type domain."""

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'activity-actions',
            'icon': 'fa-flash',
            'name': _('Actions'),
            'class': ActivityActionsForm
        }

    def update(self, data, files):
        self.update_nodes('PHASE_TYPE_ASSIGNMENT.E17', data)

    def load(self, lang):
        if not self.resource:
            return
        # Normalize any datetime node values to plain dates for the client.
        branches = datetime_nodes_to_dates(self.get_nodes('PHASE_TYPE_ASSIGNMENT.E17'))
        self.data['PHASE_TYPE_ASSIGNMENT.E17'] = {
            'branch_lists': branches,
            'domains': {
                'ACTIVITY_TYPE.E55': Concept().get_e55_domain('ACTIVITY_TYPE.E55'),
            },
        }
class ActivitySummaryForm(ResourceForm):
    """Summary tab for activity resources: names, keywords and
    beginning/end-of-existence events."""

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'activity-summary',
            'icon': 'fa-tag',
            'name': _('Resource Summary'),
            'class': ActivitySummaryForm
        }

    def update(self, data, files):
        # Persist every branch this tab edits.
        for branch in ('NAME.E41', 'KEYWORD.E55',
                       'BEGINNING_OF_EXISTENCE.E63', 'END_OF_EXISTENCE.E64'):
            self.update_nodes(branch, data)

    def load(self, lang):
        if not self.resource:
            return
        self.data['NAME.E41'] = {
            'branch_lists': self.get_nodes('NAME.E41'),
            'domains': {'NAME_TYPE.E55': Concept().get_e55_domain('NAME_TYPE.E55')}
        }
        self.data['KEYWORD.E55'] = {
            'branch_lists': self.get_nodes('KEYWORD.E55'),
            'domains': {'KEYWORD.E55': Concept().get_e55_domain('KEYWORD.E55')}
        }
        # Existence events carry dates: normalize datetimes to plain dates.
        self.data['BEGINNING_OF_EXISTENCE.E63'] = {
            'branch_lists': datetime_nodes_to_dates(self.get_nodes('BEGINNING_OF_EXISTENCE.E63')),
            'domains': {
                'BEGINNING_OF_EXISTENCE_TYPE.E55': Concept().get_e55_domain('BEGINNING_OF_EXISTENCE_TYPE.E55')
            }
        }
        self.data['END_OF_EXISTENCE.E64'] = {
            'branch_lists': datetime_nodes_to_dates(self.get_nodes('END_OF_EXISTENCE.E64')),
            'domains': {
                'END_OF_EXISTENCE_TYPE.E55': Concept().get_e55_domain('END_OF_EXISTENCE_TYPE.E55')
            }
        }
        # The fourth NAME_TYPE concept is treated as the primary-name type.
        try:
            self.data['primaryname_conceptid'] = self.data['NAME.E41']['domains']['NAME_TYPE.E55'][3]['id']
        except IndexError:
            pass
class ComponentForm(ResourceForm):
    """Components tab: building components and modification events.

    Nodes for each posted branch are accumulated onto a per-branch base
    entity and merged into the resource (components hang off the
    PRODUCTION.E12 event when one exists).
    """

    # Accumulator for the entity graph of the branch currently being built;
    # reset per branch list in update_nodes().
    baseentity = None

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'component',
            'icon': 'fa fa-bar-chart-o',
            'name': _('Components'),
            'class': ComponentForm
        }

    def update(self, data, files):
        self.update_nodes('COMPONENT.E18', data)
        self.update_nodes('MODIFICATION_EVENT.E11', data)
        return

    def update_nodes(self, entitytypeid, data):
        """Replace all entities of *entitytypeid* with the branches in *data*.

        Bug fix: the original iterated ``value[nodes]``, referencing an
        undefined name ``nodes`` (a guaranteed NameError); the intended key
        is the string ``'nodes'``, as used by every sibling form.
        """
        self.resource.prune(entitytypes=[entitytypeid])
        if self.schema is None:
            self.schema = Entity.get_mapping_schema(self.resource.entitytypeid)
        for value in data[entitytypeid]:
            self.baseentity = None
            for newentity in value['nodes']:  # was: value[nodes] (NameError)
                entity = Entity()
                entity.create_from_mapping(self.resource.entitytypeid, self.schema[newentity['entitytypeid']]['steps'], newentity['entitytypeid'], newentity['value'], newentity['entityid'])
                if self.baseentity is None:
                    self.baseentity = entity
                else:
                    self.baseentity.merge(entity)
            if entitytypeid == 'COMPONENT.E18':
                # Components are grafted onto the production event when the
                # resource has one; otherwise onto the resource root.
                production_entities = self.resource.find_entities_by_type_id('PRODUCTION.E12')
                if len(production_entities) > 0:
                    self.resource.merge_at(self.baseentity, 'PRODUCTION.E12')
                else:
                    self.resource.merge_at(self.baseentity, self.resource.entitytypeid)
            else:
                self.resource.merge_at(self.baseentity, self.resource.entitytypeid)
        self.resource.trim()

    def load(self, lang):
        if self.resource:
            self.data['COMPONENT.E18'] = {
                'branch_lists': self.get_nodes('COMPONENT.E18'),
                'domains': {
                    'CONSTRUCTION_TECHNIQUE.E55': Concept().get_e55_domain('CONSTRUCTION_TECHNIQUE.E55'),
                    'MATERIAL.E57': Concept().get_e55_domain('MATERIAL.E57'),
                    'COMPONENT_TYPE.E55': Concept().get_e55_domain('COMPONENT_TYPE.E55')
                }
            }
            self.data['MODIFICATION_EVENT.E11'] = {
                'branch_lists': self.get_nodes('MODIFICATION_EVENT.E11'),
                'domains': {
                    'MODIFICATION_TYPE.E55': Concept().get_e55_domain('MODIFICATION_TYPE.E55'),
                }
            }
class ClassificationForm(ResourceForm):
    """Classification tab for heritage resources: phase/type assignments with
    cultural period, dynasty and ruler domains."""

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'classification',
            'icon': 'fa-adjust',
            'name': _('Classifications'),
            'class': ClassificationForm
        }

    def update(self, data, files):
        self.update_nodes('PHASE_TYPE_ASSIGNMENT.E17', data)

    def load(self, lang):
        # NOTE(review): type_node is only bound for the two heritage entity
        # types; any other entitytypeid would raise NameError below —
        # presumably this form is only ever attached to those types. Confirm.
        if self.resource.entitytypeid == 'HERITAGE_RESOURCE.E18':
            type_node = 'HERITAGE_RESOURCE_TYPE.E55'
        if self.resource.entitytypeid == 'HERITAGE_RESOURCE_GROUP.E27':
            type_node = 'HERITAGE_RESOURCE_GROUP_TYPE.E55'
        if not self.resource:
            return
        self.data['PHASE_TYPE_ASSIGNMENT.E17'] = {
            'branch_lists': self.get_nodes('PHASE_TYPE_ASSIGNMENT.E17'),
            'domains': {
                type_node: Concept().get_e55_domain(type_node),
                'CULTURAL_PERIOD.E55': Concept().get_e55_domain('CULTURAL_PERIOD.E55'),
                'DYNASTY.E55': Concept().get_e55_domain('DYNASTY.E55'),
                'RULER.E55': Concept().get_e55_domain('RULER.E55'),
            },
        }
class HistoricalEventSummaryForm(ActivitySummaryForm):
    """Summary tab for historical events; reuses ActivitySummaryForm's
    update/load behavior under its own form id."""

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'historical-event-summary',
            'icon': 'fa-tag',
            'name': _('Resource Summary'),
            'class': HistoricalEventSummaryForm
        }
class InformationResourceSummaryForm(ResourceForm):
    """Summary tab for information resources: titles, identifiers, keywords,
    carrier format and language."""

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'information-resource-summary',
            'icon': 'fa-tag',
            'name': _('Resource Summary'),
            'class': InformationResourceSummaryForm
        }

    def update(self, data, files):
        # Persist every branch this tab edits.
        for branch in ('TITLE.E41', 'IDENTIFIER.E42', 'KEYWORD.E55',
                       'INFORMATION_CARRIER.E84', 'LANGUAGE.E55'):
            self.update_nodes(branch, data)

    def load(self, lang):
        if not self.resource:
            return
        self.data['TITLE.E41'] = {
            'branch_lists': self.get_nodes('TITLE.E41'),
            'domains': {'TITLE_TYPE.E55': Concept().get_e55_domain('TITLE_TYPE.E55')}
        }
        self.data['IDENTIFIER.E42'] = {
            'branch_lists': self.get_nodes('IDENTIFIER.E42'),
            'domains': {
                'IDENTIFIER_TYPE.E55': Concept().get_e55_domain('IDENTIFIER_TYPE.E55')
            }
        }
        self.data['INFORMATION_CARRIER.E84'] = {
            'branch_lists': self.get_nodes('INFORMATION_CARRIER.E84'),
            'domains': {
                'INFORMATION_CARRIER_FORMAT_TYPE.E55': Concept().get_e55_domain('INFORMATION_CARRIER_FORMAT_TYPE.E55')
            }
        }
        self.data['LANGUAGE.E55'] = {
            'branch_lists': self.get_nodes('LANGUAGE.E55'),
            'domains': {'LANGUAGE.E55': Concept().get_e55_domain('LANGUAGE.E55')}
        }
        self.data['KEYWORD.E55'] = {
            'branch_lists': self.get_nodes('KEYWORD.E55'),
            'domains': {'KEYWORD.E55': Concept().get_e55_domain('KEYWORD.E55')}
        }
class DescriptionForm(ResourceForm):
    """Descriptions tab: free-text descriptions, plus a parallel Arabic
    description branch for heritage resources."""

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'description',
            'icon': 'fa-picture-o',
            'name': _('Descriptions'),
            'class': DescriptionForm
        }

    def update(self, data, files):
        self.update_nodes('DESCRIPTION.E62', data)
        if self.resource.entitytypeid in ['HERITAGE_RESOURCE.E18', 'HERITAGE_RESOURCE_GROUP.E27']:
            self.update_nodes('ARABIC_DESCRIPTION.E62', data)

    def load(self, lang):
        description_types = Concept().get_e55_domain('DESCRIPTION_TYPE.E55')
        try:
            # The third concept in the English domain supplies the default
            # type for both the English and Arabic branches; a short domain
            # (IndexError) skips the whole tab, as before.
            default_description_type = description_types[2]
            if not self.resource:
                return
            self.data['DESCRIPTION.E62'] = {
                'branch_lists': self.get_nodes('DESCRIPTION.E62'),
                'domains': {'DESCRIPTION_TYPE.E55': description_types},
                'defaults': {
                    'DESCRIPTION_TYPE.E55': default_description_type['id'],
                },
            }
            if self.resource.entitytypeid in ['HERITAGE_RESOURCE.E18', 'HERITAGE_RESOURCE_GROUP.E27']:
                arabic_types = Concept().get_e55_domain('ARABIC_DESCRIPTION_TYPE.E55')
                self.data['ARABIC_DESCRIPTION.E62'] = {
                    'branch_lists': self.get_nodes('ARABIC_DESCRIPTION.E62'),
                    'domains': {'ARABIC_DESCRIPTION_TYPE.E55': arabic_types},
                    'defaults': {
                        'ARABIC_DESCRIPTION_TYPE.E55': default_description_type['id'],
                    },
                }
        except IndexError:
            pass
class MeasurementForm(ResourceForm):
    """Measurements tab: measurement type and unit-of-measurement branches."""

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'measurement',
            'icon': 'fa-th-large',
            'name': _('Measurements'),
            'class': MeasurementForm
        }

    def update(self, data, files):
        self.update_nodes('MEASUREMENT_TYPE.E55', data)

    def load(self, lang):
        if not self.resource:
            return
        self.data['MEASUREMENT_TYPE.E55'] = {
            'branch_lists': self.get_nodes('MEASUREMENT_TYPE.E55'),
            'domains': {
                'MEASUREMENT_TYPE.E55': Concept().get_e55_domain('MEASUREMENT_TYPE.E55'),
                'UNIT_OF_MEASUREMENT.E55': Concept().get_e55_domain('UNIT_OF_MEASUREMENT.E55'),
            },
        }
class ConditionForm(ResourceForm):
    """Condition-assessment tab.

    A CONDITION_ASSESSMENT.E14 entity groups condition, threat, disturbance,
    recommendation, description, date and image branches. During update(),
    nodes from all branches are accumulated onto one base entity and merged
    into the resource in a single final step.
    """

    # Accumulator for the entity graph built during update(); set by
    # update_nodes() and merged into the resource at the end of update().
    baseentity = None

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'condition',
            'icon': 'fa-asterisk',
            'name': _('Condition Assessment'),
            'class': ConditionForm
        }

    def get_nodes(self, entity, entitytypeid):
        # Flatten every child entity of the given type into branch dicts.
        # (Shadows ResourceForm.get_nodes: reads from *entity*, not self.resource.)
        ret = []
        entities = entity.find_entities_by_type_id(entitytypeid)
        for entity in entities:
            ret.append({'nodes': entity.flatten()})
        return ret

    def update_nodes(self, entitytypeid, data):
        """Build entities for one posted branch type and fold them into
        self.baseentity (images are grafted under CONDITION_STATE.E3)."""
        if self.schema == None:
            self.schema = Entity.get_mapping_schema(self.resource.entitytypeid)
        for value in data[entitytypeid]:
            if entitytypeid == 'CONDITION_IMAGE.E73':
                # Image branches attach to the already-accumulated base
                # entity; assumes baseentity is set, i.e. update() processed
                # the other branches first — TODO confirm ordering contract.
                temp = None
                for newentity in value['nodes']:
                    if newentity['entitytypeid'] != 'CONDITION_IMAGE.E73':
                        entity = Entity()
                        entity.create_from_mapping(self.resource.entitytypeid, self.schema[newentity['entitytypeid']]['steps'], newentity['entitytypeid'], newentity['value'], newentity['entityid'])
                        if temp == None:
                            temp = entity
                        else:
                            temp.merge(entity)
                self.baseentity.merge_at(temp, 'CONDITION_STATE.E3')
            else:
                for newentity in value['nodes']:
                    entity = Entity()
                    entity.create_from_mapping(self.resource.entitytypeid, self.schema[newentity['entitytypeid']]['steps'], newentity['entitytypeid'], newentity['value'], newentity['entityid'])
                    if self.baseentity == None:
                        self.baseentity = entity
                    else:
                        self.baseentity.merge(entity)

    def update(self, data, files):
        """Rebuild the condition assessment from posted data and uploads."""
        # Newly uploaded files become CONDITION_IMAGE branches (path + thumbnail).
        if len(files) > 0:
            for f in files:
                data['CONDITION_IMAGE.E73'].append({
                    'nodes':[{
                        'entitytypeid': 'CONDITION_IMAGE_FILE_PATH.E62',
                        'entityid': '',
                        'value': files[f]
                    },{
                        'entitytypeid': 'CONDITION_IMAGE_THUMBNAIL.E62',
                        'entityid': '',
                        'value': generate_thumbnail(files[f])
                    }]
                })
        # Remove previously saved assessment entities; they are rebuilt below.
        for value in data['CONDITION_ASSESSMENT.E14']:
            for node in value['nodes']:
                if node['entitytypeid'] == 'CONDITION_ASSESSMENT.E14' and node['entityid'] != '':
                    #remove the node
                    self.resource.filter(lambda entity: entity.entityid != node['entityid'])
        # Order matters: CONDITION_IMAGE.E73 must come last so baseentity is
        # populated before images are merged (see update_nodes).
        self.update_nodes('CONDITION_TYPE.E55', data)
        self.update_nodes('THREAT_TYPE.E55', data)
        self.update_nodes('RECOMMENDATION_TYPE.E55', data)
        self.update_nodes('DATE_CONDITION_ASSESSED.E49', data)
        self.update_nodes('CONDITION_DESCRIPTION.E62', data)
        self.update_nodes('DISTURBANCE_TYPE.E55', data)
        self.update_nodes('CONDITION_IMAGE.E73', data)
        self.resource.merge_at(self.baseentity, self.resource.entitytypeid)
        self.resource.trim()

    def load(self, lang):
        """Populate self.data with one entry per saved condition assessment."""
        self.data = {
            'data': [],
            'domains': {
                'DISTURBANCE_TYPE.E55': Concept().get_e55_domain('DISTURBANCE_TYPE.E55'),
                'CONDITION_TYPE.E55' : Concept().get_e55_domain('CONDITION_TYPE.E55'),
                'THREAT_TYPE.E55' : Concept().get_e55_domain('THREAT_TYPE.E55'),
                'RECOMMENDATION_TYPE.E55' : Concept().get_e55_domain('RECOMMENDATION_TYPE.E55')
            }
        }
        condition_assessment_entities = self.resource.find_entities_by_type_id('CONDITION_ASSESSMENT.E14')
        for entity in condition_assessment_entities:
            self.data['data'].append({
                'DISTURBANCE_TYPE.E55': {
                    'branch_lists': self.get_nodes(entity, 'DISTURBANCE_TYPE.E55')
                },
                'CONDITION_TYPE.E55': {
                    'branch_lists': self.get_nodes(entity, 'CONDITION_TYPE.E55')
                },
                'THREAT_TYPE.E55': {
                    'branch_lists': self.get_nodes(entity, 'THREAT_TYPE.E55')
                },
                'RECOMMENDATION_TYPE.E55': {
                    'branch_lists': self.get_nodes(entity, 'RECOMMENDATION_TYPE.E55')
                },
                'DATE_CONDITION_ASSESSED.E49': {
                    'branch_lists': datetime_nodes_to_dates(self.get_nodes(entity, 'DATE_CONDITION_ASSESSED.E49'))
                },
                'CONDITION_DESCRIPTION.E62': {
                    'branch_lists': self.get_nodes(entity, 'CONDITION_DESCRIPTION.E62')
                },
                'CONDITION_IMAGE.E73': {
                    'branch_lists': self.get_nodes(entity, 'CONDITION_IMAGE.E73')
                },
                'CONDITION_ASSESSMENT.E14': {
                    'branch_lists': self.get_nodes(entity, 'CONDITION_ASSESSMENT.E14')
                }
            })
class LocationForm(ResourceForm):
    """Location tab: geometry, location description, grid references,
    administrative subdivisions, cadastral references and setting — which
    branches apply varies by resource type."""

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'location',
            'icon': 'fa-map-marker',
            'name': _('Location'),
            'class': LocationForm
        }

    def update(self, data, files):
        """Persist the location branches that apply to this resource type.

        Fix: the original called update_nodes('DESCRIPTION_OF_LOCATION.E62')
        twice for resources that are not actors/activities/events; since
        update_nodes prunes and rebuilds the branch from the same posted
        data, the second call was redundant and has been removed.
        """
        if self.resource.entitytypeid not in ['ACTOR.E39']:
            self.update_nodes('SPATIAL_COORDINATES_GEOMETRY.E47', data)
            self.update_nodes('DESCRIPTION_OF_LOCATION.E62', data)
            self.update_nodes('GRID_REF.E42', data)
            self.update_nodes('ADMINISTRATIVE_SUBDIVISION.E48', data)
        if self.resource.entitytypeid not in ['ACTOR.E39', 'ACTIVITY.E7', 'HISTORICAL_EVENT.E5']:
            self.update_nodes('PLACE_APPELLATION_CADASTRAL_REFERENCE.E44', data)
        if self.resource.entitytypeid not in ['ACTOR.E39', 'ACTIVITY.E7', 'HERITAGE_RESOURCE_GROUP.E27', 'HISTORICAL_EVENT.E5']:
            self.update_nodes('SETTING_TYPE.E55', data)
        return

    def load(self, lang):
        # Unlike update(), load() unconditionally exposes every branch.
        self.data['SPATIAL_COORDINATES_GEOMETRY.E47'] = {
            'branch_lists': self.get_nodes('SPATIAL_COORDINATES_GEOMETRY.E47'),
            'domains': {
                'GEOMETRY_QUALIFIER.E55': Concept().get_e55_domain('GEOMETRY_QUALIFIER.E55')
            }
        }
        self.data['GRID_REF.E42'] = {
            'branch_lists': self.get_nodes('GRID_REF.E42'),
            'domains': {
                'GRID_REF_TYPE.E55': Concept().get_e55_domain('GRID_REF_TYPE.E55')
            }
        }
        self.data['DESCRIPTION_OF_LOCATION.E62'] = {
            'branch_lists': self.get_nodes('DESCRIPTION_OF_LOCATION.E62'),
            'domains': {}
        }
        self.data['SETTING_TYPE.E55'] = {
            'branch_lists': self.get_nodes('SETTING_TYPE.E55'),
            'domains': {
                'SETTING_TYPE.E55': Concept().get_e55_domain('SETTING_TYPE.E55')
            }
        }
        self.data['ADMINISTRATIVE_SUBDIVISION.E48'] = {
            'branch_lists': self.get_nodes('ADMINISTRATIVE_SUBDIVISION.E48'),
            'domains': {
                'ADMINISTRATIVE_SUBDIVISION_TYPE.E55': Concept().get_e55_domain('ADMINISTRATIVE_SUBDIVISION_TYPE.E55'),
                'ADMINISTRATIVE_SUBDIVISION.E48': Concept().get_e55_domain('ADMINISTRATIVE_SUBDIVISION.E48')
            }
        }
        self.data['PLACE_APPELLATION_CADASTRAL_REFERENCE.E44'] = {
            'branch_lists': self.get_nodes('PLACE_APPELLATION_CADASTRAL_REFERENCE.E44'),
            'domains': {}
        }
        return
class CoverageForm(ResourceForm):
    """Coverage tab: spatial geometry, location description and temporal
    coverage time-span."""

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'coverage',
            'icon': 'fa-crosshairs',
            'name': _('Coverage'),
            'class': CoverageForm
        }

    def update(self, data, files):
        # Persist every branch this tab edits.
        for branch in ('SPATIAL_COORDINATES_GEOMETRY.E47',
                       'DESCRIPTION_OF_LOCATION.E62',
                       'TEMPORAL_COVERAGE_TIME-SPAN.E52'):
            self.update_nodes(branch, data)

    def load(self, lang):
        self.data['SPATIAL_COORDINATES_GEOMETRY.E47'] = {
            'branch_lists': self.get_nodes('SPATIAL_COORDINATES_GEOMETRY.E47'),
            'domains': {
                'GEOMETRY_QUALIFIER.E55': Concept().get_e55_domain('GEOMETRY_QUALIFIER.E55')
            },
        }
        self.data['DESCRIPTION_OF_LOCATION.E62'] = {
            'branch_lists': self.get_nodes('DESCRIPTION_OF_LOCATION.E62'),
            'domains': {},
        }
        self.data['TEMPORAL_COVERAGE_TIME-SPAN.E52'] = {
            'branch_lists': self.get_nodes('TEMPORAL_COVERAGE_TIME-SPAN.E52'),
            'domains': {},
        }
class RelatedFilesForm(ResourceForm):
    """Images-and-files tab.

    Each new upload becomes its own INFORMATION_RESOURCE.E73 resource,
    related to this resource; the tab also supports editing the title,
    description and relationship type of an already-related file, keeping
    the Elasticsearch 'resource_relations' index in sync.
    """

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'related-files',
            'icon': 'fa-file-text-o',
            'name': _('Images and Files'),
            'class': RelatedFilesForm
        }

    def update(self, data, files):
        # Map uploaded file name -> file object so entries in
        # data['new-files'] can reference their upload by 'id'.
        filedict = {}
        se = SearchEngineFactory().create()
        for name in files:
            for f in files.getlist(name):
                filedict[f.name] = f
        # Create one information-resource per uploaded file and relate it.
        for newfile in data.get('new-files', []):
            resource = Resource()
            resource.entitytypeid = 'INFORMATION_RESOURCE.E73'
            resource.set_entity_value('TITLE_TYPE.E55', newfile['title_type']['value'])
            resource.set_entity_value('TITLE.E41', newfile['title'])
            if newfile.get('description') and len(newfile.get('description')) > 0:
                resource.set_entity_value('DESCRIPTION_TYPE.E55', newfile['description_type']['value'])
                resource.set_entity_value('DESCRIPTION.E62', newfile.get('description'))
            resource.set_entity_value('FILE_PATH.E62', filedict[newfile['id']])
            thumbnail = generate_thumbnail(filedict[newfile['id']])
            if thumbnail != None:
                resource.set_entity_value('THUMBNAIL.E62', thumbnail)
            resource.save()
            resource.index()
            # A relationship needs a saved (id-bearing) parent resource.
            if self.resource.entityid == '':
                self.resource.save()
            relationship = self.resource.create_resource_relationship(resource.entityid, relationship_type_id=newfile['relationshiptype']['value'])
            se.index_data(index='resource_relations', doc_type='all', body=model_to_dict(relationship), idfield='resourcexid')
        # Apply edits to an existing related file, if any were posted.
        edited_file = data.get('current-files', None)
        if edited_file:
            title = ''
            title_type = ''
            description = ''
            description_type = ''
            for node in edited_file.get('nodes'):
                if node['entitytypeid'] == 'TITLE.E41':
                    title = node.get('value')
                elif node['entitytypeid'] == 'TITLE_TYPE.E55':
                    title_type = node.get('value')
                elif node['entitytypeid'] == 'DESCRIPTION.E62':
                    description = node.get('value')
                elif node['entitytypeid'] == 'DESCRIPTION_TYPE.E55':
                    description_type = node.get('value')
                elif node['entitytypeid'] == 'ARCHES_RESOURCE_CROSS-REFERENCE_RELATIONSHIP_TYPES.E55':
                    # Relationship pseudo-node: update the stored relationship
                    # type, re-index it, and push the collected title/description
                    # onto the related information resource.
                    # NOTE(review): relies on this node appearing after the
                    # title/description nodes — load() appends it last.
                    resourcexid = node.get('resourcexid')
                    entityid1 = node.get('entityid1')
                    entityid2 = node.get('entityid2')
                    relationship = RelatedResource.objects.get(pk=resourcexid)
                    relationship.relationshiptype = node.get('value')
                    relationship.save()
                    se.delete(index='resource_relations', doc_type='all', id=resourcexid)
                    se.index_data(index='resource_relations', doc_type='all', body=model_to_dict(relationship), idfield='resourcexid')
                    # The relationship's "other" end is whichever entityid is
                    # not this resource.
                    relatedresourceid = entityid2 if self.resource.entityid == entityid1 else entityid1
                    relatedresource = Resource().get(relatedresourceid)
                    relatedresource.set_entity_value('TITLE_TYPE.E55', title_type)
                    relatedresource.set_entity_value('TITLE.E41', title)
                    if description != '':
                        relatedresource.set_entity_value('DESCRIPTION_TYPE.E55', description_type)
                        relatedresource.set_entity_value('DESCRIPTION.E62', description)
                    relatedresource.save()
                    relatedresource.index()
        return

    def load(self, lang):
        # Flatten each related information resource, appending a pseudo-node
        # that carries the relationship type so the client edits both together.
        data = []
        for relatedentity in self.resource.get_related_resources(entitytypeid='INFORMATION_RESOURCE.E73'):
            nodes = relatedentity['related_entity'].flatten()
            dummy_relationship_entity = model_to_dict(relatedentity['relationship'])
            dummy_relationship_entity['entitytypeid'] = 'ARCHES_RESOURCE_CROSS-REFERENCE_RELATIONSHIP_TYPES.E55'
            dummy_relationship_entity['value'] = dummy_relationship_entity['relationshiptype']
            dummy_relationship_entity['label'] = ''
            nodes.append(dummy_relationship_entity)
            data.append({'nodes': nodes, 'relationshiptypelabel': get_preflabel_from_valueid(relatedentity['relationship'].relationshiptype, lang)['value']})
        self.data['current-files'] = {
            'branch_lists': data,
            'domains': {
                'RELATIONSHIP_TYPES.E32': Concept().get_e55_domain('ARCHES_RESOURCE_CROSS-REFERENCE_RELATIONSHIP_TYPES.E55'),
                'TITLE_TYPE.E55': Concept().get_e55_domain('TITLE_TYPE.E55'),
                'DESCRIPTION_TYPE.E55': Concept().get_e55_domain('DESCRIPTION_TYPE.E55')
            }
        }
        return
class FileUploadForm(ResourceForm):
    """File-upload tab: replaces the resource's stored file path and
    thumbnail with the newly uploaded file(s)."""

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'file-upload',
            'icon': 'fa-file-text-o',
            'name': _('File Upload'),
            'class': FileUploadForm
        }

    def update(self, data, files):
        # Drop any previously stored file/thumbnail before attaching new ones.
        self.resource.prune(entitytypes=['FILE_PATH.E62', 'THUMBNAIL.E62'])
        self.resource.trim()
        if not files:
            return
        for uploaded in files.values():
            self.resource.set_entity_value('FILE_PATH.E62', uploaded)
            thumbnail = generate_thumbnail(uploaded)
            if thumbnail != None:
                self.resource.set_entity_value('THUMBNAIL.E62', thumbnail)

    def load(self, lang):
        if not self.resource:
            return
        self.data['INFORMATION_RESOURCE.E73'] = {
            'branch_lists': self.get_nodes('INFORMATION_RESOURCE.E73'),
            'is_image': is_image(self.resource)
        }
def is_image(resource):
    """Return True if any of the resource's carrier-format concepts is
    flagged as viewable in the browser ('ViewableInBrowser' value == 'Y')."""
    for format_type in resource.find_entities_by_type_id('INFORMATION_CARRIER_FORMAT_TYPE.E55'):
        concept = Concept().get(id=format_type['conceptid'], include=['undefined'])
        if any(v.value == 'Y' and v.type == 'ViewableInBrowser' for v in concept.values):
            return True
    return False
class DesignationForm(ResourceForm):
    """Designation tab: protection/designation events for a resource."""

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'designation',
            'icon': 'fa-shield',
            'name': _('Designation'),
            'class': DesignationForm
        }

    def update(self, data, files):
        self.update_nodes('PROTECTION_EVENT.E65', data)

    def load(self, lang):
        if not self.resource:
            return
        self.data['PROTECTION_EVENT.E65'] = {
            'branch_lists': self.get_nodes('PROTECTION_EVENT.E65'),
            'domains': {
                'TYPE_OF_DESIGNATION_OR_PROTECTION.E55': Concept().get_e55_domain('TYPE_OF_DESIGNATION_OR_PROTECTION.E55')
            },
        }
class RoleForm(ResourceForm):
    """Role tab for actors: phase/type assignments with actor type and
    cultural period domains."""

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'roles',
            'icon': 'fa-flash',
            'name': _('Role'),
            'class': RoleForm
        }

    def update(self, data, files):
        self.update_nodes('PHASE_TYPE_ASSIGNMENT.E17', data)

    def load(self, lang):
        if not self.resource:
            return
        # Normalize any datetime node values to plain dates for the client.
        branches = datetime_nodes_to_dates(self.get_nodes('PHASE_TYPE_ASSIGNMENT.E17'))
        self.data['PHASE_TYPE_ASSIGNMENT.E17'] = {
            'branch_lists': branches,
            'domains': {
                'ACTOR_TYPE.E55': Concept().get_e55_domain('ACTOR_TYPE.E55'),
                'CULTURAL_PERIOD.E55': Concept().get_e55_domain('CULTURAL_PERIOD.E55'),
            },
        }
class ActorSummaryForm(ResourceForm):
    """Summary tab for actors: appellations, epithets, existence events and
    keywords."""

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'actor-summary',
            'icon': 'fa-tag',
            'name': _('Actor Summary'),
            'class': ActorSummaryForm
        }

    def update(self, data, files):
        # Persist every branch this tab edits.
        for branch in ('APPELLATION.E41', 'EPITHET.E82',
                       'BEGINNING_OF_EXISTENCE.E63', 'END_OF_EXISTENCE.E64',
                       'KEYWORD.E55'):
            self.update_nodes(branch, data)

    def load(self, lang):
        if not self.resource:
            return
        self.data['APPELLATION.E41'] = {
            'branch_lists': self.get_nodes('APPELLATION.E41'),
            'domains': {
                'NAME_TYPE.E55': Concept().get_e55_domain('NAME_TYPE.E55')
            }
        }
        # Epithets are free text: no domain list.
        self.data['EPITHET.E82'] = {
            'branch_lists': self.get_nodes('EPITHET.E82'),
        }
        # Existence events carry dates: normalize datetimes to plain dates.
        self.data['BEGINNING_OF_EXISTENCE.E63'] = {
            'branch_lists': datetime_nodes_to_dates(self.get_nodes('BEGINNING_OF_EXISTENCE.E63')),
            'domains': {
                'BEGINNING_OF_EXISTENCE_TYPE.E55': Concept().get_e55_domain('BEGINNING_OF_EXISTENCE_TYPE.E55')
            }
        }
        self.data['END_OF_EXISTENCE.E64'] = {
            'branch_lists': datetime_nodes_to_dates(self.get_nodes('END_OF_EXISTENCE.E64')),
            'domains': {
                'END_OF_EXISTENCE_TYPE.E55': Concept().get_e55_domain('END_OF_EXISTENCE_TYPE.E55')
            }
        }
        self.data['KEYWORD.E55'] = {
            'branch_lists': self.get_nodes('KEYWORD.E55'),
            'domains': {
                'KEYWORD.E55': Concept().get_e55_domain('KEYWORD.E55')}
        }
        # The fourth NAME_TYPE concept is treated as the primary-name type.
        try:
            self.data['primaryname_conceptid'] = self.data['APPELLATION.E41']['domains']['NAME_TYPE.E55'][3]['id']
        except IndexError:
            pass
class PhaseForm(ResourceForm):
    """Phase tab for historical events: phase/type assignments with event
    type and cultural period domains."""

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'phase',
            'icon': 'fa-flash',
            'name': _('Phase'),
            'class': PhaseForm
        }

    def update(self, data, files):
        self.update_nodes('PHASE_TYPE_ASSIGNMENT.E17', data)

    def load(self, lang):
        if not self.resource:
            return
        # Normalize any datetime node values to plain dates for the client.
        branches = datetime_nodes_to_dates(self.get_nodes('PHASE_TYPE_ASSIGNMENT.E17'))
        self.data['PHASE_TYPE_ASSIGNMENT.E17'] = {
            'branch_lists': branches,
            'domains': {
                'HISTORICAL_EVENT_TYPE.E55': Concept().get_e55_domain('HISTORICAL_EVENT_TYPE.E55'),
                'CULTURAL_PERIOD.E55': Concept().get_e55_domain('CULTURAL_PERIOD.E55'),
            },
        }
class EvaluationForm(ResourceForm):
    """Evaluation-criteria tab.

    An EVALUATION_CRITERIA_ASSIGNMENT.E13 entity groups status, criteria,
    integrity, eligibility, reasons and evaluation date. During update(),
    nodes from all branches are accumulated onto one base entity and merged
    into the resource in a single final step.
    """

    # Accumulator for the entity graph built during update(); set by
    # update_nodes() and merged into the resource at the end of update().
    baseentity = None

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'evaluation',
            'icon': 'fa-star-half-o',
            'name': _('Evaluation Criteria'),
            'class': EvaluationForm
        }

    def get_nodes(self, entity, entitytypeid):
        # Flatten every child entity of the given type into branch dicts.
        # (Shadows ResourceForm.get_nodes: reads from *entity*, not self.resource.)
        ret = []
        entities = entity.find_entities_by_type_id(entitytypeid)
        for entity in entities:
            ret.append({'nodes': entity.flatten()})
        return ret

    def update_nodes(self, entitytypeid, data):
        """Build entities for one posted branch type and fold them into
        self.baseentity (no per-branch prune; update() removes old data)."""
        # self.resource.prune(entitytypes=[entitytypeid])
        if self.schema == None:
            self.schema = Entity.get_mapping_schema(self.resource.entitytypeid)
        for value in data[entitytypeid]:
            for newentity in value['nodes']:
                entity = Entity()
                entity.create_from_mapping(self.resource.entitytypeid, self.schema[newentity['entitytypeid']]['steps'], newentity['entitytypeid'], newentity['value'], newentity['entityid'])
                if self.baseentity == None:
                    self.baseentity = entity
                else:
                    self.baseentity.merge(entity)
        # self.resource.trim()

    def update(self, data, files):
        """Rebuild the evaluation-criteria assignment from posted data."""
        # Remove previously saved assignment entities; they are rebuilt below.
        for value in data['EVALUATION_CRITERIA_ASSIGNMENT.E13']:
            for node in value['nodes']:
                if node['entitytypeid'] == 'EVALUATION_CRITERIA_ASSIGNMENT.E13' and node['entityid'] != '':
                    #remove the node
                    self.resource.filter(lambda entity: entity.entityid != node['entityid'])
        self.update_nodes('STATUS.E55', data)
        self.update_nodes('EVALUATION_CRITERIA_TYPE.E55', data)
        self.update_nodes('ELIGIBILITY_REQUIREMENT_TYPE.E55', data)
        self.update_nodes('INTEGRITY_TYPE.E55', data)
        self.update_nodes('REASONS.E62', data)
        self.update_nodes('DATE_EVALUATED.E49', data)
        self.resource.merge_at(self.baseentity, self.resource.entitytypeid)
        self.resource.trim()

    def load(self, lang):
        """Populate self.data with one entry per saved evaluation assignment."""
        self.data = {
            'data': [],
            'domains': {
                'STATUS.E55': Concept().get_e55_domain('STATUS.E55'),
                'EVALUATION_CRITERIA_TYPE.E55' : Concept().get_e55_domain('EVALUATION_CRITERIA_TYPE.E55'),
                'INTEGRITY_TYPE.E55' : Concept().get_e55_domain('INTEGRITY_TYPE.E55'),
                'ELIGIBILITY_REQUIREMENT_TYPE.E55' : Concept().get_e55_domain('ELIGIBILITY_REQUIREMENT_TYPE.E55')
            }
        }
        evaluation_assessment_entities = self.resource.find_entities_by_type_id('EVALUATION_CRITERIA_ASSIGNMENT.E13')
        for entity in evaluation_assessment_entities:
            self.data['data'].append({
                'STATUS.E55': {
                    'branch_lists': self.get_nodes(entity, 'STATUS.E55')
                },
                'EVALUATION_CRITERIA_TYPE.E55': {
                    'branch_lists': self.get_nodes(entity, 'EVALUATION_CRITERIA_TYPE.E55')
                },
                'INTEGRITY_TYPE.E55': {
                    'branch_lists': self.get_nodes(entity, 'INTEGRITY_TYPE.E55')
                },
                'ELIGIBILITY_REQUIREMENT_TYPE.E55': {
                    'branch_lists': self.get_nodes(entity, 'ELIGIBILITY_REQUIREMENT_TYPE.E55')
                },
                'REASONS.E62': {
                    'branch_lists': self.get_nodes(entity, 'REASONS.E62')
                },
                'EVALUATION_CRITERIA_ASSIGNMENT.E13': {
                    'branch_lists': self.get_nodes(entity, 'EVALUATION_CRITERIA_ASSIGNMENT.E13')
                },
                'DATE_EVALUATED.E49': {
                    'branch_lists': self.get_nodes(entity, 'DATE_EVALUATED.E49')
                }
            })
class RelatedResourcesForm(ResourceForm):
    """Related-resources tab: creates, updates and deletes resource-to-resource
    relationships, keeping the Elasticsearch 'resource_relations' index in
    sync with the RelatedResource rows."""

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'related-resources',
            'icon': 'fa-exchange',
            'name': _('Related Resources'),
            'class': RelatedResourcesForm
        }

    def update(self, data, files):
        """Reconcile posted relationships against the stored set."""
        se = SearchEngineFactory().create()
        related_resources_data = data.get('related-resources', [])
        original_relations = self.resource.get_related_resources()
        # A relationship needs a saved (id-bearing) parent resource.
        if self.resource.entityid == '':
            self.resource.save()
        relationship_ids = []
        for related_resource in related_resources_data:
            relationship_id = related_resource['relationship']['resourcexid']
            relationship_ids.append(relationship_id)
            resource_id = related_resource['relatedresourceid']
            relationship_type_id = related_resource['relationship']['relationshiptype']
            # The client may post the type either as a bare id or as a
            # {'value': id} concept dict.
            if isinstance(relationship_type_id, dict):
                relationship_type_id = relationship_type_id['value']
            notes = related_resource['relationship']['notes']
            date_started = related_resource['relationship']['datestarted']
            date_ended = related_resource['relationship']['dateended']
            if not relationship_id:
                # No id yet: brand-new relationship.
                relationship = self.resource.create_resource_relationship(resource_id, relationship_type_id=relationship_type_id, notes=notes, date_started=date_started, date_ended=date_ended)
            else:
                # Existing relationship: update the row and drop the stale
                # index document before re-indexing below.
                relationship = RelatedResource.objects.get(pk=relationship_id)
                relationship.relationshiptype = relationship_type_id
                relationship.notes = notes
                relationship.datestarted = date_started
                relationship.dateended = date_ended
                relationship.save()
                se.delete(index='resource_relations', doc_type='all', id=relationship_id)
            se.index_data(index='resource_relations', doc_type='all', body=model_to_dict(relationship), idfield='resourcexid')
        # Relationships that existed before but are absent from the posted
        # set are deleted from both the index and the database.
        for relatedentity in original_relations:
            if relatedentity['relationship'].resourcexid not in relationship_ids:
                se.delete(index='resource_relations', doc_type='all', id=relatedentity['relationship'].resourcexid)
                relatedentity['relationship'].delete()

    def load(self, lang):
        """Expose all current relationships plus the relationship-type domain."""
        data = []
        for relatedentity in self.resource.get_related_resources():
            nodes = relatedentity['related_entity'].flatten()
            data.append({
                'nodes': nodes,
                'relationship': relatedentity['relationship'],
                'relationshiptypelabel': get_preflabel_from_valueid(relatedentity['relationship'].relationshiptype, lang)['value'],
                'relatedresourcename':relatedentity['related_entity'].get_primary_name(),
                'relatedresourcetype':relatedentity['related_entity'].entitytypeid,
                'relatedresourceid':relatedentity['related_entity'].entityid,
                'related': True,
            })
        relationship_types = Concept().get_e55_domain('ARCHES_RESOURCE_CROSS-REFERENCE_RELATIONSHIP_TYPES.E55')
        try:
            # Default to the seventh relationship type when the domain is
            # large enough, otherwise the first; an empty domain (IndexError)
            # leaves the tab unpopulated.
            default_relationship_type = relationship_types[0]['id']
            if len(relationship_types) > 6:
                default_relationship_type = relationship_types[6]['id']
            self.data['related-resources'] = {
                'branch_lists': data,
                'domains': {
                    'RELATIONSHIP_TYPES.E32': relationship_types
                },
                'default_relationship_type': default_relationship_type
            }
            self.data['resource-id'] = self.resource.entityid
        except IndexError:
            pass
class DistrictClassificationForm(ResourceForm):
    """Classification tab for district (heritage resource group) resources.

    Nodes for all classification branches are accumulated onto one base
    entity and merged into the resource (under PRODUCTION.E12 when one
    exists) in a single final step in update().
    """

    # Accumulator for the entity graph built during update(); set by
    # update_nodes() and merged into the resource at the end of update().
    baseentity = None

    @staticmethod
    def get_info():
        # Registration metadata used by the form framework.
        return {
            'id': 'district_classification',
            'icon': 'fa-adjust',
            'name': _('Classifications'),
            'class': DistrictClassificationForm
        }

    def get_nodes(self, entity, entitytypeid):
        # Flatten every child entity of the given type into branch dicts.
        # (Shadows ResourceForm.get_nodes: reads from *entity*, not self.resource.)
        ret = []
        entities = entity.find_entities_by_type_id(entitytypeid)
        for entity in entities:
            ret.append({'nodes': entity.flatten()})
        return ret

    def update_nodes(self, entitytypeid, data):
        """Build entities for one posted branch type and fold them into
        self.baseentity."""
        if self.schema == None:
            self.schema = Entity.get_mapping_schema(self.resource.entitytypeid)
        for value in data[entitytypeid]:
            for newentity in value['nodes']:
                entity = Entity()
                entity.create_from_mapping(self.resource.entitytypeid, self.schema[newentity['entitytypeid']]['steps'], newentity['entitytypeid'], newentity['value'], newentity['entityid'])
                if self.baseentity == None:
                    self.baseentity = entity
                else:
                    self.baseentity.merge(entity)

    def update(self, data, files):
        """Rebuild the phase/type classification from posted data."""
        # Remove previously saved assignment entities; they are rebuilt below.
        for value in data['PHASE_TYPE_ASSIGNMENT.E17']:
            for node in value['nodes']:
                if node['entitytypeid'] == 'PHASE_TYPE_ASSIGNMENT.E17' and node['entityid'] != '':
                    #remove the node
                    self.resource.filter(lambda entity: entity.entityid != node['entityid'])
        self.update_nodes('HERITAGE_RESOURCE_GROUP_TYPE.E55', data)
        self.update_nodes('TO_DATE.E49', data)
        self.update_nodes('FROM_DATE.E49', data)
        self.update_nodes('HERITAGE_RESOURCE_GROUP_USE_TYPE.E55', data)
        self.update_nodes('CULTURAL_PERIOD.E55', data)
        self.update_nodes('ANCILLARY_FEATURE_TYPE.E55', data)
        # Classifications hang off the production event when one exists,
        # otherwise off the resource root.
        production_entities = self.resource.find_entities_by_type_id('PRODUCTION.E12')
        if len(production_entities) > 0:
            self.resource.merge_at(self.baseentity, 'PRODUCTION.E12')
        else:
            self.resource.merge_at(self.baseentity, self.resource.entitytypeid)
        self.resource.trim()

    def load(self, lang):
        """Populate self.data with one entry per saved phase/type assignment."""
        self.data = {
            'data': [],
            'domains': {
                'HERITAGE_RESOURCE_GROUP_TYPE.E55': Concept().get_e55_domain('HERITAGE_RESOURCE_GROUP_TYPE.E55'),
                'HERITAGE_RESOURCE_GROUP_USE_TYPE.E55' : Concept().get_e55_domain('HERITAGE_RESOURCE_GROUP_USE_TYPE.E55'),
                'CULTURAL_PERIOD.E55' : Concept().get_e55_domain('CULTURAL_PERIOD.E55'),
                'ANCILLARY_FEATURE_TYPE.E55' : Concept().get_e55_domain('ANCILLARY_FEATURE_TYPE.E55')
            }
        }
        classification_entities = self.resource.find_entities_by_type_id('PHASE_TYPE_ASSIGNMENT.E17')
        for entity in classification_entities:
            # Normalize any datetime values in the date branches to dates.
            to_date_nodes = datetime_nodes_to_dates(self.get_nodes(entity, 'TO_DATE.E49'))
            from_date_nodes = datetime_nodes_to_dates(self.get_nodes(entity, 'FROM_DATE.E49'))
            self.data['data'].append({
                'HERITAGE_RESOURCE_GROUP_TYPE.E55': {
                    'branch_lists': self.get_nodes(entity, 'HERITAGE_RESOURCE_GROUP_TYPE.E55')
                },
                'HERITAGE_RESOURCE_GROUP_USE_TYPE.E55': {
                    'branch_lists': self.get_nodes(entity, 'HERITAGE_RESOURCE_GROUP_USE_TYPE.E55')
                },
                'CULTURAL_PERIOD.E55': {
                    'branch_lists': self.get_nodes(entity, 'CULTURAL_PERIOD.E55')
                },
                'TO_DATE.E49': {
                    'branch_lists': to_date_nodes
                },
                'FROM_DATE.E49': {
                    'branch_lists': from_date_nodes
                },
                'ANCILLARY_FEATURE_TYPE.E55': {
                    'branch_lists': self.get_nodes(entity, 'ANCILLARY_FEATURE_TYPE.E55')
                },
                'PHASE_TYPE_ASSIGNMENT.E17': {
                    'branch_lists': self.get_nodes(entity, 'PHASE_TYPE_ASSIGNMENT.E17')
                }
            })
class PublicationForm(ResourceForm):
    """Resource wizard step for creation events, publication events and
    rights information attached to an information resource."""

    @staticmethod
    def get_info():
        # Descriptor consumed by the form registry to render this step.
        return {
            'id': 'publication',
            'icon': 'fa-flash',
            'name': _('Creation and Publication'),
            'class': PublicationForm
        }

    def update(self, data, files):
        """Persist each submitted branch list back onto the resource graph."""
        self.update_nodes('RESOURCE_CREATION_EVENT.E65', data)
        self.update_nodes('PUBLICATION_EVENT.E12', data)
        self.update_nodes('RIGHT_TYPE.E55', data)
        return

    def load(self, lang):
        """Populate self.data with the existing branches (dates converted
        for display) and the concept domains the dropdowns need."""
        if self.resource:
            self.data['RESOURCE_CREATION_EVENT.E65'] = {
                'branch_lists': datetime_nodes_to_dates(self.get_nodes('RESOURCE_CREATION_EVENT.E65')),
                'domains': {
                    'INFORMATION_RESOURCE_TYPE.E55' : Concept().get_e55_domain('INFORMATION_RESOURCE_TYPE.E55')
                }
            }
            self.data['PUBLICATION_EVENT.E12'] = {
                'branch_lists': datetime_nodes_to_dates(self.get_nodes('PUBLICATION_EVENT.E12')),
                'domains': {}
            }
            self.data['RIGHT_TYPE.E55'] = {
                'branch_lists': self.get_nodes('RIGHT_TYPE.E55'),
                'domains': {
                    'RIGHT_TYPE.E55' : Concept().get_e55_domain('RIGHT_TYPE.E55')
                }
            }
        return
| agpl-3.0 |
mancoast/CPythonPyc_test | fail/335_test_loader.py | 4 | 4957 | from importlib import machinery
import imp
import unittest
from .. import abc
from .. import util
from test.support import captured_stdout
class LoaderTests(abc.LoaderTests):
    """Tests for loading frozen modules via machinery.FrozenImporter.

    Each test uncaches the frozen module under test and captures stdout,
    since the frozen __hello__/__phello__ modules print on import."""

    def test_module(self):
        with util.uncache('__hello__'), captured_stdout() as stdout:
            module = machinery.FrozenImporter.load_module('__hello__')
            check = {'__name__': '__hello__',
                     '__package__': '',
                     '__loader__': machinery.FrozenImporter,
                     }
            for attr, value in check.items():
                self.assertEqual(getattr(module, attr), value)
            self.assertEqual(stdout.getvalue(), 'Hello world!\n')
            # Frozen modules have no backing file.
            self.assertFalse(hasattr(module, '__file__'))

    def test_package(self):
        with util.uncache('__phello__'), captured_stdout() as stdout:
            module = machinery.FrozenImporter.load_module('__phello__')
            check = {'__name__': '__phello__',
                     '__package__': '__phello__',
                     '__path__': ['__phello__'],
                     '__loader__': machinery.FrozenImporter,
                     }
            for attr, value in check.items():
                attr_value = getattr(module, attr)
                self.assertEqual(attr_value, value,
                                 "for __phello__.%s, %r != %r" %
                                 (attr, attr_value, value))
            self.assertEqual(stdout.getvalue(), 'Hello world!\n')
            self.assertFalse(hasattr(module, '__file__'))

    def test_lacking_parent(self):
        # A frozen submodule can be loaded without its parent package.
        with util.uncache('__phello__', '__phello__.spam'), \
             captured_stdout() as stdout:
            module = machinery.FrozenImporter.load_module('__phello__.spam')
            check = {'__name__': '__phello__.spam',
                     '__package__': '__phello__',
                     '__loader__': machinery.FrozenImporter,
                     }
            for attr, value in check.items():
                attr_value = getattr(module, attr)
                self.assertEqual(attr_value, value,
                                 "for __phello__.spam.%s, %r != %r" %
                                 (attr, attr_value, value))
            self.assertEqual(stdout.getvalue(), 'Hello world!\n')
            self.assertFalse(hasattr(module, '__file__'))

    def test_module_reuse(self):
        # Loading the same frozen module twice returns the same object.
        with util.uncache('__hello__'), captured_stdout() as stdout:
            module1 = machinery.FrozenImporter.load_module('__hello__')
            module2 = machinery.FrozenImporter.load_module('__hello__')
            self.assertIs(module1, module2)
            self.assertEqual(stdout.getvalue(),
                             'Hello world!\nHello world!\n')

    def test_module_repr(self):
        with util.uncache('__hello__'), captured_stdout():
            module = machinery.FrozenImporter.load_module('__hello__')
            self.assertEqual(repr(module),
                             "<module '__hello__' (frozen)>")

    # No way to trigger an error in a frozen module.
    test_state_after_failure = None

    def test_unloadable(self):
        assert machinery.FrozenImporter.find_module('_not_real') is None
        with self.assertRaises(ImportError) as cm:
            machinery.FrozenImporter.load_module('_not_real')
        self.assertEqual(cm.exception.name, '_not_real')
class InspectLoaderTests(unittest.TestCase):
    """Tests for the InspectLoader methods for FrozenImporter."""

    def test_get_code(self):
        # Make sure that the code object is good.
        name = '__hello__'
        with captured_stdout() as stdout:
            code = machinery.FrozenImporter.get_code(name)
            mod = imp.new_module(name)
            exec(code, mod.__dict__)
            self.assertTrue(hasattr(mod, 'initialized'))
            self.assertEqual(stdout.getvalue(), 'Hello world!\n')

    def test_get_source(self):
        # Should always return None.
        result = machinery.FrozenImporter.get_source('__hello__')
        self.assertIsNone(result)

    def test_is_package(self):
        # Should be able to tell what is a package.
        test_for = (('__hello__', False), ('__phello__', True),
                    ('__phello__.spam', False))
        for name, is_package in test_for:
            result = machinery.FrozenImporter.is_package(name)
            self.assertEqual(bool(result), is_package)

    def test_failure(self):
        # Raise ImportError for modules that are not frozen.
        for meth_name in ('get_code', 'get_source', 'is_package'):
            method = getattr(machinery.FrozenImporter, meth_name)
            with self.assertRaises(ImportError) as cm:
                method('importlib')
            self.assertEqual(cm.exception.name, 'importlib')
def test_main():
    # Entry point mirroring the other importlib test modules.
    from test.support import run_unittest
    run_unittest(LoaderTests, InspectLoaderTests)

if __name__ == '__main__':
    test_main()
| gpl-3.0 |
HBehrens/feedsanitizer | django/core/management/commands/startproject.py | 322 | 1680 | from django.core.management.base import copy_helper, CommandError, LabelCommand
from django.utils.importlib import import_module
import os
import re
from random import choice
class Command(LabelCommand):
    """Management command that creates a Django project directory structure
    for the given project name in the current directory."""
    help = "Creates a Django project directory structure for the given project name in the current directory."
    args = "[projectname]"
    label = 'project name'

    requires_model_validation = False
    # Can't import settings during this command, because they haven't
    # necessarily been created.
    can_import_settings = False

    def handle_label(self, project_name, **options):
        """Create the project skeleton and inject a random SECRET_KEY into
        the generated settings.py.

        Raises CommandError if project_name shadows an importable module.
        """
        # Determine the project_name a bit naively -- by looking at the name of
        # the parent directory.
        directory = os.getcwd()

        # Check that the project_name cannot be imported.
        try:
            import_module(project_name)
        except ImportError:
            pass
        else:
            raise CommandError("%r conflicts with the name of an existing Python module and cannot be used as a project name. Please try another name." % project_name)

        copy_helper(self.style, 'project', project_name, directory)

        # Create a random SECRET_KEY hash, and put it in the main settings.
        main_settings_file = os.path.join(directory, project_name, 'settings.py')
        # Close the read handle explicitly (the previous code leaked it).
        fp = open(main_settings_file, 'r')
        try:
            settings_contents = fp.read()
        finally:
            fp.close()
        # NOTE(review): random.choice is not a cryptographically strong
        # source; a SystemRandom-based generator would be preferable for
        # SECRET_KEY -- flagged rather than changed to preserve behavior.
        secret_key = ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
        settings_contents = re.sub(r"(?<=SECRET_KEY = ')'", secret_key + "'", settings_contents)
        fp = open(main_settings_file, 'w')
        try:
            fp.write(settings_contents)
        finally:
            fp.close()
| mit |
NirBenTalLab/proorigami-ptgraph | domeval.py | 2 | 18915 | ###############################################################################
#
# domeval.py - functions to evaluate domain decomposition accuracy
#
# File: domeval.py
# Author: Alex Stivala
# Created: December 2007
#
# $Id: domeval.py 3236 2010-01-13 02:06:50Z alexs $
#
###############################################################################
"""
Functions to evaluate domain decomposition accuracy.
The accuracy of a domain decomposition is computed as overlap of predicted
(test) and assigned (reference) residues in the domain decomposition.
If a different number of domains is assigned, the decomposition is scored
as failed an no overlap is computed.
This measure is as defined by Jones et al 1998 'Domain assignment for protein
structure using a consensus approach: Characterization and analysis',
Protein Science 7:233-242.
"""
import itertools
import os
import sys

from Bio.PDB import *
from ptdomain import *
from parsepdom import *
from getdomains import *
def permutations(l):
    """
    Return all the permutations of the list l, as a list of lists.

    Obviously this should only be used for extremely small lists (like less
    than 9 members).

    Parameters:
       l - list to find permutations of

    Return value:
       list of lists, each list a permutation of l.
    """
    # itertools.permutations works positionally, so inputs containing equal
    # elements are handled correctly.  The previous hand-rolled recursion
    # removed candidates with "if e != x", which dropped EVERY element equal
    # to the chosen head and corrupted the result for duplicate values
    # (e.g. permutations([1, 1]) came back as [[1], [1]]).
    # For distinct elements the output order is identical to the old code.
    return [list(p) for p in itertools.permutations(l)]
def compute_overlap_score(test_domlist, ref_domlist):
    """
    Compute the overlap score between the two domain decompositions of
    the same length represented by test and ref domlist.

    For two domain assignment methods, we can't assume they order
    their domains the same way, so we have to consider every possible
    mapping between them and use the best score, as discussed in
    Veretnik et al 2004 J. Mol. Biol. 339(3):647-678.

    Parameters:
       test_domlist - list of PTDomain from the test (predicted) decomposition
       ref_domlist - list of PTDomain from the reference (gold standard) decomp.

    Return value:
       The maximum overlap score over all permutations of domains
       (0.0 if the test decomposition is not disjoint).
    """
    assert(len(test_domlist) == len(ref_domlist))

    # get the lowest and highest residue sequence number in each chain
    # of the reference domains and build a dictionary from it.
    chain_dict = build_domain_chaindict(ref_domlist)

    # compute the total number of residues spanned by the reference domains.
    # (.values() instead of the Python2-only .itervalues(), keeping the
    # module usable on either interpreter.)
    total_residues = sum(max_resnum - min_resnum + 1
                         for (min_resnum, max_resnum) in chain_dict.values())

    # verify that the test decomposition is valid
    if not verify_domain_disjoint(test_domlist, chain_dict):
        sys.stderr.write('ERROR: invalid domain decomposition (not disjoint)\n')
        write_domains(sys.stderr, test_domlist)
        return 0.0

    # now compute the score for every possible mapping of the domains
    # (so it suffices to use every permutation of one of the lists keeping
    # the other fixed). Since we don't expect ever more than 8 domains
    # (and usually 4 or fewer) this should be ok, but still expensive.
    return max(compute_overlap_score_ordered(test_permutation, ref_domlist,
                                             chain_dict, total_residues)
               for test_permutation in permutations(test_domlist))
def compute_overlap_score_ordered(test_domlist, ref_domlist,
                                  chain_dict, total_residues):
    """
    For two domain lists of the same length, ordered so that
    corresponding domains 'line up', compute the overlap score as the
    fraction of residues that are assigned to the same domain.

    Note the ordering requirement. For two domain assignment methods,
    we can't assume they order their domains the same way, so this
    function has to be called multiple times with different orderings
    to find the one with the best score.

    Parameters:
       test_domlist - list of PTDomain from the test (predicted) decomposition
       ref_domlist - list of PTDomain from the reference (gold standard) decomp.
       chain_dict - dict of {chainid : (min_resnum, max_resnum)} built by
                    caller.
       total_residues - total number of residues in protein.

    Return value:
       overlap score in [0, 1]
    """
    assert(len(test_domlist) == len(ref_domlist))
    # Count residues assigned to the same (aligned) domain in both
    # decompositions.  This walks every residue of every chain, which is
    # inefficient but simple and general enough for multi-segment domains.
    # (.items() and zip() instead of the Python2-only .iteritems() and
    # explicit indexing, keeping the module usable on either interpreter.)
    overlap_count = 0
    for test_dom, ref_dom in zip(test_domlist, ref_domlist):
        for chain, (min_resnum, max_resnum) in chain_dict.items():
            for resnum in range(min_resnum, max_resnum + 1):
                if (test_dom.is_in_domain(chain, resnum) and
                    ref_dom.is_in_domain(chain, resnum)):
                    overlap_count += 1
    return float(overlap_count) / float(total_residues)
def domain_eval(test_domlist, ref_domlist):
    """
    Classify a predicted decomposition against a reference one.

    When both lists contain the same number of domains, the overlap score
    (fraction of residues assigned to the same domain) is computed;
    otherwise the prediction is labelled 'undercut' (too few domains) or
    'overcut' (too many domains) and scored 0.0.

    Parameters:
       test_domlist - list of PTDomain from the test (predicted) decomposition
       ref_domlist - list of PTDomain from the reference (gold standard) decomp.

    Return value:
       tuple (description, score): description is 'undercut', 'overcut'
       or 'correct'; score is the overlap score in [0,1] when 'correct',
       else 0.0.
    """
    n_test = len(test_domlist)
    n_ref = len(ref_domlist)
    if n_test < n_ref:
        return ('undercut', 0.0)
    if n_test > n_ref:
        return ('overcut', 0.0)
    return ('correct', compute_overlap_score(test_domlist, ref_domlist))
def evaluate_domains(domainlist, eval_domain_program, pdbid,
                     pdb_filename, pdb_struct, chainid=None):
    """
    Evaluate the performance of the domain decomposition represented by
    the supplied domainlist against the program or database
    eval_domain_program.

    Parameters:
       domainlist - list of PTDomain for our decomposition
       eval_domain_program - 'cath:cdf_file_name' (CATH) or other supported
                           program or database (see ptdomain.py)
       pdbid - PDB identifier for the protein
       pdb_filename - name of PDB file (needed for DDOMAIN)
       pdb_struct - Bio.PDB parsed structure (needed for DDOMAIN)
       chainid - (default None). If not None, only use this chain.

    Return value: tuple (num_domains, description, score)
                 where num_domains is the number of domains
                 in the reference (ie from the eval_domain_program)
                 and description and score are from the tuple from
                 domain_eval (see above).
    """
    # Reference ('gold standard') decomposition for this protein/chain.
    ref_domain_list = get_domains(eval_domain_program,
                                  pdbid, pdb_filename, pdb_struct,
                                  chainid)
    num_domains = len(ref_domain_list)
    # NOTE(review): 'verbose' is not defined anywhere in this module; it is
    # presumably supplied by one of the star imports above -- verify, else
    # this raises NameError at runtime.
    if verbose:
        print eval_domain_program
        write_domains(sys.stdout, ref_domain_list)
    (description, score) = domain_eval(domainlist, ref_domain_list)
    return (num_domains, description, score)
def run_on_pdomains_list(pdbid_list,
                         pdb_root,
                         pdomains_filename,
                         print_results,
                         get_domains_function,
                         *get_domains_args):
    """
    Run the supplied domain decomposition function get_domains_function
    (with args get_domains_args) over all pDomains benchmark chains in
    the specified list of PDB/chain identifiers.

    Used for training/testing/crossvalidation for tuning parameters etc.

    Parameters:
       pdbid_list - list of PDB/chain identifiers (keys in dict built by
                    parse_pdomains_file())
       pdb_root - root of the PDB divided hierarchy to find PDB files.
       pdomains_filename - the filename of the pDomains benchmark file
       print_results - If True, write results of each chain to stdout.
       get_domains_function - A function that, given the following args,
                    in order:
                       pdbid - PDB identifier
                       pdb_filename - PDB file name
                       pdb_struct - Bio.PDB parsed PDB structure
                       chainid - If not None, chain identifier to process
                    and then those
                    in get_domains_args, returns a domain decomposition
                    in the form of a list of PTDomain objects.
       get_domains_args - variable args list for get_domains_function

    Return value:
       tuple (undercut, overcut, correct, avgscore,
              num_correct_assign, numdomains_dict, num_processed)
       where undercut, overcut, correct are number of domains that were
       undercut (too few domains) overcut (too many domains) or had the
       correct number of domains, respectively, and avgscore is the
       average score for all chains (scoring 0.0 for undercut/overcut)
       and num_correct_assign is number correctly assigned (number of
       domains correct and score over threshold)
       and numdomains_dict is a dictionary of
       { num_domains : (frequency, totalscore, avgscore, undercut, overcut,
                        num_correct_domains, num_correct_assign) }
       mapping number of domains to scores for chains with that number of
       domains.

    Raises Exceptions:
       ValueError if bad return value from evaluate_domains()
    """
    THRESHOLD_SCORE = 0.75 # must be above this to be correct
    total_score = 0.0
    num_undercut = 0
    num_overcut = 0
    num_correct = 0
    num_correct_assign = 0
    num_processed = 0
    numdomains_dict = {} # described in docstring above
    for pdbchain in pdbid_list:
        # A 5th character on the identifier is the chain id (e.g. '1abcA').
        if len(pdbchain) > 4:
            pdbid = pdbchain[:4]
            chainid = pdbchain[4]
        else:
            pdbid = pdbchain[:4]
            chainid = None
        # Divided PDB hierarchy: files live under the middle two id chars.
        pdb_dir = os.path.join(pdb_root, pdbid[1:3].lower())
        pdb_filename = os.path.join(pdb_dir, 'pdb' + pdbid.lower() + '.ent.gz')
        if not os.path.exists(pdb_filename):
            sys.stderr.write("WARNING: pdb file " + pdb_filename +
                             " not found, skipping\n")
            continue
        # check for compressed files. We only support gzip (.gz)
        # Note we are not using the zlib or GzipFile python modules
        # since we are calling to external programs which require the
        # file uncompressed themselves anyway so we'll just run gzip
        # to uncompress the file to a temporary directory.
        # NOTE(review): os.tempnam is insecure (race between name choice and
        # mkdir) and was removed in Python 3; tempfile.mkdtemp would be the
        # safe replacement.  Also os.system with an unquoted path is shell-
        # injectable if filenames contain metacharacters -- flagged, not
        # changed, to preserve behavior of this Python 2 module.
        pdb_file_basename = os.path.basename(pdb_filename)
        (name,extension) = os.path.splitext(pdb_file_basename)
        if extension == '.gz':
            TMPDIR = os.tempnam(None, "ptgz")
            os.mkdir(TMPDIR)
            tmp_pdbfilename = os.path.join(TMPDIR, name)
            os.system("gzip " + pdb_filename + " -d -c > " + tmp_pdbfilename)
            our_pdb_filename = tmp_pdbfilename
            used_tmp_file = True
        else:
            our_pdb_filename = pdb_filename
            used_tmp_file = False
        try:
            # parse PDB file
            pdb_parser = PDBParser()
            pdb_struct = pdb_parser.get_structure(pdbid, our_pdb_filename)
            # run the domain decomposition method and evaluate results
            domainlist = get_domains_function(pdbid, our_pdb_filename,
                                              pdb_struct,
                                              chainid,
                                              *get_domains_args)
            if domainlist == None:
                if chainid == None:
                    chainname = ''
                else:
                    chainname = 'chain ' + chainid
                sys.stderr.write('WARNING: domain decomposition failed for '
                                 + pdbid + ' ' + chainname + '\n')
                continue
            evalresult = evaluate_domains(domainlist,
                                          "pdomains:" + pdomains_filename,
                                          pdbid,
                                          our_pdb_filename, pdb_struct,
                                          chainid)
            (num_domains, description, score) = evalresult
            num_processed += 1
        finally:
            # always remove the uncompressed copy, even on error/continue
            if used_tmp_file:
                cleanup_tmpdir(TMPDIR)
        # Tally this chain into the global counters.
        assigndescr = 'incorrect'
        if description == 'undercut':
            num_undercut += 1
        elif description == 'overcut':
            num_overcut += 1
        elif description == 'correct':
            num_correct += 1
            if score > THRESHOLD_SCORE:
                num_correct_assign += 1
                assigndescr = 'correct'
        else:
            raise ValueError('unknown description ' + description +
                             ' from evaluate_domains\n')
        if print_results:
            sys.stdout.write(pdbchain + '\t' + str(num_domains) + '\t' +
                             description + '\t' + str(score) + ' ' +
                             assigndescr + '\n' )
        total_score += score
        # Tally into the per-domain-count breakdown as well.
        if numdomains_dict.has_key(num_domains):
            (dfrequency, dtotalscore, davgscore,
             dundercut,dovercut,dnum_correct_domains,dnum_correct_assign) = \
                 numdomains_dict[num_domains]
        else:
            dfrequency = 0
            dtotalscore = 0.0
            davgscore = 0.0
            dundercut = 0
            dovercut = 0
            dnum_correct_domains = 0
            dnum_correct_assign = 0
        dfrequency += 1
        dtotalscore += score
        if description == 'undercut':
            dundercut += 1
        elif description == 'overcut':
            dovercut += 1
        elif description == 'correct':
            dnum_correct_domains += 1
            if score > THRESHOLD_SCORE:
                dnum_correct_assign += 1
        else:
            assert(False)
        numdomains_dict[num_domains] = (dfrequency, dtotalscore, davgscore,
                                        dundercut,dovercut,
                                        dnum_correct_domains,
                                        dnum_correct_assign)
    # Fill in the average score slot for each domain-count bucket.
    for num_domains in numdomains_dict.iterkeys():
        (freq, total, avg, dunder,dover,dnumcd,dnumca) = numdomains_dict[num_domains]
        avg = total / float(freq)
        numdomains_dict[num_domains] = (freq,total,avg,dunder,dover,dnumcd,dnumca)
    # NOTE(review): ZeroDivisionError here if no chain was processed --
    # verify callers guarantee a non-empty, findable pdbid_list.
    avgscore = total_score / float(num_processed)
    return (num_undercut, num_overcut, num_correct, avgscore,
            num_correct_assign, numdomains_dict, num_processed)
def run_on_pdomains_file(pdb_root,
                         pdomains_filename,
                         print_results,
                         get_domains_function,
                         *get_domains_args):
    """
    Run the domain decomposition over all pDomains benchmark chains in
    the specified pDomains benchmark file.

    Used for training/testing/crossvalidation for tuning parameters etc.

    Parameters:
       pdb_root - root of the PDB divided hierarchy to find PDB files.
       pdomains_filename - the filename of the pDomains benchmark file
       print_results - If True, print results for each chain to stdout
       get_domains_function - A function that, given the following args,
                    in order:
                       pdbid - PDB identifier
                       pdb_filename - PDB file name
                       pdb_struct - Bio.PDB parsed PDB structure
                       chainid - if not None, chain identifier of chain
                    and then those
                    in get_domains_args, returns a domain decomposition
       get_domains_args - variable args for get_domains_function

    Return value:
       tuple (undercut, overcut, correct, avgscore,
              num_correct_assign, numdomains_dict, num_processed)
       as described in run_on_pdomains_list().
    """
    # Close the benchmark file explicitly; the previous code passed an
    # anonymous open() handle and leaked it.
    fh = open(pdomains_filename)
    try:
        pdomains = parse_pdomains_file(fh)
    finally:
        fh.close()
    return run_on_pdomains_list(pdomains.keys(), pdb_root, pdomains_filename,
                                print_results,
                                get_domains_function,
                                *get_domains_args)
def print_scores(num_processed,
                 num_undercut, num_overcut, num_correct, num_correct_assign,
                 avgscore, indent=0):
    """
    Neatly format the evaluation counters to stdout.

    Parameters:
       num_processed - number of chains processed
       num_undercut - number undercut
       num_overcut - number overcut
       num_correct - number of correctly assigned domain numbers
       num_correct_assign - number of correctly assigned domains
       avgscore - average score in [0,1]
       indent - (default 0) number of spaces to indent every line

    Return value:
       None
    """
    pad = ' ' * indent
    report_lines = [
        "number processed: %d\n" % num_processed,
        "undercut: %d\n" % num_undercut,
        "overcut: %d\n" % num_overcut,
        "correct domains: %d\n" % num_correct,
        "correct assign: %d\n" % num_correct_assign,
        "average score: %3.1f%%\n" % (avgscore * 100.0),
    ]
    for line in report_lines:
        sys.stdout.write(pad + line)
| mit |
0x7678/ZeroNet | src/lib/pybitcointools/bitcoin/py3specials.py | 1 | 3405 | import sys, os
import binascii
import hashlib
if sys.version_info.major == 3:
    # Python 3 implementations of the string/bytes helpers used by the rest
    # of the package (a sibling module presumably provides the Python 2
    # versions -- verify).
    string_types = (str)  # NB: (str) is just str, not a 1-tuple
    string_or_bytes_types = (str, bytes)
    int_types = (int, float)

    # Base switching: alphabets for each supported radix.
    code_strings = {
        2: '01',
        10: '0123456789',
        16: '0123456789abcdef',
        32: 'abcdefghijklmnopqrstuvwxyz234567',
        58: '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz',
        256: ''.join([chr(x) for x in range(256)])
    }

    def bin_dbl_sha256(s):
        # Double SHA-256, as used throughout the bitcoin protocol.
        bytes_to_hash = from_string_to_bytes(s)
        return hashlib.sha256(hashlib.sha256(bytes_to_hash).digest()).digest()

    def lpad(msg, symbol, length):
        # Left-pad msg with symbol up to the given total length.
        if len(msg) >= length:
            return msg
        return symbol * (length - len(msg)) + msg

    def get_code_string(base):
        # Alphabet for the given radix; raises for unsupported bases.
        if base in code_strings:
            return code_strings[base]
        else:
            raise ValueError("Invalid base!")

    def changebase(string, frm, to, minlen=0):
        # Re-encode string from radix frm to radix to, left-padded to minlen.
        if frm == to:
            return lpad(string, get_code_string(frm)[0], minlen)
        return encode(decode(string, frm), to, minlen)

    def bin_to_b58check(inp, magicbyte=0):
        # Base58Check: version byte + payload + first 4 bytes of double
        # SHA-256 as checksum; leading zero bytes become leading '1's.
        inp_fmtd = from_int_to_byte(magicbyte)+inp
        leadingzbytes = 0
        for x in inp_fmtd:
            if x != 0:
                break
            leadingzbytes += 1
        checksum = bin_dbl_sha256(inp_fmtd)[:4]
        return '1' * leadingzbytes + changebase(inp_fmtd+checksum, 256, 58)

    def bytes_to_hex_string(b):
        if isinstance(b, str):
            return b
        return ''.join('{:02x}'.format(y) for y in b)

    def safe_from_hex(s):
        return bytes.fromhex(s)

    def from_int_representation_to_bytes(a):
        return bytes(str(a), 'utf-8')

    def from_int_to_byte(a):
        return bytes([a])

    def from_byte_to_int(a):
        # Indexing a bytes object on Python 3 already yields an int.
        return a

    def from_string_to_bytes(a):
        return a if isinstance(a, bytes) else bytes(a, 'utf-8')

    def safe_hexlify(a):
        return str(binascii.hexlify(a), 'utf-8')

    def encode(val, base, minlen=0):
        # Encode non-negative integer val in the given radix, left-padded
        # to minlen digits.  Returns bytes for base 256, str otherwise.
        base, minlen = int(base), int(minlen)
        code_string = get_code_string(base)
        result_bytes = bytes()
        while val > 0:
            curcode = code_string[val % base]
            result_bytes = bytes([ord(curcode)]) + result_bytes
            val //= base
        pad_size = minlen - len(result_bytes)
        padding_element = b'\x00' if base == 256 else b'0'
        if (pad_size > 0):
            result_bytes = padding_element*pad_size + result_bytes
        result_string = ''.join([chr(y) for y in result_bytes])
        result = result_bytes if base == 256 else result_string
        return result

    def decode(string, base):
        # Decode a string/bytes representation in the given radix back to
        # an integer.  For base 256 a str input is treated as hex.
        if base == 256 and isinstance(string, str):
            string = bytes(bytearray.fromhex(string))
        base = int(base)
        code_string = get_code_string(base)
        result = 0
        if base == 256:
            def extract(d, cs):
                return d
        else:
            def extract(d, cs):
                return cs.find(d if isinstance(d, str) else chr(d))
        if base == 16:
            string = string.lower()
        while len(string) > 0:
            result *= base
            result += extract(string[0], code_string)
            string = string[1:]
        return result

    def random_string(x):
        # NOTE(review): str(os.urandom(x)) yields the repr "b'...'" of the
        # random bytes on Python 3 -- looks like a bug, but callers may
        # depend on it; verify before changing.
        return str(os.urandom(x))
| gpl-2.0 |
liangwang/m5 | ext/ply/test/testlex.py | 102 | 22409 | # testlex.py
import unittest
try:
import StringIO
except ImportError:
import io as StringIO
import sys
sys.path.insert(0,"..")
sys.tracebacklimit = 0
import ply.lex
def check_expected(result, expected):
    """Return True if each line of result ends with the corresponding
    line of expected (same number of lines required)."""
    # On Python 3 normalise both values to bytes so str/bytes mixes compare.
    if sys.version_info[0] >= 3:
        if isinstance(result, str):
            result = result.encode('ascii')
        if isinstance(expected, str):
            expected = expected.encode('ascii')
    got_lines = result.splitlines()
    want_lines = expected.splitlines()
    if len(got_lines) != len(want_lines):
        return False
    return all(got.endswith(want)
               for got, want in zip(got_lines, want_lines))
def run_import(module):
    """Import the named module for its side effects, then drop it from
    sys.modules so the next test gets a fresh import."""
    stmt = "import " + module
    exec(stmt)
    del sys.modules[module]
# Tests related to errors and warnings when building lexers
class LexErrorWarningTests(unittest.TestCase):
def setUp(self):
sys.stderr = StringIO.StringIO()
sys.stdout = StringIO.StringIO()
def tearDown(self):
sys.stderr = sys.__stderr__
sys.stdout = sys.__stdout__
def test_lex_doc1(self):
self.assertRaises(SyntaxError,run_import,"lex_doc1")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"lex_doc1.py:18: No regular expression defined for rule 't_NUMBER'\n"))
def test_lex_dup1(self):
self.assertRaises(SyntaxError,run_import,"lex_dup1")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"lex_dup1.py:20: Rule t_NUMBER redefined. Previously defined on line 18\n" ))
def test_lex_dup2(self):
self.assertRaises(SyntaxError,run_import,"lex_dup2")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"lex_dup2.py:22: Rule t_NUMBER redefined. Previously defined on line 18\n" ))
def test_lex_dup3(self):
self.assertRaises(SyntaxError,run_import,"lex_dup3")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"lex_dup3.py:20: Rule t_NUMBER redefined. Previously defined on line 18\n" ))
def test_lex_empty(self):
self.assertRaises(SyntaxError,run_import,"lex_empty")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"No rules of the form t_rulename are defined\n"
"No rules defined for state 'INITIAL'\n"))
def test_lex_error1(self):
run_import("lex_error1")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"No t_error rule is defined\n"))
def test_lex_error2(self):
self.assertRaises(SyntaxError,run_import,"lex_error2")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Rule 't_error' must be defined as a function\n")
)
def test_lex_error3(self):
self.assertRaises(SyntaxError,run_import,"lex_error3")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"lex_error3.py:20: Rule 't_error' requires an argument\n"))
def test_lex_error4(self):
self.assertRaises(SyntaxError,run_import,"lex_error4")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"lex_error4.py:20: Rule 't_error' has too many arguments\n"))
def test_lex_ignore(self):
self.assertRaises(SyntaxError,run_import,"lex_ignore")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"lex_ignore.py:20: Rule 't_ignore' must be defined as a string\n"))
def test_lex_ignore2(self):
run_import("lex_ignore2")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"t_ignore contains a literal backslash '\\'\n"))
def test_lex_re1(self):
self.assertRaises(SyntaxError,run_import,"lex_re1")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Invalid regular expression for rule 't_NUMBER'. unbalanced parenthesis\n"))
def test_lex_re2(self):
self.assertRaises(SyntaxError,run_import,"lex_re2")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Regular expression for rule 't_PLUS' matches empty string\n"))
def test_lex_re3(self):
self.assertRaises(SyntaxError,run_import,"lex_re3")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Invalid regular expression for rule 't_POUND'. unbalanced parenthesis\n"
"Make sure '#' in rule 't_POUND' is escaped with '\\#'\n"))
def test_lex_rule1(self):
self.assertRaises(SyntaxError,run_import,"lex_rule1")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"t_NUMBER not defined as a function or string\n"))
def test_lex_rule2(self):
self.assertRaises(SyntaxError,run_import,"lex_rule2")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"lex_rule2.py:18: Rule 't_NUMBER' requires an argument\n"))
def test_lex_rule3(self):
self.assertRaises(SyntaxError,run_import,"lex_rule3")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"lex_rule3.py:18: Rule 't_NUMBER' has too many arguments\n"))
def test_lex_state1(self):
self.assertRaises(SyntaxError,run_import,"lex_state1")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"states must be defined as a tuple or list\n"))
def test_lex_state2(self):
self.assertRaises(SyntaxError,run_import,"lex_state2")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Invalid state specifier 'comment'. Must be a tuple (statename,'exclusive|inclusive')\n"
"Invalid state specifier 'example'. Must be a tuple (statename,'exclusive|inclusive')\n"))
def test_lex_state3(self):
self.assertRaises(SyntaxError,run_import,"lex_state3")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"State name 1 must be a string\n"
"No rules defined for state 'example'\n"))
def test_lex_state4(self):
self.assertRaises(SyntaxError,run_import,"lex_state4")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"State type for state comment must be 'inclusive' or 'exclusive'\n"))
def test_lex_state5(self):
self.assertRaises(SyntaxError,run_import,"lex_state5")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"State 'comment' already defined\n"))
def test_lex_state_noerror(self):
run_import("lex_state_noerror")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"No error rule is defined for exclusive state 'comment'\n"))
def test_lex_state_norule(self):
self.assertRaises(SyntaxError,run_import,"lex_state_norule")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"No rules defined for state 'example'\n"))
def test_lex_token1(self):
self.assertRaises(SyntaxError,run_import,"lex_token1")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"No token list is defined\n"
"Rule 't_NUMBER' defined for an unspecified token NUMBER\n"
"Rule 't_PLUS' defined for an unspecified token PLUS\n"
"Rule 't_MINUS' defined for an unspecified token MINUS\n"
))
    def test_lex_token2(self):
        # 'tokens' of the wrong type is rejected, then each rule flagged.
        self.assertRaises(SyntaxError,run_import,"lex_token2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "tokens must be a list or tuple\n"
                                    "Rule 't_NUMBER' defined for an unspecified token NUMBER\n"
                                    "Rule 't_PLUS' defined for an unspecified token PLUS\n"
                                    "Rule 't_MINUS' defined for an unspecified token MINUS\n"
                                    ))
    def test_lex_token3(self):
        # A single rule for a token missing from the tokens list is flagged.
        self.assertRaises(SyntaxError,run_import,"lex_token3")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "Rule 't_MINUS' defined for an unspecified token MINUS\n"))
    def test_lex_token4(self):
        # Token names must be valid identifiers.
        self.assertRaises(SyntaxError,run_import,"lex_token4")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "Bad token name '-'\n"))
    def test_lex_token5(self):
        # A rule function returning an undeclared token type raises LexError at
        # run time.  sys.exc_info()[1] is the Python 2/3-portable way to get
        # the exception object.
        # NOTE(review): the assertion only runs if the exception actually
        # fires; a silent pass would go unnoticed here.
        try:
            run_import("lex_token5")
        except ply.lex.LexError:
            e = sys.exc_info()[1]
            self.assert_(check_expected(str(e),"lex_token5.py:19: Rule 't_NUMBER' returned an unknown token type 'NUM'"))
    def test_lex_token_dup(self):
        # Duplicated entries in the tokens list are only a warning.
        run_import("lex_token_dup")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "Token 'MINUS' multiply defined\n"))
    def test_lex_literal1(self):
        # Each literal must be a single character.
        self.assertRaises(SyntaxError,run_import,"lex_literal1")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "Invalid literal '**'. Must be a single character\n"))
    def test_lex_literal2(self):
        # 'literals' must be a sequence of characters (e.g. a string).
        self.assertRaises(SyntaxError,run_import,"lex_literal2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "Invalid literals specification. literals must be a sequence of characters\n"))
import os
import subprocess
import shutil
# Tests related to various build options associated with lexers
class LexBuildOptionTests(unittest.TestCase):
    """Exercise lexer construction options: module-, object- and
    closure-based lexers plus the optimize/table-generation machinery.

    The original file repeated the same remove-table-files and
    run-under-``-O``/``-OO`` boilerplate in every test; that logic now
    lives in the private helpers below (behavior unchanged).
    """

    # Expected token dumps shared by several tests.
    _CALC_OUTPUT = ("(NUMBER,3,1,0)\n"
                    "(PLUS,'+',1,1)\n"
                    "(NUMBER,4,1,2)\n")
    _ALIAS_OUTPUT = ("(NUMBER,3,1,0)\n"
                     "(+,'+',1,1)\n"
                     "(NUMBER,4,1,2)\n")
    _MANY_OUTPUT = ("(TOK34,'TOK34:',1,0)\n"
                    "(TOK143,'TOK143:',1,7)\n"
                    "(TOK269,'TOK269:',1,15)\n"
                    "(TOK372,'TOK372:',1,23)\n"
                    "(TOK452,'TOK452:',1,31)\n"
                    "(TOK561,'TOK561:',1,39)\n"
                    "(TOK999,'TOK999:',1,47)\n")

    def setUp(self):
        # Capture everything the imported lexer modules print.
        sys.stderr = StringIO.StringIO()
        sys.stdout = StringIO.StringIO()

    def tearDown(self):
        sys.stderr = sys.__stderr__
        sys.stdout = sys.__stdout__
        try:
            shutil.rmtree("lexdir")
        except OSError:
            pass

    def _remove_table_files(self, base):
        # Best-effort cleanup of a generated table module and its bytecode.
        for ext in ('.py', '.pyc', '.pyo'):
            try:
                os.remove(base + ext)
            except OSError:
                pass

    def _run_script(self, flag, script):
        # Run *script* in a child interpreter under the given -O/-OO flag
        # and return whatever it wrote to stdout (bytes on Python 3).
        p = subprocess.Popen([sys.executable, flag, script],
                             stdout=subprocess.PIPE)
        return p.stdout.read()

    def _check_import_output(self, modname, expected):
        # Import the sample lexer in-process and compare its stdout.
        run_import(modname)
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result, expected))

    def _check_optimized(self, modname, tabbase, expected, run_OO=True):
        # Shared body of the optimize-mode tests: generate fresh tables via
        # an in-process import, then re-run the script under -O (and,
        # optionally, -OO) and verify the table files that appear.
        self._remove_table_files(tabbase)
        self._check_import_output(modname, expected)
        self.assert_(os.path.exists(tabbase + ".py"))

        result = self._run_script('-O', modname + '.py')
        self.assert_(check_expected(result, expected))
        self.assert_(os.path.exists(tabbase + ".pyo"))
        os.remove(tabbase + ".pyo")

        if run_OO:
            result = self._run_script('-OO', modname + '.py')
            self.assert_(check_expected(result, expected))
            self.assert_(os.path.exists(tabbase + ".pyo"))
        self._remove_table_files(tabbase)

    def test_lex_module(self):
        # Lexer defined by a separate module object.
        self._check_import_output("lex_module", self._CALC_OUTPUT)

    def test_lex_object(self):
        # Lexer defined by an instance (bound-method rules).
        self._check_import_output("lex_object", self._CALC_OUTPUT)

    def test_lex_closure(self):
        # Lexer defined inside a closure.
        self._check_import_output("lex_closure", self._CALC_OUTPUT)

    def test_lex_optimize(self):
        # Default table module name ("lextab") in optimize mode.
        self._check_optimized("lex_optimize", "lextab", self._CALC_OUTPUT)

    def test_lex_optimize2(self):
        # Custom table module name in optimize mode.
        self._check_optimized("lex_optimize2", "opt2tab", self._CALC_OUTPUT)

    def test_lex_optimize3(self):
        # Tables written into a package subdirectory (outputdir support).
        try:
            shutil.rmtree("lexdir")
        except OSError:
            pass
        os.mkdir("lexdir")
        os.mkdir("lexdir/sub")
        open("lexdir/__init__.py", "w").write("")
        open("lexdir/sub/__init__.py", "w").write("")
        self._check_import_output("lex_optimize3", self._CALC_OUTPUT)
        self.assert_(os.path.exists("lexdir/sub/calctab.py"))

        result = self._run_script('-O', 'lex_optimize3.py')
        self.assert_(check_expected(result, self._CALC_OUTPUT))
        self.assert_(os.path.exists("lexdir/sub/calctab.pyo"))
        os.remove("lexdir/sub/calctab.pyo")

        result = self._run_script('-OO', 'lex_optimize3.py')
        self.assert_(check_expected(result, self._CALC_OUTPUT))
        self.assert_(os.path.exists("lexdir/sub/calctab.pyo"))
        try:
            shutil.rmtree("lexdir")
        except OSError:
            pass

    def test_lex_opt_alias(self):
        # Rules defined via aliases (t_PLUS = t_ADD style).
        self._check_optimized("lex_opt_alias", "aliastab", self._ALIAS_OUTPUT)

    def test_lex_many_tokens(self):
        # Very large token sets; original only re-ran under -O, not -OO.
        self._check_optimized("lex_many_tokens", "manytab", self._MANY_OUTPUT,
                              run_OO=False)
# Tests related to run-time behavior of lexers
class LexRunTests(unittest.TestCase):
    """Check the token streams printed by sample lexers at run time."""

    def setUp(self):
        # Divert output so each test can inspect what the lexer printed.
        sys.stderr = StringIO.StringIO()
        sys.stdout = StringIO.StringIO()

    def tearDown(self):
        sys.stderr = sys.__stderr__
        sys.stdout = sys.__stdout__

    def _imported_stdout(self, modname):
        # Import the sample lexer and hand back everything it printed.
        run_import(modname)
        return sys.stdout.getvalue()

    def test_lex_hedit(self):
        output = self._imported_stdout("lex_hedit")
        self.assert_(check_expected(output,
                                    "(H_EDIT_DESCRIPTOR,'abc',1,0)\n"
                                    "(H_EDIT_DESCRIPTOR,'abcdefghij',1,6)\n"
                                    "(H_EDIT_DESCRIPTOR,'xy',1,20)\n"))

    def test_lex_state_try(self):
        output = self._imported_stdout("lex_state_try")
        self.assert_(check_expected(output,
                                    "(NUMBER,'3',1,0)\n"
                                    "(PLUS,'+',1,2)\n"
                                    "(NUMBER,'4',1,4)\n"
                                    "Entering comment state\n"
                                    "comment body LexToken(body_part,'This is a comment */',1,9)\n"
                                    "(PLUS,'+',1,30)\n"
                                    "(NUMBER,'10',1,32)\n"))
unittest.main()
| bsd-3-clause |
lightenna/structuredfun-core | vendor/doctrine/orm/docs/en/conf.py | 2448 | 6497 | # -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
    """Sphinx ``linkcode`` hook: return a source URL for a documented object.

    Only objects in the custom ``dcorm`` domain are linked; every other
    domain yields ``None`` so no source link is emitted for it.
    """
    if domain != 'dcorm':
        return None
    return 'http://'
| apache-2.0 |
eammx/proyectosWeb | proyectoPython/env/lib/python3.6/site-packages/pip/_vendor/html5lib/serializer.py | 56 | 15758 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
import re
from codecs import register_error, xmlcharrefreplace_errors
from .constants import voidElements, booleanAttributes, spaceCharacters
from .constants import rcdataElements, entities, xmlEntities
from . import treewalkers, _utils
from xml.sax.saxutils import escape
# Characters that force attribute-value quoting under the "spec" policy:
# whitespace plus the characters the HTML standard forbids in unquoted values.
_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`"
_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]")
# The "legacy" policy additionally quotes control characters and various
# Unicode whitespace characters that older browsers mis-parse.
_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars +
                                   "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n"
                                   "\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15"
                                   "\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
                                   "\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000"
                                   "\u2001\u2002\u2003\u2004\u2005\u2006\u2007"
                                   "\u2008\u2009\u200a\u2028\u2029\u202f\u205f"
                                   "\u3000]")

# Maps a code point to the entity name used by the "htmlentityreplace"
# codec error handler defined below.
_encode_entity_map = {}
# On narrow (UCS-2) builds a non-BMP character is two code units long.
_is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
    # skip multi-character entities
    if ((_is_ucs4 and len(v) > 1) or
            (not _is_ucs4 and len(v) > 2)):
        continue
    if v != "&":
        if len(v) == 2:
            # Narrow build: fold a surrogate pair back into one code point.
            v = _utils.surrogatePairToCodepoint(v)
        else:
            v = ord(v)
        if v not in _encode_entity_map or k.islower():
            # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
            _encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
    """Codec error handler that rewrites unencodable characters as named
    HTML entities where one exists, else as hex character references.

    Registered below under the name "htmlentityreplace"; non-encode errors
    are delegated to the stock xmlcharrefreplace handler.
    """
    if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
        res = []
        codepoints = []
        skip = False
        for i, c in enumerate(exc.object[exc.start:exc.end]):
            if skip:
                # Second half of a surrogate pair already consumed below.
                skip = False
                continue
            index = i + exc.start
            if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
                # Narrow-build surrogate pair: treat as one code point.
                codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2])
                skip = True
            else:
                codepoint = ord(c)
            codepoints.append(codepoint)
        for cp in codepoints:
            e = _encode_entity_map.get(cp)
            if e:
                res.append("&")
                res.append(e)
                # Some legacy entity names lack the trailing semicolon.
                if not e.endswith(";"):
                    res.append(";")
            else:
                # No named entity known: fall back to a hex char reference.
                res.append("&#x%s;" % (hex(cp)[2:]))
        return ("".join(res), exc.end)
    else:
        return xmlcharrefreplace_errors(exc)


register_error("htmlentityreplace", htmlentityreplace_errors)
def serialize(input, tree="etree", encoding=None, **serializer_opts):
    """Serialize *input* (a token stream) back to markup text.

    :arg input: the token stream to serialize
    :arg tree: name of the treewalker implementation to use
    :arg encoding: output character encoding, or ``None`` for text output
    :arg serializer_opts: options forwarded to
        :py:class:`html5lib.serializer.HTMLSerializer`
    :returns: the serialized tree

    Example:

    >>> from html5lib.html5parser import parse
    >>> from html5lib.serializer import serialize
    >>> token_stream = parse('<html><body><p>Hi!</p></body></html>')
    >>> serialize(token_stream, omit_optional_tags=False)
    '<html><head></head><body><p>Hi!</p></body></html>'
    """
    # XXX: Should we cache this?
    tree_walker = treewalkers.getTreeWalker(tree)
    html_serializer = HTMLSerializer(**serializer_opts)
    return html_serializer.render(tree_walker(input), encoding)
class HTMLSerializer(object):
    """Serializes a treewalker token stream back to (X)HTML text.

    Behavior is controlled by the class-level option attributes below;
    each may be overridden per instance via keyword arguments to
    ``__init__`` (see its docstring for the meaning of every option).

    Fix: the attribute-value escaping lines in :meth:`serialize` had been
    corrupted into no-op replacements; they now substitute the proper
    ``&amp;``, ``&lt;``, ``&#39;`` and ``&quot;`` entities again.
    """

    # attribute quoting options
    quote_attr_values = "legacy"  # be secure by default
    quote_char = '"'
    use_best_quote_char = True

    # tag syntax options
    omit_optional_tags = True
    minimize_boolean_attributes = True
    use_trailing_solidus = False
    space_before_trailing_solidus = True

    # escaping options
    escape_lt_in_attrs = False
    escape_rcdata = False
    resolve_entities = True

    # miscellaneous options
    alphabetical_attributes = False
    inject_meta_charset = True
    strip_whitespace = False
    sanitize = False

    options = ("quote_attr_values", "quote_char", "use_best_quote_char",
               "omit_optional_tags", "minimize_boolean_attributes",
               "use_trailing_solidus", "space_before_trailing_solidus",
               "escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
               "alphabetical_attributes", "inject_meta_charset",
               "strip_whitespace", "sanitize")

    def __init__(self, **kwargs):
        """Initialize HTMLSerializer

        :arg inject_meta_charset: Whether or not to inject the meta charset.
            Defaults to ``True``.

        :arg quote_attr_values: Whether to quote attribute values that don't
            require quoting per legacy browser behavior (``"legacy"``), when
            required by the standard (``"spec"``), or always (``"always"``).
            Defaults to ``"legacy"``.

        :arg quote_char: Use given quote character for attribute quoting.
            Defaults to ``"`` which will use double quotes unless attribute
            value contains a double quote, in which case single quotes are
            used.

        :arg escape_lt_in_attrs: Whether or not to escape ``<`` in attribute
            values.
            Defaults to ``False``.

        :arg escape_rcdata: Whether to escape characters that need to be
            escaped within normal elements within rcdata elements such as
            style.
            Defaults to ``False``.

        :arg resolve_entities: Whether to resolve named character entities that
            appear in the source tree. The XML predefined entities &lt; &gt;
            &amp; &quot; &apos; are unaffected by this setting.
            Defaults to ``True``.

        :arg strip_whitespace: Whether to remove semantically meaningless
            whitespace. (This compresses all whitespace to a single space
            except within ``pre``.)
            Defaults to ``False``.

        :arg minimize_boolean_attributes: Shortens boolean attributes to give
            just the attribute value, for example::

              <input disabled="disabled">

            becomes::

              <input disabled>

            Defaults to ``True``.

        :arg use_trailing_solidus: Includes a close-tag slash at the end of the
            start tag of void elements (empty elements whose end tag is
            forbidden). E.g. ``<hr/>``.
            Defaults to ``False``.

        :arg space_before_trailing_solidus: Places a space immediately before
            the closing slash in a tag using a trailing solidus. E.g.
            ``<hr />``. Requires ``use_trailing_solidus=True``.
            Defaults to ``True``.

        :arg sanitize: Strip all unsafe or unknown constructs from output.
            See :py:class:`html5lib.filters.sanitizer.Filter`.
            Defaults to ``False``.

        :arg omit_optional_tags: Omit start/end tags that are optional.
            Defaults to ``True``.

        :arg alphabetical_attributes: Reorder attributes to be in alphabetical order.
            Defaults to ``False``.
        """
        unexpected_args = frozenset(kwargs) - frozenset(self.options)
        if len(unexpected_args) > 0:
            raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args)))
        if 'quote_char' in kwargs:
            # An explicit quote character disables automatic selection.
            self.use_best_quote_char = False
        for attr in self.options:
            setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
        self.errors = []
        self.strict = False

    def encode(self, string):
        """Encode *string* for output, turning unencodable characters into
        entities (via the "htmlentityreplace" handler)."""
        # NOTE: assert is stripped under -O; callers must pass text.
        assert(isinstance(string, text_type))
        if self.encoding:
            return string.encode(self.encoding, "htmlentityreplace")
        else:
            return string

    def encodeStrict(self, string):
        """Encode *string* for output, raising on unencodable characters.

        Used for markup (tag names, attribute names, doctypes) where
        entity substitution would be invalid.
        """
        assert(isinstance(string, text_type))
        if self.encoding:
            return string.encode(self.encoding, "strict")
        else:
            return string

    def serialize(self, treewalker, encoding=None):
        """Yield the serialized output chunk by chunk (str, or bytes when
        *encoding* is given)."""
        # pylint:disable=too-many-nested-blocks
        self.encoding = encoding
        in_cdata = False
        self.errors = []

        # Build the filter chain around the treewalker.
        if encoding and self.inject_meta_charset:
            from .filters.inject_meta_charset import Filter
            treewalker = Filter(treewalker, encoding)
        # Alphabetical attributes is here under the assumption that none of
        # the later filters add or change order of attributes; it needs to be
        # before the sanitizer so escaped elements come out correctly
        if self.alphabetical_attributes:
            from .filters.alphabeticalattributes import Filter
            treewalker = Filter(treewalker)
        # WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiently of this latter filter
        if self.strip_whitespace:
            from .filters.whitespace import Filter
            treewalker = Filter(treewalker)
        if self.sanitize:
            from .filters.sanitizer import Filter
            treewalker = Filter(treewalker)
        if self.omit_optional_tags:
            from .filters.optionaltags import Filter
            treewalker = Filter(treewalker)

        for token in treewalker:
            token_type = token["type"]
            if token_type == "Doctype":
                doctype = "<!DOCTYPE %s" % token["name"]

                if token["publicId"]:
                    doctype += ' PUBLIC "%s"' % token["publicId"]
                elif token["systemId"]:
                    doctype += " SYSTEM"
                if token["systemId"]:
                    if token["systemId"].find('"') >= 0:
                        if token["systemId"].find("'") >= 0:
                            self.serializeError("System identifer contains both single and double quote characters")
                        quote_char = "'"
                    else:
                        quote_char = '"'
                    doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)

                doctype += ">"
                yield self.encodeStrict(doctype)

            elif token_type in ("Characters", "SpaceCharacters"):
                if token_type == "SpaceCharacters" or in_cdata:
                    # Text inside rcdata elements is emitted verbatim.
                    if in_cdata and token["data"].find("</") >= 0:
                        self.serializeError("Unexpected </ in CDATA")
                    yield self.encode(token["data"])
                else:
                    yield self.encode(escape(token["data"]))

            elif token_type in ("StartTag", "EmptyTag"):
                name = token["name"]
                yield self.encodeStrict("<%s" % name)
                if name in rcdataElements and not self.escape_rcdata:
                    in_cdata = True
                elif in_cdata:
                    self.serializeError("Unexpected child element of a CDATA element")
                for (_, attr_name), attr_value in token["data"].items():
                    # TODO: Add namespace support here
                    k = attr_name
                    v = attr_value
                    yield self.encodeStrict(' ')

                    yield self.encodeStrict(k)
                    if not self.minimize_boolean_attributes or \
                        (k not in booleanAttributes.get(name, tuple()) and
                         k not in booleanAttributes.get("", tuple())):
                        yield self.encodeStrict("=")
                        if self.quote_attr_values == "always" or len(v) == 0:
                            quote_attr = True
                        elif self.quote_attr_values == "spec":
                            quote_attr = _quoteAttributeSpec.search(v) is not None
                        elif self.quote_attr_values == "legacy":
                            quote_attr = _quoteAttributeLegacy.search(v) is not None
                        else:
                            raise ValueError("quote_attr_values must be one of: "
                                             "'always', 'spec', or 'legacy'")
                        # Escape the value; '&' first so later entities survive.
                        v = v.replace("&", "&amp;")
                        if self.escape_lt_in_attrs:
                            v = v.replace("<", "&lt;")
                        if quote_attr:
                            quote_char = self.quote_char
                            if self.use_best_quote_char:
                                # Pick whichever quote needs no escaping.
                                if "'" in v and '"' not in v:
                                    quote_char = '"'
                                elif '"' in v and "'" not in v:
                                    quote_char = "'"
                            if quote_char == "'":
                                v = v.replace("'", "&#39;")
                            else:
                                v = v.replace('"', "&quot;")
                            yield self.encodeStrict(quote_char)
                            yield self.encode(v)
                            yield self.encodeStrict(quote_char)
                        else:
                            yield self.encode(v)
                if name in voidElements and self.use_trailing_solidus:
                    if self.space_before_trailing_solidus:
                        yield self.encodeStrict(" /")
                    else:
                        yield self.encodeStrict("/")
                yield self.encode(">")

            elif token_type == "EndTag":
                name = token["name"]
                if name in rcdataElements:
                    in_cdata = False
                elif in_cdata:
                    self.serializeError("Unexpected child element of a CDATA element")
                yield self.encodeStrict("</%s>" % name)

            elif token_type == "Comment":
                data = token["data"]
                if data.find("--") >= 0:
                    self.serializeError("Comment contains --")
                yield self.encodeStrict("<!--%s-->" % token["data"])

            elif token_type == "Entity":
                name = token["name"]
                key = name + ";"
                if key not in entities:
                    self.serializeError("Entity %s not recognized" % name)
                if self.resolve_entities and key not in xmlEntities:
                    # NOTE(review): if the entity was unrecognized above (and
                    # strict mode is off) this lookup raises KeyError — confirm
                    # whether unknown entities should pass through instead.
                    data = entities[key]
                else:
                    data = "&%s;" % name
                yield self.encodeStrict(data)

            else:
                self.serializeError(token["data"])

    def render(self, treewalker, encoding=None):
        """Serializes the stream from the treewalker into a string

        :arg treewalker: the treewalker to serialize

        :arg encoding: the string encoding to use

        :returns: the serialized tree

        Example:

        >>> from html5lib import parse, getTreeWalker
        >>> from html5lib.serializer import HTMLSerializer
        >>> token_stream = parse('<html><body>Hi!</body></html>')
        >>> walker = getTreeWalker('etree')
        >>> serializer = HTMLSerializer(omit_optional_tags=False)
        >>> serializer.render(walker(token_stream))
        '<html><head></head><body>Hi!</body></html>'
        """
        if encoding:
            return b"".join(list(self.serialize(treewalker, encoding)))
        else:
            return "".join(list(self.serialize(treewalker)))

    def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
        # XXX The idea is to make data mandatory.
        # Record the error; in strict mode it is fatal.
        self.errors.append(data)
        if self.strict:
            raise SerializeError
class SerializeError(Exception):
    """Error in serialized tree (raised only when ``strict`` is set)."""
| mit |
Tithen-Firion/youtube-dl | youtube_dl/extractor/ustudio.py | 64 | 4392 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
unescapeHTML,
)
class UstudioIE(InfoExtractor):
    """Extract videos from ustudio.com watch pages.

    Formats and thumbnails come from an XML config document served by
    v1.ustudio.com; uploader and upload date are scraped from the HTML
    page itself.
    """
    IE_NAME = 'ustudio'
    _VALID_URL = r'https?://(?:(?:www|v1)\.)?ustudio\.com/video/(?P<id>[^/]+)/(?P<display_id>[^/?#&]+)'
    _TEST = {
        'url': 'http://ustudio.com/video/Uxu2my9bgSph/san_francisco_golden_gate_bridge',
        'md5': '58bbfca62125378742df01fc2abbdef6',
        'info_dict': {
            'id': 'Uxu2my9bgSph',
            'display_id': 'san_francisco_golden_gate_bridge',
            'ext': 'mp4',
            'title': 'San Francisco: Golden Gate Bridge',
            'description': 'md5:23925500697f2c6d4830e387ba51a9be',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20111107',
            'uploader': 'Tony Farley',
        }
    }

    def _real_extract(self, url):
        # _VALID_URL captures both the opaque video id and the readable slug.
        video_id, display_id = re.match(self._VALID_URL, url).groups()

        config = self._download_xml(
            'http://v1.ustudio.com/embed/%s/ustudio/config.xml' % video_id,
            display_id)

        def extract(kind):
            # Collect every <quality>/<kind> node that has a URL;
            # kind is 'video' (formats) or 'image' (thumbnails).
            return [{
                'url': unescapeHTML(item.attrib['url']),
                'width': int_or_none(item.get('width')),
                'height': int_or_none(item.get('height')),
            } for item in config.findall('./qualities/quality/%s' % kind) if item.get('url')]

        formats = extract('video')
        self._sort_formats(formats)

        webpage = self._download_webpage(url, display_id)

        title = self._og_search_title(webpage)
        # Both page scrapes are best-effort (fatal=False): missing metadata
        # degrades to None rather than failing the extraction.
        upload_date = unified_strdate(self._search_regex(
            r'(?s)Uploaded by\s*.+?\s*on\s*<span>([^<]+)</span>',
            webpage, 'upload date', fatal=False))
        uploader = self._search_regex(
            r'Uploaded by\s*<a[^>]*>([^<]+)<',
            webpage, 'uploader', fatal=False)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': self._og_search_description(webpage),
            'thumbnails': extract('image'),
            'upload_date': upload_date,
            'uploader': uploader,
            'formats': formats,
        }
class UstudioEmbedIE(InfoExtractor):
    """Extract videos from app/embed.ustudio.com embed players.

    All metadata comes from a JSON config document; only the first entry
    of its 'videos' list is used.
    """
    IE_NAME = 'ustudio:embed'
    _VALID_URL = r'https?://(?:(?:app|embed)\.)?ustudio\.com/embed/(?P<uid>[^/]+)/(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://app.ustudio.com/embed/DeN7VdYRDKhP/Uw7G1kMCe65T',
        'md5': '47c0be52a09b23a7f40de9469cec58f4',
        'info_dict': {
            'id': 'Uw7G1kMCe65T',
            'ext': 'mp4',
            'title': '5 Things IT Should Know About Video',
            'description': 'md5:93d32650884b500115e158c5677d25ad',
            'uploader_id': 'DeN7VdYRDKhP',
        }
    }

    def _real_extract(self, url):
        # The embed URL carries the uploader id and the video id.
        uploader_id, video_id = re.match(self._VALID_URL, url).groups()
        video_data = self._download_json(
            'http://app.ustudio.com/embed/%s/%s/config.json' % (uploader_id, video_id),
            video_id)['videos'][0]
        title = video_data['name']

        # 'transcodes' maps container/ext -> list of quality variants.
        formats = []
        for ext, qualities in video_data.get('transcodes', {}).items():
            for quality in qualities:
                quality_url = quality.get('url')
                if not quality_url:
                    continue
                height = int_or_none(quality.get('height'))
                formats.append({
                    'format_id': '%s-%dp' % (ext, height) if height else ext,
                    'url': quality_url,
                    'width': int_or_none(quality.get('width')),
                    'height': height,
                })
        self._sort_formats(formats)

        thumbnails = []
        for image in video_data.get('images', []):
            image_url = image.get('url')
            if not image_url:
                continue
            thumbnails.append({
                'url': image_url,
            })

        return {
            'id': video_id,
            'title': title,
            'description': video_data.get('description'),
            'duration': int_or_none(video_data.get('duration')),
            'uploader_id': uploader_id,
            'tags': video_data.get('keywords'),
            'thumbnails': thumbnails,
            'formats': formats,
        }
| unlicense |
dsullivan7/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4: each step sums 2x2 pixel blocks,
# halving both dimensions, and is applied twice.
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]

# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)

# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6  # keeps edge weights strictly positive
graph.data = np.exp(-beta * graph.data / lena.std()) + eps

# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11

###############################################################################
# Visualize the resulting regions

# Compare both label-assignment strategies and time each run.
for assign_labels in ('kmeans', 'discretize'):
    t0 = time.time()
    labels = spectral_clustering(graph, n_clusters=N_REGIONS,
                                 assign_labels=assign_labels,
                                 random_state=1)
    t1 = time.time()
    labels = labels.reshape(lena.shape)

    plt.figure(figsize=(5, 5))
    plt.imshow(lena, cmap=plt.cm.gray)
    for l in range(N_REGIONS):
        # Outline each cluster in a distinct spectral color.
        # NOTE(review): 'contours' is not a documented plt.contour keyword
        # (cf. 'levels'); confirm this renders as intended on the targeted
        # matplotlib version.
        plt.contour(labels == l, contours=1,
                    colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
    plt.xticks(())
    plt.yticks(())
    plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
uniphil/heroku-buildpack-pythonsass | vendor/setuptools-2.1/setup.py | 5 | 7973 | #!/usr/bin/env python
"""Distutils setup file, used to install or test 'setuptools'"""
import sys
import os
import textwrap
# Allow to run setup.py from another directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
src_root = None
from distutils.util import convert_path
# Load the list of setuptools commands by exec-ing the command package's
# __init__ into a scratch namespace, so setup.py does not have to import
# the (possibly not-yet-installed) package itself.
command_ns = {}
init_path = convert_path('setuptools/command/__init__.py')
with open(init_path) as init_file:
    exec(init_file.read(), command_ns)
SETUP_COMMANDS = command_ns['__all__']
# Same trick for the version number: read __version__ from version.py
# without importing setuptools' package machinery.
main_ns = {}
ver_path = convert_path('setuptools/version.py')
with open(ver_path) as ver_file:
    exec(ver_file.read(), main_ns)
import setuptools
from setuptools.command.build_py import build_py as _build_py
from setuptools.command.test import test as _test
scripts = []
def _gen_console_scripts():
yield "easy_install = setuptools.command.easy_install:main"
# Gentoo distributions manage the python-version-specific scripts
# themselves, so those platforms define an environment variable to
# suppress the creation of the version-specific scripts.
var_names = (
'SETUPTOOLS_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',
'DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',
)
if any(os.environ.get(var) not in (None, "", "0") for var in var_names):
return
yield ("easy_install-{shortver} = setuptools.command.easy_install:main"
.format(shortver=sys.version[:3]))
console_scripts = list(_gen_console_scripts())
# specific command that is used to generate windows .exe files
class build_py(_build_py):
    def build_package_data(self):
        """Copy data files into build directory.

        Overrides the stock implementation to also resolve each source
        file to an absolute path after copying.
        """
        for package, src_dir, build_dir, filenames in self.data_files:
            for filename in filenames:
                target = os.path.join(build_dir, filename)
                # Ensure the destination directory exists before copying.
                self.mkpath(os.path.dirname(target))
                srcfile = os.path.join(src_dir, filename)
                outf, copied = self.copy_file(srcfile, target)
                srcfile = os.path.abspath(srcfile)
class test(_test):
    """Specific test class to avoid rewriting the entry_points.txt"""
    def run(self):
        entry_points = os.path.join('setuptools.egg-info', 'entry_points.txt')
        if not os.path.exists(entry_points):
            _test.run(self)
            return # even though _test.run will raise SystemExit
        # save the content
        with open(entry_points) as f:
            ep_content = f.read()
        # run the test
        try:
            _test.run(self)
        finally:
            # restore the file, since running the tests may have
            # regenerated/modified entry_points.txt as a side effect
            with open(entry_points, 'w') as f:
                f.write(ep_content)
readme_file = open('README.txt')
# the release script adds hyperlinks to issues
if os.path.exists('CHANGES (links).txt'):
    changes_file = open('CHANGES (links).txt')
else:
    # but if the release script has not run, fall back to the source file
    changes_file = open('CHANGES.txt')
# Concatenate README + CHANGES into the PyPI long description; the 'with'
# blocks close both files after reading.
with readme_file:
    with changes_file:
        long_description = readme_file.read() + '\n' + changes_file.read()

package_data = {'setuptools': ['site-patch.py']}
# Windows-specific launcher binaries are included on win32, or anywhere if
# the environment variable below is set to a non-empty, non-"0" value
# (useful for cross-building Windows packages on other platforms).
force_windows_specific_files = (
    os.environ.get("SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES")
    not in (None, "", "0")
)
if sys.platform == 'win32' or force_windows_specific_files:
    package_data.setdefault('setuptools', []).extend(['*.exe'])
    package_data.setdefault('setuptools.command', []).extend(['*.xml'])
# Keyword arguments for the setuptools.setup() call below, kept in a dict
# so the __main__ guard can pass them along unchanged.
setup_params = dict(
    name="setuptools",
    version=main_ns['__version__'],
    description="Easily download, build, install, upgrade, and uninstall "
        "Python packages",
    author="Python Packaging Authority",
    author_email="distutils-sig@python.org",
    license="PSF or ZPL",
    long_description = long_description,
    keywords = "CPAN PyPI distutils eggs package management",
    url = "https://pypi.python.org/pypi/setuptools",
    test_suite = 'setuptools.tests',
    src_root = src_root,
    packages = setuptools.find_packages(),
    package_data = package_data,
    py_modules = ['pkg_resources', 'easy_install'],
    zip_safe = (sys.version>="2.5"), # <2.5 needs unzipped for -m to work
    cmdclass = {'test': test},
    entry_points = {
        # One distutils command entry point per command module discovered
        # from setuptools/command/__init__.py earlier in this file.
        "distutils.commands": [
            "%(cmd)s = setuptools.command.%(cmd)s:%(cmd)s" % locals()
            for cmd in SETUP_COMMANDS
        ],
        # Validators for the setup() keywords setuptools adds to distutils.
        "distutils.setup_keywords": [
            "eager_resources = setuptools.dist:assert_string_list",
            "namespace_packages = setuptools.dist:check_nsp",
            "extras_require = setuptools.dist:check_extras",
            "install_requires = setuptools.dist:check_requirements",
            "tests_require = setuptools.dist:check_requirements",
            "entry_points = setuptools.dist:check_entry_points",
            "test_suite = setuptools.dist:check_test_suite",
            "zip_safe = setuptools.dist:assert_bool",
            "package_data = setuptools.dist:check_package_data",
            "exclude_package_data = setuptools.dist:check_package_data",
            "include_package_data = setuptools.dist:assert_bool",
            "packages = setuptools.dist:check_packages",
            "dependency_links = setuptools.dist:assert_string_list",
            "test_loader = setuptools.dist:check_importable",
            "use_2to3 = setuptools.dist:assert_bool",
            "convert_2to3_doctests = setuptools.dist:assert_string_list",
            "use_2to3_fixers = setuptools.dist:assert_string_list",
            "use_2to3_exclude_fixers = setuptools.dist:assert_string_list",
        ],
        # Writers for the metadata files placed inside *.egg-info.
        "egg_info.writers": [
            "PKG-INFO = setuptools.command.egg_info:write_pkg_info",
            "requires.txt = setuptools.command.egg_info:write_requirements",
            "entry_points.txt = setuptools.command.egg_info:write_entries",
            "eager_resources.txt = setuptools.command.egg_info:overwrite_arg",
            "namespace_packages.txt = setuptools.command.egg_info:overwrite_arg",
            "top_level.txt = setuptools.command.egg_info:write_toplevel_names",
            "depends.txt = setuptools.command.egg_info:warn_depends_obsolete",
            "dependency_links.txt = setuptools.command.egg_info:overwrite_arg",
        ],
        "console_scripts": console_scripts,
        "setuptools.file_finders":
            ["svn_cvs = setuptools.command.sdist:_default_revctrl"],
        "setuptools.installation":
            ['eggsecutable = setuptools.command.easy_install:bootstrap'],
    },
    classifiers = textwrap.dedent("""
        Development Status :: 5 - Production/Stable
        Intended Audience :: Developers
        License :: OSI Approved :: Python Software Foundation License
        License :: OSI Approved :: Zope Public License
        Operating System :: OS Independent
        Programming Language :: Python :: 2.6
        Programming Language :: Python :: 2.7
        Programming Language :: Python :: 3
        Programming Language :: Python :: 3.1
        Programming Language :: Python :: 3.2
        Programming Language :: Python :: 3.3
        Topic :: Software Development :: Libraries :: Python Modules
        Topic :: System :: Archiving :: Packaging
        Topic :: System :: Systems Administration
        Topic :: Utilities
        """).strip().splitlines(),
    extras_require = {
        "ssl:sys_platform=='win32'": "wincertstore==0.1",
        "certs": "certifi==0.0.8",
    },
    dependency_links = [
        'https://pypi.python.org/packages/source/c/certifi/certifi-0.0.8.tar.gz#md5=dc5f5e7f0b5fc08d27654b17daa6ecec',
        'https://pypi.python.org/packages/source/w/wincertstore/wincertstore-0.1.zip#md5=2f9accbebe8f7b4c06ac7aa83879b81c',
    ],
    scripts = [],
    # tests_require = "setuptools[ssl]",
)

if __name__ == '__main__':
    # Only run setup() when executed as a script, so this module can be
    # imported (e.g. by release tooling) without side effects.
    dist = setuptools.setup(**setup_params)
| mit |
msabramo/ansible | lib/ansible/module_utils/ansible_tower.py | 50 | 3887 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Wayne Witzel III <wayne@riotousliving.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import tower_cli.utils.exceptions as exc
from tower_cli.utils import parser
from tower_cli.api import client
def tower_auth_config(module):
    '''Build the tower-cli auth configuration for *module*.

    tower_auth_config attempts to load the tower-cli.cfg file
    specified from the `tower_config_file` parameter. If found,
    it returns the contents of the file as a dictionary, else
    it will attempt to fetch values from the module params and
    only pass those values that have been set.
    '''
    config_file = module.params.get('tower_config_file')
    if config_file:
        config_file = os.path.expanduser(config_file)
        if not os.path.exists(config_file):
            module.fail_json(msg='file not found: %s' % config_file)
        if os.path.isdir(config_file):
            module.fail_json(msg='directory can not be used as config file: %s' % config_file)

        with open(config_file, 'rb') as f:
            return parser.string_to_dict(f.read())
    else:
        auth_config = {}
        host = module.params.get('tower_host')
        if host:
            auth_config['host'] = host
        username = module.params.get('tower_username')
        if username:
            auth_config['username'] = username
        password = module.params.get('tower_password')
        if password:
            auth_config['password'] = password
        verify_ssl = module.params.get('tower_verify_ssl')
        # BUG FIX: 'tower_verify_ssl' is a boolean, so the previous plain
        # truthiness test silently dropped an explicit False, falling back
        # to the tower-cli default.  Only an unset (None) value is omitted.
        if verify_ssl is not None:
            auth_config['verify_ssl'] = verify_ssl
        return auth_config
def tower_check_mode(module):
    '''Execute check mode logic for Ansible Tower modules'''
    if module.check_mode:
        try:
            # In check mode only verify connectivity: hit Tower's /ping
            # endpoint and report its version instead of making changes.
            result = client.get('/ping').json()
            module.exit_json(changed=True, tower_version='{0}'.format(result['version']))
        except (exc.ServerError, exc.ConnectionError, exc.BadRequest) as excinfo:
            module.fail_json(changed=False, msg='Failed check mode: {0}'.format(excinfo))
def tower_argument_spec():
    """Return the argument spec shared by all Ansible Tower modules."""
    return {
        'tower_host': {},
        'tower_username': {},
        'tower_password': {'no_log': True},
        'tower_verify_ssl': {'type': 'bool', 'default': True},
        'tower_config_file': {'type': 'path'},
    }
| gpl-3.0 |
cchristelis/feti | django_project/core/wsgi.py | 2 | 2100 | # coding=utf-8
"""
WSGI config for projecta project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We put this here so that low level uwsgi errors also get reported
# noinspection PyUnresolvedReferences
# pylint:
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry  # noqa
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "feti.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
# Customised by Tim so we can access env vars set in apache
import django.core.handlers.wsgi  # noqa
# The real Django WSGI handler; wrapped by the application() function below.
_application = get_wsgi_application()
def application(environ, start_response):
    """WSGI entry point that forwards each request to the Django handler.

    Wrapping the Django handler in a plain function means the per-request
    environment set by the web server (e.g. apache SetEnv vars) is visible
    when the request is processed.

    :param environ: os environment passed in by web server.
    :type environ: dict
    :param start_response: WSGI start_response callable supplied by the
        server.
    """
    wsgi_handler = _application
    return wsgi_handler(environ, start_response)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| bsd-2-clause |
YannickB/odoo-hosting | clouder_website/__manifest__.py | 1 | 1442 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron, Nicolas Petit
# Copyright 2015, TODAY Clouder SASU
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License with Attribution
# clause as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License with
# Attribution clause along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Odoo addon manifest for the Clouder Website module.
    'name': 'Clouder Website',
    'version': '10.0.10.0.0',
    'category': 'Clouder',
    # Modules that must be installed before this one.
    'depends': ['base', 'auth_signup', 'clouder'],
    'author': 'Yannick Buron (Clouder), Nicolas Petit',
    'license': 'LGPL-3',
    'website': 'https://github.com/clouder-community/clouder',
    'demo': [],
    # Data files loaded on install/upgrade, in order.
    'data': [
        'security/ir.model.access.csv',
        'clouder_website_view.xml',
        'templates.xml'
    ],
    'installable': True,
    'application': True,
}
| agpl-3.0 |
nikhilprathapani/python-for-android | python-modules/twisted/twisted/conch/client/direct.py | 60 | 3234 | # Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.internet import defer, protocol, reactor
from twisted.conch import error
from twisted.conch.ssh import transport
from twisted.python import log
class SSHClientFactory(protocol.ClientFactory):
    """Client factory that configures SSH transports from conch options.

    The deferred *d* fires once the connection is authenticated, or
    errbacks on connection failure; it is fired at most once.
    """

    def __init__(self, d, options, verifyHostKey, userAuthObject):
        self.d = d
        self.options = options
        self.verifyHostKey = verifyHostKey
        self.userAuthObject = userAuthObject

    def clientConnectionLost(self, connector, reason):
        # Optionally reconnect (e.g. for long-lived sessions).
        if self.options['reconnect']:
            connector.connect()

    def clientConnectionFailed(self, connector, reason):
        # Fire the deferred exactly once: clear it before errback-ing.
        if self.d is None:
            return
        d, self.d = self.d, None
        d.errback(reason)

    def buildProtocol(self, addr):
        # Apply any user-specified algorithm preferences to the transport.
        trans = SSHClientTransport(self)
        if self.options['ciphers']:
            trans.supportedCiphers = self.options['ciphers']
        if self.options['macs']:
            trans.supportedMACs = self.options['macs']
        if self.options['compress']:
            # Prefer zlib compression over the default first entry.
            trans.supportedCompressions[0:1] = ['zlib']
        if self.options['host-key-algorithms']:
            trans.supportedPublicKeys = self.options['host-key-algorithms']
        return trans
class SSHClientTransport(transport.SSHClientTransport):
    """Client-side SSH transport reporting results via the factory deferred.

    NOTE(review): this module is Python 2 code (see the bare ``print``
    statement in receiveDebug).
    """

    def __init__(self, factory):
        self.factory = factory
        # Optional auxiliary UNIX socket server; stopped on connection loss
        # if set.
        self.unixServer = None

    def connectionLost(self, reason):
        # Stop the auxiliary UNIX server first (if any), then propagate the
        # loss to the base transport once that shutdown has completed.
        if self.unixServer:
            d = self.unixServer.stopListening()
            self.unixServer = None
        else:
            d = defer.succeed(None)
        d.addCallback(lambda x:
            transport.SSHClientTransport.connectionLost(self, reason))

    def receiveError(self, code, desc):
        # Fail the factory's deferred exactly once with the remote error.
        if self.factory.d is None:
            return
        d, self.factory.d = self.factory.d, None
        d.errback(error.ConchError(desc, code))

    def sendDisconnect(self, code, reason):
        # Mirror receiveError for locally-initiated disconnects; the base
        # sendDisconnect is still invoked to notify the peer.
        if self.factory.d is None:
            return
        d, self.factory.d = self.factory.d, None
        transport.SSHClientTransport.sendDisconnect(self, code, reason)
        d.errback(error.ConchError(reason, code))

    def receiveDebug(self, alwaysDisplay, message, lang):
        log.msg('Received Debug Message: %s' % message)
        if alwaysDisplay: # XXX what should happen here?
            print message

    def verifyHostKey(self, pubKey, fingerprint):
        # Delegate host-key verification to the callable supplied by the
        # caller of connect().
        return self.factory.verifyHostKey(self, self.transport.getPeer().host, pubKey,
                                          fingerprint)

    def setService(self, service):
        log.msg('setting client server to %s' % service)
        transport.SSHClientTransport.setService(self, service)
        # Once authentication completes (service switches away from
        # ssh-userauth), the connection is usable: fire the deferred.
        if service.name != 'ssh-userauth' and self.factory.d is not None:
            d, self.factory.d = self.factory.d, None
            d.callback(None)

    def connectionSecure(self):
        # Key exchange finished; start the user-auth service.
        self.requestService(self.factory.userAuthObject)
def connect(host, port, options, verifyHostKey, userAuthObject):
    """Open an SSH connection and return a Deferred.

    The deferred fires with None once the connection is authenticated,
    or errbacks on connection failure / remote error.
    """
    d = defer.Deferred()
    factory = SSHClientFactory(d, options, verifyHostKey, userAuthObject)
    reactor.connectTCP(host, port, factory)
    return d
| apache-2.0 |
antring/RssThingyServer | server.py | 1 | 1585 | __author__ = 'Antring'
import urllib.request
import os
import feedparser
import datetime
from time import *
class rssReader:
    '''Class for finding and downloading podcasts from an RSS feed.'''

    def __init__(self, url):
        # Parse the feed once and cache the newest entry's link/title plus
        # the total number of entries.
        self.rssurl = url
        self.podstream = feedparser.parse(self.rssurl)
        self.podLink = self.podstream.entries[0].link
        self.podName = self.podstream.entries[0].title
        self.rsslen = len(self.podstream.entries)

    def checker(self):
        '''Checking if a podcast episode is already downloaded'''
        curDir = os.getcwd()  # Get current working dir
        # TODO Write rest of this...

    def downloader(self, podcasturl, filename):
        '''For downloading podcasts given a url to the .mp3 and the filename that it should be stored with.

        The episode is saved as ``<filename>.mp3`` in the current directory
        and the outcome (success or failure) is appended to ``log.txt``.
        '''
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
        try:
            urllib.request.urlretrieve(podcasturl, filename + '.mp3')
            # 'with' guarantees the log file is closed even if write() fails.
            with open('log.txt', 'a') as f:
                f.write('(+)Downloaded ' + filename + ' - ' + timestamp + '\n')
        except IOError as err:
            # BUG FIX: the original concatenated the IOError *class* into the
            # log string, which raised a TypeError and masked the real error.
            # Log the caught exception's message instead.
            with open('log.txt', 'a') as f:
                f.write('(!)Downloader error ' + str(err) + ' - ' + timestamp + '\n')

    def infoprinter(self):
        '''Prints information about the podcast'''
        print("PodLink: ", self.podLink, "\nPodName: ", self.podName, "\nRSSLen: ", self.rsslen)
if __name__ == '__main__':
    # Demo: print feed details for the "Misjonen" podcast on P4.
    misjonen = "http://www.p4.no/lyttesenter/podcast.ashx?pid=330"
    podcast1 = rssReader(misjonen)
    podcast1.infoprinter()
cosma/phest | fabfile.py | 1 | 3679 | from __future__ import print_function
from fabric.api import local, run, env, parallel, roles, hide, sudo, hosts, \
task, execute, cd, warn_only
from fabric.contrib.files import exists
from fabric.contrib.console import confirm
from fabric.colors import red, green, yellow
from datetime import timedelta
import json
import os
import sys
# Fabric connection settings: honour the user's ~/.ssh/config and forward
# keyboard interrupts to remote commands.
env.use_ssh_config = True
env.remote_interrupt = True
def ensure_module(module_name):
    """Ensure *module_name* is importable, pip-installing it if missing."""
    try:
        __import__(module_name)
    except ImportError:
        # Not importable: install it with the pip of the interpreter that is
        # running this fabfile.
        python = sys.executable
        message = ("Could not find {module_name}."
                   " Installing it for you now:").format(module_name=module_name)
        print(red(message))
        local('sudo {python} -m pip install {module_name}'.format(
            python=python, module_name=module_name))
def get_current_tag():
    """Return the highest existing git tag (by version sort) as a string."""
    local('git fetch')
    # --sort version:refname orders tags numerically, so tail -n1 is the
    # latest version.
    current_tag = local('git tag --sort version:refname | tail -n1',
                        capture=True)
    return current_tag
def get_next_tag():
    """Return the current tag with its patch (third) component incremented."""
    parts = get_current_tag().split('.')
    parts[2] = str(int(parts[2]) + 1)
    return '.'.join(parts)
@task
def tag(tag=None):
    '''create a tagged from master'''
    ensure_module('requests')
    branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
    # Tagging from a non-master branch is allowed only after confirmation.
    if 'master' != branch:
        if not confirm(
                'You are not in master branch.'
                ' Do you REALLY want to continue?',
                default=False
        ):
            sys.exit(0)
    if not tag:
        tag = get_next_tag()
    import requests
    # Check the latest CircleCI build status for this branch before tagging.
    # SECURITY NOTE(review): the circle-token below is hard-coded in source;
    # it should live in an environment variable or config file instead.
    if requests.get(
            'https://circleci.com/api/v1/project/cosma/phest/'
            'tree/{branch}?limit=1&'
            'circle-token=8accb19a030b5c34c84f22616602eea4846472d2'.format(
                **locals()), headers={
                'Accept': 'application/json'}
    ).json()[0]['status'] not in ['fixed', 'success']:
        if not confirm(
                'CircleCI is not green. Do you REALLY want to continue?',
                default=False
        ):
            sys.exit(0)
    # Final confirmation, then create and push the tag.
    if confirm('do you want to create tag {tag}?'.format(
            tag=green(tag)
    )):
        local('git tag {}'.format(tag))
        local('git push --tags')
        local('git push')
@task
def git_cleanup():
    '''cleanup git locally and remote'''
    local('git fetch')
    print("Deleting remote tracking branches whose "
          "tracked branches on server are gone...")
    local('git remote prune origin')
    print("Searching all remote branches except master "
          "that are already merged into master...")
    get_remote_merged_branches = None
    # NOTE(review): 'grep -v master' appears twice; the second occurrence
    # looks like it was meant to exclude a different branch name -- confirm.
    get_remote_merged_branches = local(
        'git branch -r --merged origin/master'
        ' | grep -v master | grep -v master || exit 0',
        capture=True)
    if get_remote_merged_branches:
        print(get_remote_merged_branches)
        if confirm("Do you want to delete those branches on the server?"):
            print("Deleting...")
            # Strip the 'origin/' prefix and push an empty ref to delete
            # each branch on the remote.
            local("echo '{}' | sed 's#origin/##' | xargs -I {{}}"
                  " git push origin :{{}}".format(
                      get_remote_merged_branches))
            local('git remote prune origin')
        else:
            print("ok, will not delete anything.")
    else:
        print('No remote merged branches found')
    print("Deleting all local branches (except current)"
          " that are already merged into local master...")
    local("git branch --merged master | grep -v master | grep -v master "
          "| grep -v '\*' | xargs git branch -d")
    print("Checking for unmerged local branches...")
    local('git branch --no-merged master')
| mit |
Midrya/chromium | testing_support/super_mox.py | 25 | 5140 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Simplify unit tests based on pymox."""
import os
import random
import shutil
import string
import StringIO
import subprocess
import sys
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from third_party.pymox import mox
class IsOneOf(mox.Comparator):
  """pymox comparator that matches any value contained in *keys*."""

  def __init__(self, keys):
    self._keys = keys

  def equals(self, rhs):
    # Membership test: matches when rhs is one of the allowed keys.
    return rhs in self._keys

  def __repr__(self):
    return '<sequence or map containing \'%s\'>' % str(self._keys)
class TestCaseUtils(object):
  """Base class with some additional functionalities. People will usually want
  to use SuperMoxTestBase instead.

  NOTE(review): Python 2 code (string.letters, xrange, print >>).
  """
  # Backup the separator in case it gets mocked
  _OS_SEP = os.sep
  # Back up these stdlib callables too, for the same reason: tests may mock
  # the os/random/string modules themselves.
  _RANDOM_CHOICE = random.choice
  _RANDOM_RANDINT = random.randint
  _STRING_LETTERS = string.letters

  ## Some utilities for generating arbitrary arguments.
  def String(self, max_length):
    # Random ASCII-letter string of length 1..max_length.
    return ''.join([self._RANDOM_CHOICE(self._STRING_LETTERS)
                    for _ in xrange(self._RANDOM_RANDINT(1, max_length))])

  def Strings(self, max_arg_count, max_arg_length):
    return [self.String(max_arg_length) for _ in xrange(max_arg_count)]

  def Args(self, max_arg_count=8, max_arg_length=16):
    return self.Strings(max_arg_count,
                        self._RANDOM_RANDINT(1, max_arg_length))

  def _DirElts(self, max_elt_count=4, max_elt_length=8):
    # Random path made of random elements joined by the (backed-up) os.sep.
    return self._OS_SEP.join(self.Strings(max_elt_count, max_elt_length))

  def Dir(self, max_elt_count=4, max_elt_length=8):
    # Random path, randomly absolute or relative.
    return (self._RANDOM_CHOICE((self._OS_SEP, '')) +
            self._DirElts(max_elt_count, max_elt_length))

  def RootDir(self, max_elt_count=4, max_elt_length=8):
    # Random absolute path.
    return self._OS_SEP + self._DirElts(max_elt_count, max_elt_length)

  def compareMembers(self, obj, members):
    """If you add a member, be sure to add the relevant test!"""
    # Skip over members starting with '_' since they are usually not meant to
    # be for public use.
    actual_members = [x for x in sorted(dir(obj))
                      if not x.startswith('_')]
    expected_members = sorted(members)
    if actual_members != expected_members:
      # Print the symmetric difference to ease debugging before failing.
      diff = ([i for i in actual_members if i not in expected_members] +
              [i for i in expected_members if i not in actual_members])
      print >> sys.stderr, diff
    # pylint: disable=E1101
    self.assertEqual(actual_members, expected_members)

  def setUp(self):
    # Pre-generate a few random fixtures commonly used by subclasses.
    self.root_dir = self.Dir()
    self.args = self.Args()
    self.relpath = self.String(200)

  def tearDown(self):
    pass
class StdoutCheck(object):
  """Mixin that captures sys.stdout and verifies it was fully consumed."""

  def setUp(self):
    # Override the mock with a StringIO, it's much less painful to test.
    self._old_stdout = sys.stdout
    stdout = StringIO.StringIO()
    stdout.flush = lambda: None
    sys.stdout = stdout

  def tearDown(self):
    try:
      # If sys.stdout was used, self.checkstdout() must be called.
      # pylint: disable=E1101
      if not sys.stdout.closed:
        self.assertEquals('', sys.stdout.getvalue())
    except AttributeError:
      pass
    # Always restore the real stdout, even if the assertion above fired.
    sys.stdout = self._old_stdout

  def checkstdout(self, expected):
    """Assert captured stdout equals *expected* and close the buffer."""
    value = sys.stdout.getvalue()
    sys.stdout.close()
    # pylint: disable=E1101
    self.assertEquals(expected, value)
class SuperMoxTestBase(TestCaseUtils, StdoutCheck, mox.MoxTestBase):
def setUp(self):
"""Patch a few functions with know side-effects."""
TestCaseUtils.setUp(self)
mox.MoxTestBase.setUp(self)
os_to_mock = ('chdir', 'chown', 'close', 'closerange', 'dup', 'dup2',
'fchdir', 'fchmod', 'fchown', 'fdopen', 'getcwd', 'lseek',
'makedirs', 'mkdir', 'open', 'popen', 'popen2', 'popen3', 'popen4',
'read', 'remove', 'removedirs', 'rename', 'renames', 'rmdir', 'symlink',
'system', 'tmpfile', 'walk', 'write')
self.MockList(os, os_to_mock)
os_path_to_mock = ('abspath', 'exists', 'getsize', 'isdir', 'isfile',
'islink', 'ismount', 'lexists', 'realpath', 'samefile', 'walk')
self.MockList(os.path, os_path_to_mock)
self.MockList(shutil, ('rmtree'))
self.MockList(subprocess, ('call', 'Popen'))
# Don't mock stderr since it confuses unittests.
self.MockList(sys, ('stdin'))
StdoutCheck.setUp(self)
def tearDown(self):
StdoutCheck.tearDown(self)
TestCaseUtils.tearDown(self)
mox.MoxTestBase.tearDown(self)
def MockList(self, parent, items_to_mock):
for item in items_to_mock:
# Skip over items not present because of OS-specific implementation,
# implemented only in later python version, etc.
if hasattr(parent, item):
try:
self.mox.StubOutWithMock(parent, item)
except TypeError, e:
raise TypeError(
'Couldn\'t mock %s in %s: %s' % (item, parent.__name__, e))
def UnMock(self, obj, name):
"""Restore an object inside a test."""
for (parent, old_child, child_name) in self.mox.stubs.cache:
if parent == obj and child_name == name:
setattr(parent, child_name, old_child)
break
| bsd-3-clause |
MQQiang/kbengine | kbe/res/scripts/common/Lib/test/test_range.py | 67 | 23223 | # Python test set -- built-in functions
import test.support, unittest
import sys
import pickle
import itertools
# pure Python implementations (3 args only), for comparison
def pyrange(start, stop, step):
    """Pure-Python reference implementation of three-argument range().

    Used by the tests as an independent comparison for the builtin.
    """
    if (start - stop) // step < 0:
        # Non-empty range: replace stop with the next element in the
        # sequence of integers that are congruent to start modulo step.
        stop += (start - stop) % step
    value = start
    while value != stop:
        yield value
        value += step
def pyrange_reversed(start, stop, step):
    """Pure-Python reference for reversed(range(start, stop, step))."""
    # First align stop onto the range's lattice, then iterate backwards
    # from the last element down to (and including) start.
    adjusted_stop = stop + (start - stop) % step
    return pyrange(adjusted_stop - step, start - step, -step)
class RangeTest(unittest.TestCase):
def assert_iterators_equal(self, xs, ys, test_id, limit=None):
# check that an iterator xs matches the expected results ys,
# up to a given limit.
if limit is not None:
xs = itertools.islice(xs, limit)
ys = itertools.islice(ys, limit)
sentinel = object()
pairs = itertools.zip_longest(xs, ys, fillvalue=sentinel)
for i, (x, y) in enumerate(pairs):
if x == y:
continue
elif x == sentinel:
self.fail('{}: iterator ended unexpectedly '
'at position {}; expected {}'.format(test_id, i, y))
elif y == sentinel:
self.fail('{}: unexpected excess element {} at '
'position {}'.format(test_id, x, i))
else:
self.fail('{}: wrong element at position {};'
'expected {}, got {}'.format(test_id, i, y, x))
def test_range(self):
self.assertEqual(list(range(3)), [0, 1, 2])
self.assertEqual(list(range(1, 5)), [1, 2, 3, 4])
self.assertEqual(list(range(0)), [])
self.assertEqual(list(range(-3)), [])
self.assertEqual(list(range(1, 10, 3)), [1, 4, 7])
self.assertEqual(list(range(5, -5, -3)), [5, 2, -1, -4])
a = 10
b = 100
c = 50
self.assertEqual(list(range(a, a+2)), [a, a+1])
self.assertEqual(list(range(a+2, a, -1)), [a+2, a+1])
self.assertEqual(list(range(a+4, a, -2)), [a+4, a+2])
seq = list(range(a, b, c))
self.assertIn(a, seq)
self.assertNotIn(b, seq)
self.assertEqual(len(seq), 2)
seq = list(range(b, a, -c))
self.assertIn(b, seq)
self.assertNotIn(a, seq)
self.assertEqual(len(seq), 2)
seq = list(range(-a, -b, -c))
self.assertIn(-a, seq)
self.assertNotIn(-b, seq)
self.assertEqual(len(seq), 2)
self.assertRaises(TypeError, range)
self.assertRaises(TypeError, range, 1, 2, 3, 4)
self.assertRaises(ValueError, range, 1, 2, 0)
self.assertRaises(TypeError, range, 0.0, 2, 1)
self.assertRaises(TypeError, range, 1, 2.0, 1)
self.assertRaises(TypeError, range, 1, 2, 1.0)
self.assertRaises(TypeError, range, 1e100, 1e101, 1e101)
self.assertRaises(TypeError, range, 0, "spam")
self.assertRaises(TypeError, range, 0, 42, "spam")
self.assertEqual(len(range(0, sys.maxsize, sys.maxsize-1)), 2)
r = range(-sys.maxsize, sys.maxsize, 2)
self.assertEqual(len(r), sys.maxsize)
def test_large_operands(self):
x = range(10**20, 10**20+10, 3)
self.assertEqual(len(x), 4)
self.assertEqual(len(list(x)), 4)
x = range(10**20+10, 10**20, 3)
self.assertEqual(len(x), 0)
self.assertEqual(len(list(x)), 0)
x = range(10**20, 10**20+10, -3)
self.assertEqual(len(x), 0)
self.assertEqual(len(list(x)), 0)
x = range(10**20+10, 10**20, -3)
self.assertEqual(len(x), 4)
self.assertEqual(len(list(x)), 4)
# Now test range() with longs
self.assertEqual(list(range(-2**100)), [])
self.assertEqual(list(range(0, -2**100)), [])
self.assertEqual(list(range(0, 2**100, -1)), [])
self.assertEqual(list(range(0, 2**100, -1)), [])
a = int(10 * sys.maxsize)
b = int(100 * sys.maxsize)
c = int(50 * sys.maxsize)
self.assertEqual(list(range(a, a+2)), [a, a+1])
self.assertEqual(list(range(a+2, a, -1)), [a+2, a+1])
self.assertEqual(list(range(a+4, a, -2)), [a+4, a+2])
seq = list(range(a, b, c))
self.assertIn(a, seq)
self.assertNotIn(b, seq)
self.assertEqual(len(seq), 2)
self.assertEqual(seq[0], a)
self.assertEqual(seq[-1], a+c)
seq = list(range(b, a, -c))
self.assertIn(b, seq)
self.assertNotIn(a, seq)
self.assertEqual(len(seq), 2)
self.assertEqual(seq[0], b)
self.assertEqual(seq[-1], b-c)
seq = list(range(-a, -b, -c))
self.assertIn(-a, seq)
self.assertNotIn(-b, seq)
self.assertEqual(len(seq), 2)
self.assertEqual(seq[0], -a)
self.assertEqual(seq[-1], -a-c)
def test_large_range(self):
# Check long ranges (len > sys.maxsize)
# len() is expected to fail due to limitations of the __len__ protocol
def _range_len(x):
try:
length = len(x)
except OverflowError:
step = x[1] - x[0]
length = 1 + ((x[-1] - x[0]) // step)
return length
a = -sys.maxsize
b = sys.maxsize
expected_len = b - a
x = range(a, b)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize+1
self.assertEqual(x[idx], a+idx)
self.assertEqual(x[idx:idx+1][0], a+idx)
with self.assertRaises(IndexError):
x[-expected_len-1]
with self.assertRaises(IndexError):
x[expected_len]
a = 0
b = 2 * sys.maxsize
expected_len = b - a
x = range(a, b)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize+1
self.assertEqual(x[idx], a+idx)
self.assertEqual(x[idx:idx+1][0], a+idx)
with self.assertRaises(IndexError):
x[-expected_len-1]
with self.assertRaises(IndexError):
x[expected_len]
a = 0
b = sys.maxsize**10
c = 2*sys.maxsize
expected_len = 1 + (b - a) // c
x = range(a, b, c)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize+1
self.assertEqual(x[idx], a+(idx*c))
self.assertEqual(x[idx:idx+1][0], a+(idx*c))
with self.assertRaises(IndexError):
x[-expected_len-1]
with self.assertRaises(IndexError):
x[expected_len]
a = sys.maxsize**10
b = 0
c = -2*sys.maxsize
expected_len = 1 + (b - a) // c
x = range(a, b, c)
self.assertIn(a, x)
self.assertNotIn(b, x)
self.assertRaises(OverflowError, len, x)
self.assertEqual(_range_len(x), expected_len)
self.assertEqual(x[0], a)
idx = sys.maxsize+1
self.assertEqual(x[idx], a+(idx*c))
self.assertEqual(x[idx:idx+1][0], a+(idx*c))
with self.assertRaises(IndexError):
x[-expected_len-1]
with self.assertRaises(IndexError):
x[expected_len]
def test_invalid_invocation(self):
    """range() rejects wrong arity, a zero step, and non-integer args."""
    # Arity and zero-step errors.
    with self.assertRaises(TypeError):
        range()
    with self.assertRaises(TypeError):
        range(1, 2, 3, 4)
    with self.assertRaises(ValueError):
        range(1, 2, 0)
    huge = int(10 * sys.maxsize)
    with self.assertRaises(ValueError):
        range(huge, huge + 1, int(0))
    # Non-integer arguments raise TypeError.  The exhaustive float
    # combinations exercise refcounting logic in the C implementation.
    bad_args = [
        (1., 1., 1.),
        (1e100, 1e101, 1e101),
        (0, "spam"),
        (0, 42, "spam"),
        (0.0,),
        (0, 0.0),
        (0.0, 0),
        (0.0, 0.0),
        (0, 0, 1.0),
        (0, 0.0, 1),
        (0, 0.0, 1.0),
        (0.0, 0, 1),
        (0.0, 0, 1.0),
        (0.0, 0.0, 1),
        (0.0, 0.0, 1.0),
    ]
    for args in bad_args:
        self.assertRaises(TypeError, range, *args)
def test_index(self):
    """range.index returns the offset of a value; a missing value raises
    ValueError and errors from user __eq__ methods propagate."""
    u = range(2)
    self.assertEqual(u.index(0), 0)
    self.assertEqual(u.index(1), 1)
    self.assertRaises(ValueError, u.index, 2)
    u = range(-2, 3)
    self.assertEqual(u.count(0), 1)
    self.assertEqual(u.index(0), 2)
    self.assertRaises(TypeError, u.index)
    class BadExc(Exception):
        pass
    class BadCmp:
        # Raises while being compared against 2: index() must let the
        # exception propagate rather than swallowing it.
        def __eq__(self, other):
            if other == 2:
                raise BadExc()
            return False
    a = range(4)
    self.assertRaises(BadExc, a.index, BadCmp())
    a = range(-2, 3)
    self.assertEqual(a.index(0), 2)
    self.assertEqual(range(1, 10, 3).index(4), 1)
    self.assertEqual(range(1, -10, -3).index(-5), 2)
    # Huge ranges: index() must compute the offset arithmetically, not
    # by scanning a materialized sequence.
    self.assertEqual(range(10**20).index(1), 1)
    self.assertEqual(range(10**20).index(10**20 - 1), 10**20 - 1)
    self.assertRaises(ValueError, range(1, 2**100, 2).index, 2**87)
    self.assertEqual(range(1, 2**100, 2).index(2**87+1), 2**86)
    # index() uses == comparison, so an always-equal object matches at 0.
    class AlwaysEqual(object):
        def __eq__(self, other):
            return True
    always_equal = AlwaysEqual()
    self.assertEqual(range(10).index(always_equal), 0)

def test_user_index_method(self):
    """range arguments and slice bounds are converted via __index__;
    exceptions and bad return types from __index__ propagate."""
    bignum = 2*sys.maxsize
    smallnum = 42
    # User-defined class with an __index__ method
    class I:
        def __init__(self, n):
            self.n = int(n)
        def __index__(self):
            return self.n
    self.assertEqual(list(range(I(bignum), I(bignum + 1))), [bignum])
    self.assertEqual(list(range(I(smallnum), I(smallnum + 1))), [smallnum])
    # User-defined class with a failing __index__ method
    class IX:
        def __index__(self):
            raise RuntimeError
    self.assertRaises(RuntimeError, range, IX())
    # User-defined class with an invalid __index__ method
    class IN:
        def __index__(self):
            return "not a number"
    self.assertRaises(TypeError, range, IN())
    # Test use of user-defined classes in slice indices.
    self.assertEqual(range(10)[:I(5)], range(5))
    with self.assertRaises(RuntimeError):
        range(0, 10)[:IX()]
    with self.assertRaises(TypeError):
        range(0, 10)[:IN()]

def test_count(self):
    """range.count returns 0 or 1 for ints (values are unique in a
    range) and falls back to == comparison for arbitrary objects."""
    self.assertEqual(range(3).count(-1), 0)
    self.assertEqual(range(3).count(0), 1)
    self.assertEqual(range(3).count(1), 1)
    self.assertEqual(range(3).count(2), 1)
    self.assertEqual(range(3).count(3), 0)
    self.assertIs(type(range(3).count(-1)), int)
    self.assertIs(type(range(3).count(1)), int)
    # Huge ranges: count() must not iterate.
    self.assertEqual(range(10**20).count(1), 1)
    self.assertEqual(range(10**20).count(10**20), 0)
    self.assertEqual(range(3).index(1), 1)
    self.assertEqual(range(1, 2**100, 2).count(2**87), 0)
    self.assertEqual(range(1, 2**100, 2).count(2**87+1), 1)
    class AlwaysEqual(object):
        def __eq__(self, other):
            return True
    always_equal = AlwaysEqual()
    # Every element compares equal, so count == len.
    self.assertEqual(range(10).count(always_equal), 10)
    self.assertEqual(len(range(sys.maxsize, sys.maxsize+10)), 10)
def test_repr(self):
    """repr shows start and stop always; step only when given."""
    self.assertEqual(repr(range(1)), 'range(0, 1)')
    self.assertEqual(repr(range(1, 2)), 'range(1, 2)')
    self.assertEqual(repr(range(1, 2, 3)), 'range(1, 2, 3)')

def test_pickling(self):
    """range objects round-trip through every pickle protocol, including
    values outside the machine-word range."""
    testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1),
                 (13, 21, 3), (-2, 2, 2), (2**65, 2**65+2)]
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        for t in testcases:
            with self.subTest(proto=proto, test=t):
                r = range(*t)
                self.assertEqual(list(pickle.loads(pickle.dumps(r, proto))),
                                 list(r))
def test_iterator_pickling(self):
    """iter(range(...)) round-trips through every pickle protocol, both
    fresh and after one element has been consumed."""
    testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1),
                 (13, 21, 3), (-2, 2, 2), (2**65, 2**65+2)]
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        for t in testcases:
            # subTest keeps failures attributable to a (proto, args)
            # pair, consistent with test_pickling above.
            with self.subTest(proto=proto, test=t):
                it = itorg = iter(range(*t))
                data = list(range(*t))
                # BUG FIX: 'proto' was previously ignored here (plain
                # pickle.dumps(it)), so only the default protocol was
                # ever exercised despite the loop.
                d = pickle.dumps(it, proto)
                it = pickle.loads(d)
                self.assertEqual(type(itorg), type(it))
                self.assertEqual(list(it), data)
                # Advance a fresh copy by one step and pickle again to
                # cover a partially consumed iterator.
                it = pickle.loads(d)
                try:
                    next(it)
                except StopIteration:
                    continue
                d = pickle.dumps(it, proto)
                it = pickle.loads(d)
                self.assertEqual(list(it), data[1:])
def test_exhausted_iterator_pickling(self):
    """A fully consumed range iterator unpickles as exhausted
    (large-int range variant)."""
    r = range(2**65, 2**65+2)
    i = iter(r)
    # Drain the iterator completely before pickling it.
    while True:
        r = next(i)
        if r == 2**65+1:
            break
    d = pickle.dumps(i)
    i2 = pickle.loads(d)
    self.assertEqual(list(i), [])
    self.assertEqual(list(i2), [])

def test_large_exhausted_iterator_pickling(self):
    """Same check for a word-sized range -- presumably exercises the
    'fast' iterator mentioned in test_range_iterators; confirm."""
    r = range(20)
    i = iter(r)
    while True:
        r = next(i)
        if r == 19:
            break
    d = pickle.dumps(i)
    i2 = pickle.loads(d)
    self.assertEqual(list(i), [])
    self.assertEqual(list(i2), [])

def test_odd_bug(self):
    # This used to raise a "SystemError: NULL result without error"
    # because the range validation step was eating the exception
    # before NULL was returned.
    with self.assertRaises(TypeError):
        range([], 1, -1)
def test_types(self):
    """Membership uses == equality, never type coercion; the int
    fast-path must not apply to int subclasses with custom __eq__."""
    # Non-integer objects *equal* to any of the range's items are supposed
    # to be contained in the range.
    self.assertIn(1.0, range(3))
    self.assertIn(True, range(3))
    self.assertIn(1+0j, range(3))
    class C1:
        def __eq__(self, other): return True
    self.assertIn(C1(), range(3))
    # Objects are never coerced into other types for comparison.
    class C2:
        def __int__(self): return 1
        def __index__(self): return 1
    self.assertNotIn(C2(), range(3))
    # ..except if explicitly told so.
    self.assertIn(int(C2()), range(3))
    # Check that the range.__contains__ optimization is only
    # used for ints, not for instances of subclasses of int.
    class C3(int):
        def __eq__(self, other): return True
    self.assertIn(C3(11), range(10))
    self.assertIn(C3(11), list(range(10)))
def test_strided_limits(self):
    """Membership respects the step, at both ends of the range."""
    r = range(0, 101, 2)
    self.assertIn(0, r)
    self.assertNotIn(1, r)
    self.assertIn(2, r)
    self.assertNotIn(99, r)
    # 100 is the last value actually produced; 101 is the exclusive stop.
    self.assertIn(100, r)
    self.assertNotIn(101, r)
    r = range(0, -20, -1)
    self.assertIn(0, r)
    self.assertIn(-1, r)
    self.assertIn(-19, r)
    self.assertNotIn(-20, r)
    r = range(0, -20, -2)
    self.assertIn(-18, r)
    self.assertNotIn(-19, r)
    self.assertNotIn(-20, r)

def test_empty(self):
    """Empty ranges (zero length or inverted bounds) contain nothing."""
    r = range(0)
    self.assertNotIn(0, r)
    self.assertNotIn(1, r)
    r = range(0, -10)
    self.assertNotIn(0, r)
    self.assertNotIn(-1, r)
    self.assertNotIn(1, r)
def test_range_iterators(self):
    """Compare C iterators against pure-Python reference generators
    (pyrange/pyrange_reversed are helpers defined elsewhere in this
    module) around word-size boundaries."""
    # exercise 'fast' iterators, that use a rangeiterobject internally.
    # see issue 7298
    # Limits straddle 2**32 and 2**64 by a few units in each direction.
    limits = [base + jiggle
              for M in (2**32, 2**64)
              for base in (-M, -M//2, 0, M//2, M)
              for jiggle in (-2, -1, 0, 1, 2)]
    test_ranges = [(start, end, step)
                   for start in limits
                   for end in limits
                   for step in (-2**63, -2**31, -2, -1, 1, 2)]
    for start, end, step in test_ranges:
        iter1 = range(start, end, step)
        iter2 = pyrange(start, end, step)
        test_id = "range({}, {}, {})".format(start, end, step)
        # check first 100 entries
        self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
        iter1 = reversed(range(start, end, step))
        iter2 = pyrange_reversed(start, end, step)
        test_id = "reversed(range({}, {}, {}))".format(start, end, step)
        self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
def test_slice(self):
    """Slicing a range gives the same elements and length as slicing
    the equivalent list, including out-of-bounds and negative steps."""
    def check(start, stop, step=None):
        # Compare against the materialized list as the ground truth.
        i = slice(start, stop, step)
        self.assertEqual(list(r[i]), list(r)[i])
        self.assertEqual(len(r[i]), len(list(r)[i]))
    for r in [range(10),
              range(0),
              range(1, 9, 3),
              range(8, 0, -3),
              range(sys.maxsize+1, sys.maxsize+10),
              ]:
        check(0, 2)
        check(0, 20)
        check(1, 2)
        check(20, 30)
        check(-30, -20)
        check(-1, 100, 2)
        check(0, -1)
        check(-1, -3, -1)
def test_contains(self):
    """Membership for forward, backward, and strided ranges; floats
    equal to members count, non-members and non-numbers do not."""
    r = range(10)
    self.assertIn(0, r)
    self.assertIn(1, r)
    self.assertIn(5.0, r)
    self.assertNotIn(5.1, r)
    self.assertNotIn(-1, r)
    self.assertNotIn(10, r)
    self.assertNotIn("", r)
    r = range(9, -1, -1)
    self.assertIn(0, r)
    self.assertIn(1, r)
    self.assertIn(5.0, r)
    self.assertNotIn(5.1, r)
    self.assertNotIn(-1, r)
    self.assertNotIn(10, r)
    self.assertNotIn("", r)
    r = range(0, 10, 2)
    self.assertIn(0, r)
    self.assertNotIn(1, r)
    self.assertNotIn(5.0, r)
    self.assertNotIn(5.1, r)
    self.assertNotIn(-1, r)
    self.assertNotIn(10, r)
    self.assertNotIn("", r)
    r = range(9, -1, -2)
    self.assertNotIn(0, r)
    self.assertIn(1, r)
    self.assertIn(5.0, r)
    self.assertNotIn(5.1, r)
    self.assertNotIn(-1, r)
    self.assertNotIn(10, r)
    self.assertNotIn("", r)
def test_reverse_iteration(self):
    """reversed(range) matches the reversed materialized list."""
    for r in [range(10),
              range(0),
              range(1, 9, 3),
              range(8, 0, -3),
              range(sys.maxsize+1, sys.maxsize+10),
              ]:
        self.assertEqual(list(reversed(r)), list(r)[::-1])

def test_issue11845(self):
    """Exercise slice.indices()-derived ranges with many index combos
    (regression test: none of these subscript operations may crash)."""
    r = range(*slice(1, 18, 2).indices(20))
    values = {None, 0, 1, -1, 2, -2, 5, -5, 19, -19,
              20, -20, 21, -21, 30, -30, 99, -99}
    for i in values:
        for j in values:
            for k in values - {0}:
                r[i:j:k]
def test_comparison(self):
    """range equality is element-wise (like the equivalent tuple),
    equal ranges hash equal, and ordering comparisons raise."""
    test_ranges = [range(0), range(0, -1), range(1, 1, 3),
                   range(1), range(5, 6), range(5, 6, 2),
                   range(5, 7, 2), range(2), range(0, 4, 2),
                   range(0, 5, 2), range(0, 6, 2)]
    test_tuples = list(map(tuple, test_ranges))
    # Check that equality of ranges matches equality of the corresponding
    # tuples for each pair from the test lists above.
    ranges_eq = [a == b for a in test_ranges for b in test_ranges]
    tuples_eq = [a == b for a in test_tuples for b in test_tuples]
    self.assertEqual(ranges_eq, tuples_eq)
    # Check that != correctly gives the logical negation of ==
    ranges_ne = [a != b for a in test_ranges for b in test_ranges]
    self.assertEqual(ranges_ne, [not x for x in ranges_eq])
    # Equal ranges should have equal hashes.
    for a in test_ranges:
        for b in test_ranges:
            if a == b:
                self.assertEqual(hash(a), hash(b))
    # Ranges are unequal to other types (even sequence types)
    self.assertIs(range(0) == (), False)
    self.assertIs(() == range(0), False)
    self.assertIs(range(2) == [0, 1], False)
    # Huge integers aren't a problem.  Note: ranges that differ only in
    # an unreachable stop value compare equal.
    self.assertEqual(range(0, 2**100 - 1, 2),
                     range(0, 2**100, 2))
    self.assertEqual(hash(range(0, 2**100 - 1, 2)),
                     hash(range(0, 2**100, 2)))
    self.assertNotEqual(range(0, 2**100, 2),
                        range(0, 2**100 + 1, 2))
    self.assertEqual(range(2**200, 2**201 - 2**99, 2**100),
                     range(2**200, 2**201, 2**100))
    self.assertEqual(hash(range(2**200, 2**201 - 2**99, 2**100)),
                     hash(range(2**200, 2**201, 2**100)))
    self.assertNotEqual(range(2**200, 2**201, 2**100),
                        range(2**200, 2**201 + 1, 2**100))
    # Order comparisons are not implemented for ranges.
    with self.assertRaises(TypeError):
        range(0) < range(0)
    with self.assertRaises(TypeError):
        range(0) > range(0)
    with self.assertRaises(TypeError):
        range(0) <= range(0)
    with self.assertRaises(TypeError):
        range(0) >= range(0)
def test_attributes(self):
    # test the start, stop and step attributes of range objects
    self.assert_attrs(range(0), 0, 0, 1)
    self.assert_attrs(range(10), 0, 10, 1)
    self.assert_attrs(range(-10), 0, -10, 1)
    self.assert_attrs(range(0, 10, 1), 0, 10, 1)
    self.assert_attrs(range(0, 10, 3), 0, 10, 3)
    self.assert_attrs(range(10, 0, -1), 10, 0, -1)
    self.assert_attrs(range(10, 0, -3), 10, 0, -3)

def assert_attrs(self, rangeobj, start, stop, step):
    """Helper: check start/stop/step values and that all three
    attributes are read-only and cannot be deleted."""
    self.assertEqual(rangeobj.start, start)
    self.assertEqual(rangeobj.stop, stop)
    self.assertEqual(rangeobj.step, step)
    with self.assertRaises(AttributeError):
        rangeobj.start = 0
    with self.assertRaises(AttributeError):
        rangeobj.stop = 10
    with self.assertRaises(AttributeError):
        rangeobj.step = 1
    with self.assertRaises(AttributeError):
        del rangeobj.start
    with self.assertRaises(AttributeError):
        del rangeobj.stop
    with self.assertRaises(AttributeError):
        del rangeobj.step
def test_main():
    """Entry point used by the regression-test framework; 'test.support'
    is presumably imported at the top of the file -- outside this view."""
    test.support.run_unittest(RangeTest)

if __name__ == "__main__":
    test_main()
| lgpl-3.0 |
atmark-techno/atmark-dist | user/mosquitto/mosquitto-1.3.4/test/broker/08-tls-psk-pub.py | 18 | 1866 | #!/usr/bin/env python
# Broker test: a plain-TCP subscriber must receive the message published
# by the compiled TLS-PSK client helper (broker config: 08-tls-psk-pub.conf).
import subprocess
import socket
import ssl
import sys
import time

# TLS-PSK needs Python >= 2.7 and OpenSSL >= 1.0; skip (exit 0) otherwise.
if sys.version < '2.7':
    print("WARNING: SSL not supported on Python 2.6")
    exit(0)
if ssl.OPENSSL_VERSION_NUMBER < 0x10000000:
    print("WARNING: TLS-PSK not supported on OpenSSL < 1.0")
    exit(0)

import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
# Makes the parent test directory importable so mosq_test can be found.
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
    sys.path.insert(0, cmd_subfolder)

import mosq_test

# Environment for the C publisher helper: point it at the in-tree libs.
env = dict(os.environ)
env['LD_LIBRARY_PATH'] = '../../lib:../../lib/cpp'
try:
    pp = env['PYTHONPATH']
except KeyError:
    pp = ''
env['PYTHONPATH'] = '../../lib/python:'+pp

rc = 1  # process exit status; only set to 0 on full success
keepalive = 10
connect_packet = mosq_test.gen_connect("no-psk-test-client", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)

mid = 1
subscribe_packet = mosq_test.gen_subscribe(mid, "psk/test", 0)
suback_packet = mosq_test.gen_suback(mid, 0)

publish_packet = mosq_test.gen_publish(topic="psk/test", payload="message", qos=0)

broker = subprocess.Popen(['../../src/mosquitto', '-c', '08-tls-psk-pub.conf'], stderr=subprocess.PIPE)

try:
    time.sleep(0.5)  # give the broker time to start listening

    # Subscribe over the plain listener (port 1889), then trigger a
    # publish through the TLS-PSK listener via the helper binary.
    sock = mosq_test.do_client_connect(connect_packet, connack_packet, port=1889, timeout=20)
    sock.send(subscribe_packet)

    if mosq_test.expect_packet(sock, "suback", suback_packet):
        pub = subprocess.Popen(['./c/08-tls-psk-pub.test'], env=env)
        if pub.wait():
            raise ValueError
            exit(1)  # NOTE(review): unreachable -- the raise above leaves this block

        if mosq_test.expect_packet(sock, "publish", publish_packet):
            rc = 0

    sock.close()
finally:
    # Always stop the broker; dump its stderr when the test failed.
    broker.terminate()
    broker.wait()
    if rc:
        (stdo, stde) = broker.communicate()
        print(stde)

exit(rc)
| gpl-2.0 |
shashisp/blumix-webpy | app/gluon/contrib/simplejsonrpc.py | 16 | 5041 | # -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Pythonic simple JSON RPC Client implementation"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2011 Mariano Reingart"
__license__ = "LGPL 3.0"
__version__ = "0.05"
import urllib
from xmlrpclib import Transport, SafeTransport
from cStringIO import StringIO
import random
import sys
try:
import gluon.contrib.simplejson as json # try web2py json serializer
except ImportError:
try:
import json # try stdlib (py2.6)
except:
import simplejson as json # try external module
class JSONRPCError(RuntimeError):
    """Error raised when a remote procedure call fails.

    Attributes:
        code: numeric error code reported by the server (0 if unknown).
        message: human-readable error message.
        data: optional extra information (sequence of strings) or None.
    """
    def __init__(self, code, message, data=None):
        # BUG FIX: '\n'.join(None) raised TypeError when no extra data was
        # supplied (the documented default, used e.g. by the request-id
        # mismatch check below), masking the real error being reported.
        value = "%s: %s\n%s" % (code, message, '\n'.join(data or []))
        RuntimeError.__init__(self, value)
        self.code = code
        self.message = message
        self.data = data
class JSONDummyParser:
    """Minimal stand-in for the xmlrpclib parser/unmarshaller pair.

    feed() accumulates raw response chunks and close() returns the
    concatenated payload, so the "parsed" result is simply the text.
    """
    def __init__(self):
        self._chunks = []

    def feed(self, data):
        self._chunks.append(data)

    def close(self):
        return "".join(self._chunks)
class JSONTransportMixin:
    "json wrapper for xmlrpclib transport interface"

    def send_content(self, connection, request_body):
        # Announce JSON instead of XML; the rest of the HTTP exchange is
        # inherited unchanged from xmlrpclib's transport machinery.
        connection.putheader("Content-Type", "application/json")
        connection.putheader("Content-Length", str(len(request_body)))
        connection.endheaders()
        if request_body:
            connection.send(request_body)
        # todo: add gzip compression

    def getparser(self):
        # get parser and unmarshaller
        # The same JSONDummyParser object serves as both: feed() buffers
        # the response and close() hands it back verbatim for json.loads.
        parser = JSONDummyParser()
        return parser, parser
class JSONTransport(JSONTransportMixin, Transport):
    # HTTP transport (xmlrpclib.Transport) with JSON content handling.
    pass

class JSONSafeTransport(JSONTransportMixin, SafeTransport):
    # HTTPS transport (xmlrpclib.SafeTransport) with JSON content handling.
    pass
class ServerProxy(object):
    """JSON RPC Simple Client Service Proxy.

    Remote methods are invoked as attributes: proxy.add(1, 2).  Only
    single-level method names are supported -- __getattr__ returns a
    plain callable, so dotted access (proxy.a.b) would fail.
    """

    def __init__(self, uri, transport=None, encoding=None, verbose=0,version=None):
        self.location = uri             # server location (url)
        self.trace = verbose            # show debug messages
        self.exceptions = True          # raise errors? (JSONRPCError)
        self.timeout = None
        self.json_request = self.json_response = ''
        self.version = version          # '2.0' for jsonrpc2
        type, uri = urllib.splittype(uri)
        if type not in ("http", "https"):
            raise IOError("unsupported JSON-RPC protocol")
        self.__host, self.__handler = urllib.splithost(uri)
        # Pick the transport matching the URL scheme unless one is given.
        if transport is None:
            if type == "https":
                transport = JSONSafeTransport()
            else:
                transport = JSONTransport()
        self.__transport = transport
        self.__encoding = encoding
        self.__verbose = verbose

    def __getattr__(self, attr):
        "pseudo method that can be called"
        return lambda *args: self.call(attr, *args)

    def call(self, method, *args):
        "JSON RPC communication (method invocation)"
        # build data sent to the service
        # Random id lets the response be matched to this request.
        request_id = random.randint(0, sys.maxint)
        data = {'id': request_id, 'method': method, 'params': args, }
        if self.version:
            data['jsonrpc'] = self.version #mandatory key/value for jsonrpc2 validation else err -32600
        request = json.dumps(data)
        # make HTTP request (retry if connection is lost)
        response = self.__transport.request(
            self.__host,
            self.__handler,
            request,
            verbose=self.__verbose
        )
        # store plain request and response for further debugging
        self.json_request = request
        self.json_response = response
        # parse json data coming from service
        # {'version': '1.1', 'id': id, 'result': result, 'error': None}
        response = json.loads(response)
        # Keep the last error visible even when exceptions are disabled.
        self.error = response.get('error', {})
        if self.error and self.exceptions:
            raise JSONRPCError(self.error.get('code', 0),
                               self.error.get('message', ''),
                               self.error.get('data', None))
        if response['id'] != request_id:
            raise JSONRPCError(0, "JSON Request ID != Response ID")
        return response.get('result')

# Backwards-compatible alias (xmlrpclib-style name).
ServiceProxy = ServerProxy
if __name__ == "__main__":
    # basic tests:
    # Smoke test against a live public endpoint -- requires network access.
    location = "http://www.web2py.com.ar/webservices/sample/call/jsonrpc"
    client = ServerProxy(location, verbose='--verbose' in sys.argv,)
    print client.add(1, 2)
| mit |
xyzz/vcmi-build | project/jni/python/src/Lib/mhlib.py | 290 | 33434 | """MH interface -- purely object-oriented (well, almost)
Executive summary:
import mhlib
mh = mhlib.MH() # use default mailbox directory and profile
mh = mhlib.MH(mailbox) # override mailbox location (default from profile)
mh = mhlib.MH(mailbox, profile) # override mailbox and profile
mh.error(format, ...) # print error message -- can be overridden
s = mh.getprofile(key) # profile entry (None if not set)
path = mh.getpath() # mailbox pathname
name = mh.getcontext() # name of current folder
mh.setcontext(name) # set name of current folder
list = mh.listfolders() # names of top-level folders
list = mh.listallfolders() # names of all folders, including subfolders
list = mh.listsubfolders(name) # direct subfolders of given folder
list = mh.listallsubfolders(name) # all subfolders of given folder
mh.makefolder(name) # create new folder
mh.deletefolder(name) # delete folder -- must have no subfolders
f = mh.openfolder(name) # new open folder object
f.error(format, ...) # same as mh.error(format, ...)
path = f.getfullname() # folder's full pathname
path = f.getsequencesfilename() # full pathname of folder's sequences file
path = f.getmessagefilename(n) # full pathname of message n in folder
list = f.listmessages() # list of messages in folder (as numbers)
n = f.getcurrent() # get current message
f.setcurrent(n) # set current message
list = f.parsesequence(seq) # parse msgs syntax into list of messages
n = f.getlast() # get last message (0 if no messages)
f.setlast(n) # set last message (internal use only)
dict = f.getsequences() # dictionary of sequences in folder {name: list}
f.putsequences(dict) # write sequences back to folder
f.createmessage(n, fp) # add message from file f as number n
f.removemessages(list) # remove messages in list from folder
f.refilemessages(list, tofolder) # move messages in list to other folder
f.movemessage(n, tofolder, ton) # move one message to a given destination
f.copymessage(n, tofolder, ton) # copy one message to a given destination
m = f.openmessage(n) # new open message object (costs a file descriptor)
m is a derived class of mimetools.Message(rfc822.Message), with:
s = m.getheadertext() # text of message's headers
s = m.getheadertext(pred) # text of message's headers, filtered by pred
s = m.getbodytext() # text of message's body, decoded
s = m.getbodytext(0) # text of message's body, not decoded
"""
# Emit the Py3k removal warning at import time, then drop the helper name.
from warnings import warnpy3k
warnpy3k("the mhlib module has been removed in Python 3.0; use the mailbox "
         "module instead", stacklevel=2)
del warnpy3k

# XXX To do, functionality:
# - annotate messages
# - send messages
#
# XXX To do, organization:
# - move IntSet to separate file
# - move most Message functionality to module mimetools


# Customizable defaults

MH_PROFILE = '~/.mh_profile'
PATH = '~/Mail'
MH_SEQUENCES = '.mh_sequences'
FOLDER_PROTECT = 0700           # Python 2 octal literal: rwx for owner only


# Imported modules

import os
import sys
import re
import mimetools
import multifile
import shutil
from bisect import bisect

__all__ = ["MH","Error","Folder","Message"]

# Exported exception type

class Error(Exception):
    """Base class for all errors raised by this module."""
    pass
class MH:
    """Class representing a particular collection of folders.
    Optional constructor arguments are the pathname for the directory
    containing the collection, and the MH profile to use.
    If either is omitted or empty a default is used; the default
    directory is taken from the MH profile if it is specified there."""

    def __init__(self, path = None, profile = None):
        """Constructor."""
        if profile is None: profile = MH_PROFILE
        self.profile = os.path.expanduser(profile)
        if path is None: path = self.getprofile('Path')
        if not path: path = PATH
        # A relative Path from the profile is taken relative to $HOME.
        if not os.path.isabs(path) and path[0] != '~':
            path = os.path.join('~', path)
        path = os.path.expanduser(path)
        if not os.path.isdir(path): raise Error, 'MH() path not found'
        self.path = path

    def __repr__(self):
        """String representation."""
        return 'MH(%r, %r)' % (self.path, self.profile)

    def error(self, msg, *args):
        """Routine to print an error.  May be overridden by a derived class."""
        sys.stderr.write('MH error: %s\n' % (msg % args))

    def getprofile(self, key):
        """Return a profile entry, None if not found."""
        # pickline is a helper defined elsewhere in this module.
        return pickline(self.profile, key)

    def getpath(self):
        """Return the path (the name of the collection's directory)."""
        return self.path

    def getcontext(self):
        """Return the name of the current folder ('inbox' by default)."""
        context = pickline(os.path.join(self.getpath(), 'context'),
                           'Current-Folder')
        if not context: context = 'inbox'
        return context

    def setcontext(self, context):
        """Set the name of the current folder (rewrites the context file)."""
        fn = os.path.join(self.getpath(), 'context')
        f = open(fn, "w")
        f.write("Current-Folder: %s\n" % context)
        f.close()

    def listfolders(self):
        """Return the names of the top-level folders (sorted)."""
        folders = []
        path = self.getpath()
        for name in os.listdir(path):
            fullname = os.path.join(path, name)
            if os.path.isdir(fullname):
                folders.append(name)
        folders.sort()
        return folders

    def listsubfolders(self, name):
        """Return the names of the subfolders in a given folder
        (prefixed with the given folder name)."""
        fullname = os.path.join(self.path, name)
        # Get the link count so we can avoid listing folders
        # that have no subfolders.
        # (On Unix a directory's link count is 2 + number of subdirs.)
        nlinks = os.stat(fullname).st_nlink
        if nlinks <= 2:
            return []
        subfolders = []
        subnames = os.listdir(fullname)
        for subname in subnames:
            fullsubname = os.path.join(fullname, subname)
            if os.path.isdir(fullsubname):
                name_subname = os.path.join(name, subname)
                subfolders.append(name_subname)
                # Stop looking for subfolders when
                # we've seen them all
                nlinks = nlinks - 1
                if nlinks <= 2:
                    break
        subfolders.sort()
        return subfolders

    def listallfolders(self):
        """Return the names of all folders and subfolders, recursively."""
        return self.listallsubfolders('')

    def listallsubfolders(self, name):
        """Return the names of subfolders in a given folder, recursively."""
        fullname = os.path.join(self.path, name)
        # Get the link count so we can avoid listing folders
        # that have no subfolders.
        nlinks = os.stat(fullname).st_nlink
        if nlinks <= 2:
            return []
        subfolders = []
        subnames = os.listdir(fullname)
        for subname in subnames:
            # Skip backup files (',' prefix) and message files (numeric).
            if subname[0] == ',' or isnumeric(subname): continue
            fullsubname = os.path.join(fullname, subname)
            if os.path.isdir(fullsubname):
                name_subname = os.path.join(name, subname)
                subfolders.append(name_subname)
                # Don't recurse through symlinks (avoids cycles).
                if not os.path.islink(fullsubname):
                    subsubfolders = self.listallsubfolders(
                              name_subname)
                    subfolders = subfolders + subsubfolders
                # Stop looking for subfolders when
                # we've seen them all
                nlinks = nlinks - 1
                if nlinks <= 2:
                    break
        subfolders.sort()
        return subfolders

    def openfolder(self, name):
        """Return a new Folder object for the named folder."""
        return Folder(self, name)

    def makefolder(self, name):
        """Create a new folder (or raise os.error if it cannot be created)."""
        protect = pickline(self.profile, 'Folder-Protect')
        # Folder-Protect is interpreted as octal; note isnumeric rejects a
        # leading zero, so '0700' falls back to the default mode.
        if protect and isnumeric(protect):
            mode = int(protect, 8)
        else:
            mode = FOLDER_PROTECT
        os.mkdir(os.path.join(self.getpath(), name), mode)

    def deletefolder(self, name):
        """Delete a folder.  This removes files in the folder but not
        subdirectories.  Raise os.error if deleting the folder itself fails."""
        fullname = os.path.join(self.getpath(), name)
        for subname in os.listdir(fullname):
            fullsubname = os.path.join(fullname, subname)
            try:
                os.unlink(fullsubname)
            except os.error:
                # Best effort: report and keep deleting the rest.
                self.error('%s not deleted, continuing...' %
                           fullsubname)
        os.rmdir(fullname)
# Matches a positive decimal integer with no leading zero -- the form MH
# uses for message filenames.
numericprog = re.compile('^[1-9][0-9]*$')

def isnumeric(str):
    """Return True if *str* looks like an MH message number."""
    return bool(numericprog.match(str))
class Folder:
"""Class representing a particular folder."""
def __init__(self, mh, name):
    """Constructor; raises Error if the folder directory doesn't exist."""
    self.mh = mh
    self.name = name
    if not os.path.isdir(self.getfullname()):
        raise Error, 'no folder %s' % name

def __repr__(self):
    """String representation."""
    return 'Folder(%r, %r)' % (self.mh, self.name)

def error(self, *args):
    """Error message handler -- delegates to the owning MH object."""
    self.mh.error(*args)

def getfullname(self):
    """Return the full pathname of the folder."""
    return os.path.join(self.mh.path, self.name)

def getsequencesfilename(self):
    """Return the full pathname of the folder's sequences file."""
    return os.path.join(self.getfullname(), MH_SEQUENCES)

def getmessagefilename(self, n):
    """Return the full pathname of a message in the folder."""
    return os.path.join(self.getfullname(), str(n))

def listsubfolders(self):
    """Return list of direct subfolders."""
    return self.mh.listsubfolders(self.name)

def listallsubfolders(self):
    """Return list of all subfolders."""
    return self.mh.listallsubfolders(self.name)

def listmessages(self):
    """Return the list of messages currently present in the folder.
    As a side effect, set self.last to the last message (or 0)."""
    messages = []
    # Bind lookups to locals -- this loop runs once per directory entry.
    match = numericprog.match
    append = messages.append
    for name in os.listdir(self.getfullname()):
        if match(name):
            append(name)
    messages = map(int, messages)       # Python 2: map returns a list
    messages.sort()
    if messages:
        self.last = messages[-1]
    else:
        self.last = 0
    return messages

def getsequences(self):
    """Return the set of sequences for the folder, as a dict mapping
    sequence name to a list of message numbers."""
    sequences = {}
    fullname = self.getsequencesfilename()
    try:
        f = open(fullname, 'r')
    except IOError:
        # No .mh_sequences file means no sequences.
        return sequences
    while 1:
        line = f.readline()
        if not line: break
        fields = line.split(':')
        # NOTE(review): a malformed line is only reported -- it is still
        # processed below, so a line with no ':' would raise IndexError.
        if len(fields) != 2:
            self.error('bad sequence in %s: %s' %
                       (fullname, line.strip()))
        key = fields[0].strip()
        # IntSet (defined elsewhere in this module) parses 'a-b c ...'.
        value = IntSet(fields[1].strip(), ' ').tolist()
        sequences[key] = value
    return sequences

def putsequences(self, sequences):
    """Write the set of sequences back to the folder."""
    fullname = self.getsequencesfilename()
    f = None
    for key, seq in sequences.iteritems():
        s = IntSet('', ' ')
        s.fromlist(seq)
        # Open lazily so an empty dict creates no file at all.
        if not f: f = open(fullname, 'w')
        f.write('%s: %s\n' % (key, s.tostring()))
    if not f:
        # No sequences: remove the file rather than leaving it empty.
        try:
            os.unlink(fullname)
        except os.error:
            pass
    else:
        f.close()

def getcurrent(self):
    """Return the current message.  Raise Error when there is none."""
    seqs = self.getsequences()
    # ValueError covers max([]) on an empty 'cur' sequence.
    try:
        return max(seqs['cur'])
    except (ValueError, KeyError):
        raise Error, "no cur message"

def setcurrent(self, n):
    """Set the current message (updateline is a module-level helper)."""
    updateline(self.getsequencesfilename(), 'cur', str(n), 0)
def parsesequence(self, seq):
    """Parse an MH sequence specification into a message list.
    Attempt to mimic mh-sequence(5) as close as possible.
    Also attempt to mimic observed behavior regarding which
    conditions cause which error messages."""
    # XXX Still not complete (see mh-format(5)).
    # Missing are:
    # - 'prev', 'next' as count
    # - Sequence-Negation option
    all = self.listmessages()
    # Observed behavior: test for empty folder is done first
    if not all:
        raise Error, "no messages in %s" % self.name
    # Common case first: all is frequently the default
    if seq == 'all':
        return all
    # Test for X:Y before X-Y because 'seq:-n' matches both
    i = seq.find(':')
    if i >= 0:
        # X:[+-]N -- N messages counted from anchor X.
        head, dir, tail = seq[:i], '', seq[i+1:]
        if tail[:1] in '-+':
            dir, tail = tail[:1], tail[1:]
        if not isnumeric(tail):
            raise Error, "bad message list %s" % seq
        try:
            count = int(tail)
        except (ValueError, OverflowError):
            # Can't use sys.maxint because of i+count below
            count = len(all)
        try:
            anchor = self._parseindex(head, all)
        except Error, msg:
            # Anchor is not a number/keyword: try it as a named sequence.
            seqs = self.getsequences()
            if not head in seqs:
                if not msg:
                    msg = "bad message list %s" % seq
                raise Error, msg, sys.exc_info()[2]
            msgs = seqs[head]
            if not msgs:
                raise Error, "sequence %s empty" % head
            if dir == '-':
                return msgs[-count:]
            else:
                return msgs[:count]
        else:
            # 'prev'/'last' anchors count backwards by default.
            if not dir:
                if head in ('prev', 'last'):
                    dir = '-'
            if dir == '-':
                i = bisect(all, anchor)
                return all[max(0, i-count):i]
            else:
                i = bisect(all, anchor-1)
                return all[i:i+count]
    # Test for X-Y next
    i = seq.find('-')
    if i >= 0:
        # Inclusive range of message numbers, clipped to what exists.
        begin = self._parseindex(seq[:i], all)
        end = self._parseindex(seq[i+1:], all)
        i = bisect(all, begin-1)
        j = bisect(all, end)
        r = all[i:j]
        if not r:
            raise Error, "bad message list %s" % seq
        return r
    # Neither X:Y nor X-Y; must be a number or a (pseudo-)sequence
    try:
        n = self._parseindex(seq, all)
    except Error, msg:
        seqs = self.getsequences()
        if not seq in seqs:
            if not msg:
                msg = "bad message list %s" % seq
            raise Error, msg
        return seqs[seq]
    else:
        if n not in all:
            if isnumeric(seq):
                raise Error, "message %d doesn't exist" % n
            else:
                raise Error, "no %s message" % seq
        else:
            return [n]

def _parseindex(self, seq, all):
    """Internal: parse a message number (or cur, first, etc.)."""
    if isnumeric(seq):
        try:
            return int(seq)
        except (OverflowError, ValueError):
            # Number too large to represent: clamp (Python 2 ints).
            return sys.maxint
    if seq in ('cur', '.'):
        return self.getcurrent()
    if seq == 'first':
        return all[0]
    if seq == 'last':
        return all[-1]
    if seq == 'next':
        n = self.getcurrent()
        i = bisect(all, n)
        try:
            return all[i]
        except IndexError:
            raise Error, "no next message"
    if seq == 'prev':
        n = self.getcurrent()
        i = bisect(all, n-1)
        if i == 0:
            raise Error, "no prev message"
        try:
            return all[i-1]
        except IndexError:
            raise Error, "no prev message"
    # Not a number or keyword: caller may fall back to named sequences.
    raise Error, None
def openmessage(self, n):
"""Open a message -- returns a Message object."""
return Message(self, n)
def removemessages(self, list):
"""Remove one or more messages -- may raise os.error."""
errors = []
deleted = []
for n in list:
path = self.getmessagefilename(n)
commapath = self.getmessagefilename(',' + str(n))
try:
os.unlink(commapath)
except os.error:
pass
try:
os.rename(path, commapath)
except os.error, msg:
errors.append(msg)
else:
deleted.append(n)
if deleted:
self.removefromallsequences(deleted)
if errors:
if len(errors) == 1:
raise os.error, errors[0]
else:
raise os.error, ('multiple errors:', errors)
def refilemessages(self, list, tofolder, keepsequences=0):
"""Refile one or more messages -- may raise os.error.
'tofolder' is an open folder object."""
errors = []
refiled = {}
for n in list:
ton = tofolder.getlast() + 1
path = self.getmessagefilename(n)
topath = tofolder.getmessagefilename(ton)
try:
os.rename(path, topath)
except os.error:
# Try copying
try:
shutil.copy2(path, topath)
os.unlink(path)
except (IOError, os.error), msg:
errors.append(msg)
try:
os.unlink(topath)
except os.error:
pass
continue
tofolder.setlast(ton)
refiled[n] = ton
if refiled:
if keepsequences:
tofolder._copysequences(self, refiled.items())
self.removefromallsequences(refiled.keys())
if errors:
if len(errors) == 1:
raise os.error, errors[0]
else:
raise os.error, ('multiple errors:', errors)
def _copysequences(self, fromfolder, refileditems):
    """Helper for refilemessages() to copy sequences.

    refileditems is a list of (old_number, new_number) pairs; for every
    sequence of fromfolder that contained old_number, new_number is
    appended to the same-named sequence of this folder.
    """
    fromsequences = fromfolder.getsequences()
    tosequences = self.getsequences()
    changed = 0
    for name, seq in fromsequences.items():
        try:
            toseq = tosequences[name]
            new = 0
        except KeyError:
            toseq = []
            new = 1
        for fromn, ton in refileditems:
            if fromn in seq:
                toseq.append(ton)
                changed = 1
        # Only register a brand-new sequence if it gained members.
        if new and toseq:
            tosequences[name] = toseq
    if changed:
        self.putsequences(tosequences)
def movemessage(self, n, tofolder, ton):
    """Move one message over a specific destination message,
    which may or may not already exist."""
    path = self.getmessagefilename(n)
    # Open it to check that it exists
    f = open(path)
    f.close()
    del f
    topath = tofolder.getmessagefilename(ton)
    backuptopath = tofolder.getmessagefilename(',%d' % ton)
    # Preserve any existing destination message as a ",<n>" backup.
    try:
        os.rename(topath, backuptopath)
    except os.error:
        pass
    try:
        os.rename(path, topath)
    except os.error:
        # Try copying (rename fails across filesystems)
        ok = 0
        try:
            tofolder.setlast(None)
            shutil.copy2(path, topath)
            ok = 1
        finally:
            # On failure, remove the partial destination file.
            if not ok:
                try:
                    os.unlink(topath)
                except os.error:
                    pass
        os.unlink(path)
    self.removefromallsequences([n])
def copymessage(self, n, tofolder, ton):
    """Copy one message over a specific destination message,
    which may or may not already exist."""
    path = self.getmessagefilename(n)
    # Open it to check that it exists
    f = open(path)
    f.close()
    del f
    topath = tofolder.getmessagefilename(ton)
    backuptopath = tofolder.getmessagefilename(',%d' % ton)
    # Preserve any existing destination message as a ",<n>" backup.
    try:
        os.rename(topath, backuptopath)
    except os.error:
        pass
    ok = 0
    try:
        tofolder.setlast(None)
        shutil.copy2(path, topath)
        ok = 1
    finally:
        # On failure, remove the partial destination file.
        if not ok:
            try:
                os.unlink(topath)
            except os.error:
                pass
def createmessage(self, n, txt):
    """Create a message, with text from the open file txt."""
    path = self.getmessagefilename(n)
    backuppath = self.getmessagefilename(',%d' % n)
    # Preserve any existing message n as a ",<n>" backup.
    try:
        os.rename(path, backuppath)
    except os.error:
        pass
    ok = 0
    BUFSIZE = 16*1024
    try:
        f = open(path, "w")
        # Copy txt to the new message file in fixed-size chunks.
        while 1:
            buf = txt.read(BUFSIZE)
            if not buf:
                break
            f.write(buf)
        f.close()
        ok = 1
    finally:
        # On failure, remove the partially written file.
        if not ok:
            try:
                os.unlink(path)
            except os.error:
                pass
def removefromallsequences(self, list):
    """Remove one or more messages from all sequences (including last)
    -- but not from 'cur'!!!"""
    # Invalidate the cached last-message number if it is being removed.
    if hasattr(self, 'last') and self.last in list:
        del self.last
    sequences = self.getsequences()
    changed = 0
    for name, seq in sequences.items():
        if name == 'cur':
            continue
        for n in list:
            if n in seq:
                seq.remove(n)
                changed = 1
        # Drop sequences that became empty.
        if not seq:
            del sequences[name]
    if changed:
        self.putsequences(sequences)
def getlast(self):
    """Return the last message number."""
    # listmessages() caches the highest message number in self.last.
    if not hasattr(self, 'last'):
        self.listmessages() # Set self.last
    return self.last
def setlast(self, last):
    """Set the last message number (None invalidates the cached value)."""
    if last is None:
        if hasattr(self, 'last'):
            del self.last
    else:
        self.last = last
class Message(mimetools.Message):
    """A single MH message, parsed via mimetools.Message."""

    def __init__(self, f, n, fp = None):
        """Constructor.

        f is the owning Folder, n the message number; fp, if given, is an
        already-open file object (otherwise the message file is opened).
        """
        self.folder = f
        self.number = n
        if fp is None:
            path = f.getmessagefilename(n)
            fp = open(path, 'r')
        mimetools.Message.__init__(self, fp)

    def __repr__(self):
        """String representation."""
        return 'Message(%s, %s)' % (repr(self.folder), self.number)

    def getheadertext(self, pred = None):
        """Return the message's header text as a string.  If an
        argument is specified, it is used as a filter predicate to
        decide which headers to return (its argument is the header
        name converted to lower case)."""
        if pred is None:
            return ''.join(self.headers)
        headers = []
        hit = 0
        for line in self.headers:
            # Continuation lines (leading whitespace) inherit the
            # hit/miss decision of the header line they belong to.
            if not line[0].isspace():
                i = line.find(':')
                if i > 0:
                    hit = pred(line[:i].lower())
            if hit: headers.append(line)
        return ''.join(headers)

    def getbodytext(self, decode = 1):
        """Return the message's body text as string.  This undoes a
        Content-Transfer-Encoding, but does not interpret other MIME
        features (e.g. multipart messages).  To suppress decoding,
        pass 0 as an argument."""
        self.fp.seek(self.startofbody)
        encoding = self.getencoding()
        # Identity encodings need no decoding pass.
        if not decode or encoding in ('', '7bit', '8bit', 'binary'):
            return self.fp.read()
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        output = StringIO()
        mimetools.decode(self.fp, output, encoding)
        return output.getvalue()

    def getbodyparts(self):
        """Only for multipart messages: return the message's body as a
        list of SubMessage objects.  Each submessage object behaves
        (almost) as a Message object."""
        if self.getmaintype() != 'multipart':
            raise Error, 'Content-Type is not multipart/*'
        bdry = self.getparam('boundary')
        if not bdry:
            raise Error, 'multipart/* without boundary param'
        self.fp.seek(self.startofbody)
        mf = multifile.MultiFile(self.fp)
        mf.push(bdry)
        parts = []
        while mf.next():
            # Sub-part numbers look like "<msgnum>.<partnum>" (1-based).
            n = "%s.%r" % (self.number, 1 + len(parts))
            part = SubMessage(self.folder, n, mf)
            parts.append(part)
        mf.pop()
        return parts

    def getbody(self):
        """Return body, either a string or a list of messages."""
        if self.getmaintype() == 'multipart':
            return self.getbodyparts()
        else:
            return self.getbodytext()
class SubMessage(Message):
    """One part of a multipart Message; body is read eagerly at init."""

    def __init__(self, f, n, fp):
        """Constructor."""
        Message.__init__(self, f, n, fp)
        # Materialize the body now: fp is a MultiFile positioned at this
        # part and cannot be re-read later.
        if self.getmaintype() == 'multipart':
            self.body = Message.getbodyparts(self)
        else:
            self.body = Message.getbodytext(self)
        self.bodyencoded = Message.getbodytext(self, decode=0)
        # XXX If this is big, should remember file pointers

    def __repr__(self):
        """String representation."""
        f, n, fp = self.folder, self.number, self.fp
        return 'SubMessage(%s, %s, %s)' % (f, n, fp)

    def getbodytext(self, decode = 1):
        # NOTE(review): when decode is true and self.body is a list
        # (multipart), this falls through and returns None implicitly.
        if not decode:
            return self.bodyencoded
        if type(self.body) == type(''):
            return self.body

    def getbodyparts(self):
        # NOTE(review): returns None implicitly for non-multipart bodies.
        if type(self.body) == type([]):
            return self.body

    def getbody(self):
        return self.body
class IntSet:
    """Class implementing sets of integers.

    This is an efficient representation for sets consisting of several
    continuous ranges, e.g. 1-100,200-400,402-1000 is represented
    internally as a list of three pairs: [(1,100), (200,400),
    (402,1000)]. The internal representation is always kept normalized.

    The constructor has up to three arguments:
    - the string used to initialize the set (default ''),
    - the separator between ranges (default ',')
    - the separator between begin and end of a range (default '-')
    The separators must be strings (not regexprs) and should be different.

    The tostring() function yields a string that can be passed to another
    IntSet constructor; __repr__() is a valid IntSet constructor itself.
    """

    # XXX The default begin/end separator means that negative numbers are
    # not supported very well.
    #
    # XXX There are currently no operations to remove set elements.

    def __init__(self, data = None, sep = ',', rng = '-'):
        self.pairs = []
        self.sep = sep
        self.rng = rng
        if data: self.fromstring(data)

    def reset(self):
        """Empty the set."""
        self.pairs = []

    def __cmp__(self, other):
        return cmp(self.pairs, other.pairs)

    def __hash__(self):
        # NOTE(review): self.pairs is a list, which is unhashable, so
        # hash() raises TypeError here; hashing tuple(self.pairs) would
        # be the working equivalent.  Confirm before relying on this.
        return hash(self.pairs)

    def __repr__(self):
        return 'IntSet(%r, %r, %r)' % (self.tostring(), self.sep, self.rng)

    def normalize(self):
        """Sort the pairs and merge adjacent/overlapping ranges."""
        self.pairs.sort()
        i = 1
        while i < len(self.pairs):
            alo, ahi = self.pairs[i-1]
            blo, bhi = self.pairs[i]
            # Merge when ranges overlap or touch (ahi+1 == blo).
            if ahi >= blo-1:
                self.pairs[i-1:i+1] = [(alo, max(ahi, bhi))]
            else:
                i = i+1

    def tostring(self):
        """Return the canonical string form, e.g. '1-3,5'."""
        s = ''
        for lo, hi in self.pairs:
            if lo == hi: t = repr(lo)
            else: t = repr(lo) + self.rng + repr(hi)
            if s: s = s + (self.sep + t)
            else: s = t
        return s

    def tolist(self):
        """Expand the set into a flat, sorted list of integers."""
        l = []
        for lo, hi in self.pairs:
            m = range(lo, hi+1)
            l = l + m
        return l

    def fromlist(self, list):
        for i in list:
            self.append(i)

    def clone(self):
        """Return a copy sharing no mutable state with this set."""
        new = IntSet()
        new.pairs = self.pairs[:]
        return new

    def min(self):
        return self.pairs[0][0]

    def max(self):
        return self.pairs[-1][-1]

    def contains(self, x):
        for lo, hi in self.pairs:
            if lo <= x <= hi: return True
        return False

    def append(self, x):
        """Insert a single integer, keeping the representation normalized."""
        for i in range(len(self.pairs)):
            lo, hi = self.pairs[i]
            if x < lo: # Need to insert before
                if x+1 == lo:
                    # Extend this range downward instead of inserting.
                    self.pairs[i] = (x, hi)
                else:
                    self.pairs.insert(i, (x, x))
                if i > 0 and x-1 == self.pairs[i-1][1]:
                    # Merge with previous
                    self.pairs[i-1:i+1] = [
                            (self.pairs[i-1][0],
                             self.pairs[i][1])
                            ]
                return
            if x <= hi: # Already in set
                return
        # x is beyond every existing range: extend or append at the end.
        i = len(self.pairs) - 1
        if i >= 0:
            lo, hi = self.pairs[i]
            if x-1 == hi:
                self.pairs[i] = lo, x
                return
        self.pairs.append((x, x))

    def addpair(self, xlo, xhi):
        """Add a whole range [xlo, xhi]; empty ranges are ignored."""
        if xlo > xhi: return
        self.pairs.append((xlo, xhi))
        self.normalize()

    def fromstring(self, data):
        """Parse a string like '1-3,5' and merge it into the set."""
        new = []
        for part in data.split(self.sep):
            list = []
            for subp in part.split(self.rng):
                s = subp.strip()
                list.append(int(s))
            if len(list) == 1:
                new.append((list[0], list[0]))
            elif len(list) == 2 and list[0] <= list[1]:
                new.append((list[0], list[1]))
            else:
                raise ValueError, 'bad data passed to IntSet'
        self.pairs = self.pairs + new
        self.normalize()
# Subroutines to read/write entries in .mh_profile and .mh_sequences
def pickline(file, key, casefold = 1):
    """Return the value of *key* in an MH profile-style file, or None.

    The file consists of "Key: value" lines; continuation lines start
    with whitespace and are appended to the value.  The first matching
    key wins; matching is case-insensitive unless *casefold* is false.
    Returns None when the file cannot be opened or the key is absent.
    """
    try:
        f = open(file, 'r')
    except IOError:
        return None
    # BUG FIX: the original never closed the file handle on any path;
    # the with-block guarantees it is closed (early return included).
    with f:
        pat = re.escape(key) + ':'
        prog = re.compile(pat, casefold and re.IGNORECASE)
        while 1:
            line = f.readline()
            if not line:
                break
            if prog.match(line):
                text = line[len(key)+1:]
                # Absorb continuation lines (leading whitespace).
                while 1:
                    line = f.readline()
                    if not line or not line[0].isspace():
                        break
                    text = text + line
                return text.strip()
    return None
def updateline(file, key, value, casefold = 1):
    """Add, replace or delete a "Key: value" line in an MH profile-style file.

    If *value* is None, the first line for *key* is removed; otherwise it
    is replaced (or a new line appended when the key is absent).  The file
    is rewritten through a "file~" temporary and an os.rename().  Matching
    is case-insensitive unless *casefold* is false.
    """
    try:
        # Context manager closes the handle even if readlines() fails.
        with open(file, 'r') as f:
            lines = f.readlines()
    except IOError:
        # Missing (or unreadable) file: start from an empty line list.
        lines = []
    pat = re.escape(key) + ':(.*)\n'
    prog = re.compile(pat, casefold and re.IGNORECASE)
    if value is None:
        newline = None
    else:
        newline = '%s: %s\n' % (key, value)
    for i in range(len(lines)):
        line = lines[i]
        if prog.match(line):
            if newline is None:
                del lines[i]
            else:
                lines[i] = newline
            break
    else:
        # No existing line for this key: append unless we were deleting.
        if newline is not None:
            lines.append(newline)
    # NOTE: local name shadows the stdlib 'tempfile' module (kept for
    # compatibility with the original naming).
    tempfile = file + "~"
    with open(tempfile, 'w') as f:
        f.writelines(lines)
    os.rename(tempfile, file)
# Test program
# Test program
def test():
    """Smoke-test against a live ~/Mail hierarchy.

    Destructive for the @test subtree (it is removed first) and requires
    the MH 'pick' command on PATH for the sequence-parsing comparison.
    """
    global mh, f
    os.system('rm -rf $HOME/Mail/@test')
    mh = MH()
    # Print each expression and its value (poor-man's doctest).
    def do(s): print s; print eval(s)
    do('mh.listfolders()')
    do('mh.listallfolders()')
    testfolders = ['@test', '@test/test1', '@test/test2',
                   '@test/test1/test11', '@test/test1/test12',
                   '@test/test1/test11/test111']
    for t in testfolders: do('mh.makefolder(%r)' % (t,))
    do('mh.listsubfolders(\'@test\')')
    do('mh.listallsubfolders(\'@test\')')
    f = mh.openfolder('@test')
    do('f.listsubfolders()')
    do('f.listallsubfolders()')
    do('f.getsequences()')
    seqs = f.getsequences()
    seqs['foo'] = IntSet('1-10 12-20', ' ').tolist()
    print seqs
    f.putsequences(seqs)
    do('f.getsequences()')
    # Deepest folders must be deleted first.
    for t in reversed(testfolders): do('mh.deletefolder(%r)' % (t,))
    do('mh.getcontext()')
    context = mh.getcontext()
    f = mh.openfolder(context)
    do('f.getcurrent()')
    for seq in ('first', 'last', 'cur', '.', 'prev', 'next',
                'first:3', 'last:3', 'cur:3', 'cur:-3',
                'prev:3', 'next:3',
                '1:3', '1:-3', '100:3', '100:-3', '10000:3', '10000:-3',
                'all'):
        try:
            do('f.parsesequence(%r)' % (seq,))
        except Error, msg:
            print "Error:", msg
        # Cross-check against the real MH 'pick' command.
        stuff = os.popen("pick %r 2>/dev/null" % (seq,)).read()
        list = map(int, stuff.split())
        print list, "<-- pick"
    do('f.listmessages()')

if __name__ == '__main__':
    test()
| lgpl-2.1 |
choderalab/openpathsampling | openpathsampling/engines/features/shared.py | 1 | 10616 | import copy
import numpy as np
from openpathsampling.netcdfplus import StorableObject, ObjectStore, WeakLRUCache
from openpathsampling.integration_tools import error_if_no_simtk_unit, unit
def unmask_quantity(quantity):
    """Force a maskedarray quantity to be unmasked.

    NetCDF keeps handing back masked arrays even when told not to, and
    masked arrays cause havoc elsewhere in the code, so the payload is
    rebuilt as a plain ndarray with the simtk unit re-attached.

    Parameters
    ----------
    quantity : simtk.unit.Quantity wrapping a numpy (masked) array
        quantity to unmask

    Returns
    -------
    simtk.unit.Quantity
        wraps a regular numpy array, not a masked array
    """
    _missing = object()
    q_unit = getattr(quantity, 'unit', _missing)
    if q_unit is _missing:
        # No .unit attribute: unitless input, return unchanged.
        return quantity
    return np.array(quantity.value_in_unit(q_unit)) * q_unit
# =============================================================================
# SIMULATION CONFIGURATION
# =============================================================================
class StaticContainer(StorableObject):
    """
    The static half of a simulation state: atomic coordinates plus the
    associated periodic box vectors.

    Attributes
    ----------
    coordinates : simtk.unit.Quantity wrapping Nx3 np array of dimension length
        atomic coordinates
    box_vectors : periodic box vectors
        the periodic box vectors
    """

    def __init__(self, coordinates, box_vectors):
        """
        Build a configuration snapshot from its components.

        Parameters
        ----------
        coordinates
        box_vectors
        """
        super(StaticContainer, self).__init__()
        # Deep copies decouple this container from the engine's live arrays.
        self.coordinates = copy.deepcopy(coordinates)
        self.box_vectors = copy.deepcopy(box_vectors)

    # =========================================================================
    # Comparison functions
    # =========================================================================

    @property
    def n_atoms(self):
        """Number of atoms in this configuration."""
        return self.coordinates.shape[0]

    # =========================================================================
    # Utility functions
    # =========================================================================

    def copy(self):
        """
        Return an independent deep copy of this container.

        If this object is saved it will be stored as a separate object and
        consume additional memory.

        Returns
        -------
        Configuration()
            the reduced deep copy
        """
        return StaticContainer(
            coordinates=self.coordinates,
            box_vectors=self.box_vectors,
        )

    def to_dict(self):
        """Serialize to a plain dict for storage."""
        return dict(
            coordinates=self.coordinates,
            box_vectors=self.box_vectors,
        )
class StaticContainerStore(ObjectStore):
    """
    An ObjectStore for Configuration. Allows to store Configuration() instances in a netcdf file.
    """

    def __init__(self):
        super(StaticContainerStore, self).__init__(StaticContainer, json=False)

    def to_dict(self):
        # No extra state beyond what ObjectStore serializes.
        return {}

    def _save(self, configuration, idx):
        # Store configuration.
        self.vars['coordinates'][idx] = configuration.coordinates
        box_vectors = configuration.box_vectors
        if box_vectors is None:
            # netCDF cannot store None: an all-zero matrix stands in for
            # "no box"; _load() maps it back to None.
            n_spatial = configuration.coordinates.shape[1]
            box_vectors = np.zeros((n_spatial, n_spatial))
        self.vars['box_vectors'][idx] = box_vectors

    def get(self, indices):
        # Load each requested configuration individually (no batching).
        return [self.load(idx) for idx in indices]

    def _load(self, idx):
        coordinates = self.vars["coordinates"][idx]
        box_vectors = self.vars["box_vectors"][idx]
        # All-zero box vectors were written by _save() to mean "no box".
        if not np.count_nonzero(box_vectors):
            box_vectors = None
        configuration = StaticContainer(coordinates=coordinates, box_vectors=box_vectors)
        return configuration

    def coordinates_as_numpy(self, frame_indices=None, atom_indices=None):
        """
        Return the atom coordinates in the storage for given frame indices
        and atoms

        Parameters
        ----------
        frame_indices : list of int or None
            the frame indices to be included. If None all frames are returned
        atom_indices : list of int or None
            the atom indices to be included. If None all atoms are returned

        Returns
        -------
        numpy.array, shape=(n_frames, n_atoms)
            the array of atom coordinates in a float32 numpy array
        """
        if frame_indices is None:
            frame_indices = slice(None)

        if atom_indices is None:
            atom_indices = slice(None)

        variable = self.storage.variables[self.prefix + '_coordinates']
        return variable[frame_indices, atom_indices, :].astype(
            np.float32).copy()

    def initialize(self):
        super(StaticContainerStore, self).initialize()

        # Units require simtk; fail early with a clear message if absent.
        error_if_no_simtk_unit("StaticContainerStore")
        self.create_variable(
            'coordinates', 'numpy.float32',
            dimensions=('n_atoms', 'n_spatial'),
            description="coordinate of atom '{ix[1]}' in dimension " +
                        "'{ix[2]}' of configuration '{ix[0]}'.",
            chunksizes=('n_atoms', 'n_spatial'),
            simtk_unit=unit.nanometers)

        self.create_variable(
            'box_vectors', 'numpy.float32',
            dimensions=('n_spatial', 'n_spatial'),
            chunksizes=('n_spatial', 'n_spatial'),
            simtk_unit=unit.nanometers)
# =============================================================================
# SIMULATION MOMENTUM / VELOCITY
# =============================================================================
class KineticContainer(StorableObject):
    """
    The kinetic half of a simulation state: velocities of all atoms.

    Attributes
    ----------
    velocities : simtk.unit.Quantity wrapping Nx3 np array of dimension length
        atomic velocities
    """

    def __init__(self, velocities):
        """
        Build a momentum snapshot from its components.

        Parameters
        ----------
        velocities
        """
        super(KineticContainer, self).__init__()
        # Deep copy decouples this container from the engine's live arrays.
        self.velocities = copy.deepcopy(velocities)

    # =========================================================================
    # Utility functions
    # =========================================================================

    def copy(self):
        """
        Return an independent deep copy of this container.

        If saved this object will be stored as a separate object and
        consume additional memory.

        Returns
        -------
        Momentum()
            the shallow copy
        """
        return KineticContainer(velocities=self.velocities)

    def to_dict(self):
        """Serialize to a plain dict for storage."""
        return dict(velocities=self.velocities)
class KineticContainerStore(ObjectStore):
    """
    An ObjectStore for Momenta. Allows to store Momentum() instances in a netcdf file.
    """

    def __init__(self):
        super(KineticContainerStore, self).__init__(KineticContainer, json=False)

    def to_dict(self):
        # No extra state beyond what ObjectStore serializes.
        return {}

    def _save(self, momentum, idx):
        self.vars['velocities'][idx, :, :] = momentum.velocities

    def _load(self, idx):
        velocities = self.vars['velocities'][idx]
        momentum = KineticContainer(velocities=velocities)
        return momentum

    def velocities_as_numpy(self, frame_indices=None, atom_indices=None):
        """
        Return a block of stored velocities in the database as a numpy array.

        Parameters
        ----------
        frame_indices : list of int or None
            the indices of Momentum objects to be retrieved from the database.
            If `None` is specified then all indices are returned!
        atom_indices : list of int of None
            if not None only the specified atom_indices are returned. Might
            speed up reading a lot.
        """

        if frame_indices is None:
            frame_indices = slice(None)

        if atom_indices is None:
            atom_indices = slice(None)

        # NOTE(review): this uses self.variables while _save/_load use
        # self.vars and StaticContainerStore uses self.storage.variables
        # -- confirm 'variables' resolves on ObjectStore.
        v = self.variables['velocities']
        return v[frame_indices, atom_indices, :].astype(np.float32).copy()

    def velocities_as_array(self, frame_indices=None, atom_indices=None):
        """
        Returns a numpy array consisting of all velocities at the given indices

        Parameters
        ----------
        frame_indices : list of int
            momenta indices to be loaded
        atom_indices : list of int
            selects only the atoms to be returned. If None (Default) all atoms
            will be selected

        Returns
        -------
        numpy.ndarray, shape = (l,n)
            returns an array with `l` the number of frames and `n` the number
            of atoms
        """
        # Thin alias for velocities_as_numpy().
        return self.velocities_as_numpy(frame_indices, atom_indices)

    def initialize(self):
        """
        Initializes the associated storage to index momentums in it
        """
        # Units require simtk; fail early with a clear message if absent.
        error_if_no_simtk_unit("KineticContainerStore")

        super(KineticContainerStore, self).initialize()

        self.create_variable(
            'velocities', 'numpy.float32',
            dimensions=('n_atoms', 'n_spatial'),
            description="the velocity of atom 'atom' in dimension " +
                        "'coordinate' of momentum 'momentum'.",
            chunksizes=('n_atoms', 'n_spatial'),
            simtk_unit=unit.nanometers / unit.picoseconds)
| lgpl-2.1 |
pylada/pylada-light | tests/vasp/test_parse.py | 1 | 3004 | ###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
def test_parse_incar():
    """Exercise pylada.vasp.parse_incar on a sample INCAR string:
    comments, ';' statement separators, line continuations, and
    duplicate/empty keys (which must raise ValueError)."""
    from pylada.vasp import parse_incar
    from pylada.error import ValueError

    string = """ALGO = Fast\n"""\
            """ENCUT = 294.414\n"""\
            """EDIFF = 2e-12\n"""\
            """MAGMOM = -0*8 1.5*2\n"""\
            """ISPIN = 1\n"""\
            """ISMEAR = -1\n"""\
            """SIGMA = 0.001\n"""\
            """ISIF = 2\n"""\
            """NSW = 50\n"""\
            """IBRION = 2\n"""\
            """LMAXMIX = 4\n"""\
            """LCHARG = .TRUE.\n"""\
            """LVTOT = .FALSE.\n"""\
            """SYSTEM = Zinc-Blende\n"""

    def get_errors(found):
        # Compares KEY SETS only -- values in `expected` are never checked
        # (which is why e.g. 'LVTOT': '.FALSE' and the MAGMOM sign differ
        # from the INCAR string without failing the test).
        errors = {}
        expected = {'ALGO': 'Fast', 'ENCUT': '294.414', 'EDIFF': '2e-12',
                    'ISPIN': '1', 'MAGMOM': '0*8 1.5*2', 'ISMEAR': '-1',
                    'SIGMA': '0.001', 'ISIF': '2', 'NSW': '50', 'IBRION': '2',
                    'LMAXMIX': '4', 'LCHARG': '.TRUE.', 'LVTOT': '.FALSE',
                    'SYSTEM': 'Zinc-Blende'}
        for key in set(found.keys()) - set(expected.keys()):
            errors[key] = found[key]
        for key in set(expected.keys()) - set(found.keys()):
            errors[key] = None
        return errors

    result = parse_incar(string)
    assert len(get_errors(result)) == 0

    # Commenting out every line but the first leaves a single pair.
    assert parse_incar(string.replace('\n', '\n#')) == [('ALGO', 'Fast')]
    # ';' separators and '\'-continuations must parse identically.
    assert len(get_errors(parse_incar(string.replace('\n', ';', 2)))) == 0
    assert len(get_errors(parse_incar(string.replace('=', '\\\n =', 2)))) == 0
    assert len(get_errors(parse_incar(string.replace('=', '= \\\n ', 2)))) == 0

    # A duplicate key must raise.
    try:
        parse_incar(string + "LVTOT = .TRUE.")
    except ValueError:
        pass
    else:
        raise Exception()

    # An empty key must raise.
    try:
        parse_incar(string + " = .TRUE.")
    except ValueError:
        pass
    else:
        raise Exception()
| gpl-3.0 |
jamming/FrameworkBenchmarks | toolset/databases/mongodb/mongodb.py | 14 | 2832 | import pymongo
import traceback
from colorama import Fore
from toolset.utils.output_helper import log
from toolset.databases.abstract_database import AbstractDatabase
class Database(AbstractDatabase):
    """MongoDB adapter for the benchmark toolset's database checks."""

    @classmethod
    def get_connection(cls, config):
        """Open a MongoClient against the configured database host."""
        return pymongo.MongoClient(host=config.database_host)

    @classmethod
    def get_current_world_table(cls, config):
        """Return [{id: randomNumber}] for every document in hello_world.world.

        Documents may carry their identifier either as "id" or as the
        native "_id" key; both are normalised to stringified-int keys.
        Returns an empty list (after logging) on any failure.
        """
        results_json = []
        try:
            worlds_json = {}
            print("DATABASE_HOST: %s" % config.database_host)
            connection = cls.get_connection(config)
            db = connection.hello_world
            for world in db.world.find():
                if "randomNumber" in world:
                    if "id" in world:
                        worlds_json[str(int(world["id"]))] = int(
                            world["randomNumber"])
                    elif "_id" in world:
                        worlds_json[str(int(world["_id"]))] = int(
                            world["randomNumber"])
            results_json.append(worlds_json)
            connection.close()
        except Exception:
            tb = traceback.format_exc()
            log("ERROR: Unable to load current MongoDB World table.",
                color=Fore.RED)
            log(tb)
        return results_json

    @classmethod
    def test_connection(cls, config):
        """Return True when a trivial query against hello_world succeeds."""
        try:
            connection = cls.get_connection(config)
            db = connection.hello_world
            db.world.find()
            # BUG FIX: the original called db.close().  pymongo Database
            # objects have no close(); attribute access creates a collection
            # named "close" and calling it raises TypeError, so this method
            # always returned False.  Close the client connection instead.
            connection.close()
            return True
        except Exception:
            return False

    @classmethod
    def get_queries(cls, config):
        """Total query + update opcounters (all queries issued so far)."""
        co = cls.get_connection(config)
        status = cls._server_status(co)
        return int(status["opcounters"]["query"]) + int(status["opcounters"]["update"])

    @classmethod
    def get_rows(cls, config):
        """Approximate number of rows read: query count * rows per query."""
        co = cls.get_connection(config)
        status = cls._server_status(co)
        return int(status["opcounters"]["query"]) * cls.get_rows_per_query(co)

    @classmethod
    def get_rows_updated(cls, config):
        """Approximate number of rows updated: update count * rows per query."""
        co = cls.get_connection(config)
        status = cls._server_status(co)
        return int(status["opcounters"]["update"]) * cls.get_rows_per_query(co)

    @classmethod
    def reset_cache(cls, config):
        """Clear the query-plan caches for the benchmark collections."""
        co = cls.get_connection(config)
        co.admin.command({"planCacheClear": "world"})
        co.admin.command({"planCacheClear": "fortune"})

    @classmethod
    def _server_status(cls, co):
        """Fetch the serverStatus document via the admin database.

        NOTE(review): pymongo.son_manipulator was removed in pymongo 3.x;
        kept here for compatibility with the pinned pymongo version.
        """
        return co.admin.command(pymongo.son_manipulator.SON([('serverStatus', 1)]))

    @classmethod
    def get_rows_per_query(cls, co):
        # cls.tbl_name is presumably set by AbstractDatabase / test setup
        # -- TODO confirm; it is not defined in this class.
        rows_per_query = 1
        if cls.tbl_name == "fortune":
            rows_per_query = co["hello_world"][cls.tbl_name].count_documents({})
        return rows_per_query
| bsd-3-clause |
caogang/wgan-gp | tflib/ops/linear.py | 6 | 4548 | import tflib as lib
import numpy as np
import tensorflow as tf
# Module-wide default for weight normalization; Linear() consults this
# when its `weightnorm` argument is None.
_default_weightnorm = False
def enable_default_weightnorm():
    """Make weight normalization the default for subsequently built layers."""
    global _default_weightnorm
    _default_weightnorm = True

def disable_default_weightnorm():
    """Restore plain (un-normalized) weights as the default."""
    global _default_weightnorm
    _default_weightnorm = False

# When set (not None), overrides every initializer's stdev in Linear().
_weights_stdev = None
def set_weights_stdev(weights_stdev):
    """Force all subsequent weight initializations to use this stdev."""
    global _weights_stdev
    _weights_stdev = weights_stdev

def unset_weights_stdev():
    """Revert to the initializer-specific stdevs."""
    global _weights_stdev
    _weights_stdev = None
def Linear(
        name,
        input_dim,
        output_dim,
        inputs,
        biases=True,
        initialization=None,
        weightnorm=None,
        gain=1.
        ):
    """
    Fully-connected layer: inputs @ W (+ b), with selectable weight
    initialization and optional weight normalization.

    initialization: None, `lecun`, 'glorot', `he`, 'glorot_he', `orthogonal`, `("uniform", range)`

    Notes
    -----
    * Weights/biases are created (or reused) via lib.param under `name`.
    * If `weightnorm` is None the module-level default applies.
    * NOTE(review): tf.pack/tf.unpack are pre-TF-1.0 names (later
      tf.stack/tf.unstack); this code targets that TF generation.
    """
    with tf.name_scope(name) as scope:

        def uniform(stdev, size):
            # Uniform with matching variance: U(-stdev*sqrt(3), stdev*sqrt(3)).
            # A module-level _weights_stdev override trumps the caller's stdev.
            if _weights_stdev is not None:
                stdev = _weights_stdev
            return np.random.uniform(
                low=-stdev * np.sqrt(3),
                high=stdev * np.sqrt(3),
                size=size
            ).astype('float32')

        if initialization == 'lecun':# and input_dim != output_dim):
            # disabling orth. init for now because it's too slow
            weight_values = uniform(
                np.sqrt(1./input_dim),
                (input_dim, output_dim)
            )

        elif initialization == 'glorot' or (initialization == None):
            # Glorot/Xavier is the default when nothing is specified
            # (unless input_dim == output_dim, handled below as orthogonal).
            weight_values = uniform(
                np.sqrt(2./(input_dim+output_dim)),
                (input_dim, output_dim)
            )

        elif initialization == 'he':

            weight_values = uniform(
                np.sqrt(2./input_dim),
                (input_dim, output_dim)
            )

        elif initialization == 'glorot_he':

            weight_values = uniform(
                np.sqrt(4./(input_dim+output_dim)),
                (input_dim, output_dim)
            )

        elif initialization == 'orthogonal' or \
            (initialization == None and input_dim == output_dim):

            # From lasagne
            def sample(shape):
                if len(shape) < 2:
                    raise RuntimeError("Only shapes of length 2 or more are "
                                       "supported.")
                flat_shape = (shape[0], np.prod(shape[1:]))
                 # TODO: why normal and not uniform?
                a = np.random.normal(0.0, 1.0, flat_shape)
                u, _, v = np.linalg.svd(a, full_matrices=False)
                # pick the one with the correct shape
                q = u if u.shape == flat_shape else v
                q = q.reshape(shape)
                return q.astype('float32')

            weight_values = sample((input_dim, output_dim))

        elif initialization[0] == 'uniform':

            weight_values = np.random.uniform(
                low=-initialization[1],
                high=initialization[1],
                size=(input_dim, output_dim)
            ).astype('float32')

        else:

            raise Exception('Invalid initialization!')

        weight_values *= gain

        weight = lib.param(
            name + '.W',
            weight_values
        )

        if weightnorm==None:
            weightnorm = _default_weightnorm
        if weightnorm:
            # Reparameterize: W <- W * g / ||W||_col, with trainable g
            # initialized to the columns' initial norms.
            norm_values = np.sqrt(np.sum(np.square(weight_values), axis=0))
            # norm_values = np.linalg.norm(weight_values, axis=0)

            target_norms = lib.param(
                name + '.g',
                norm_values
            )

            with tf.name_scope('weightnorm') as scope:
                norms = tf.sqrt(tf.reduce_sum(tf.square(weight), reduction_indices=[0]))
                weight = weight * (target_norms / norms)

        # if 'Discriminator' in name:
        #     print "WARNING weight constraint on {}".format(name)
        #     weight = tf.nn.softsign(10.*weight)*.1

        if inputs.get_shape().ndims == 2:
            result = tf.matmul(inputs, weight)
        else:
            # Higher-rank input: flatten all leading dims, multiply, then
            # restore the original leading dims with output_dim last.
            reshaped_inputs = tf.reshape(inputs, [-1, input_dim])
            result = tf.matmul(reshaped_inputs, weight)
            result = tf.reshape(result, tf.pack(tf.unpack(tf.shape(inputs))[:-1] + [output_dim]))

        if biases:
            result = tf.nn.bias_add(
                result,
                lib.param(
                    name + '.b',
                    np.zeros((output_dim,), dtype='float32')
                )
            )

        return result
ar7z1/ansible | lib/ansible/plugins/action/net_vrf.py | 756 | 1058 | # (c) 2017, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action.net_base import ActionModule as _ActionModule
class ActionModule(_ActionModule):
    # Thin wrapper over the shared net_base action plugin; net_vrf needs
    # no handling beyond the common network-module plumbing.

    def run(self, tmp=None, task_vars=None):
        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect
        return result
| gpl-3.0 |
supercheetah/diceroller | pyinstaller/buildtests/basic/test_pkg_structures.py | 2 | 1265 | # Copyright (C) 2005, Giovanni Bajo
# Based on previous work under copyright (c) 2001, 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# Tests - hooks, strange pkg structures, version, icon.
# Expected return values of the three calls exercised below; each comes
# from the oddly-structured pkg1/pkg2 test packages.
e1 = 'a_func from pkg2.a'
e2 = 'b_func from pkg2.b (pkg2/extra/b.py)'
e3 = 'notamodule from pkg2.__init__'

# pkg1 is expected to re-export pkg2's submodules and helpers.
from pkg1 import *

# On mismatch, print expected vs. actual instead of raising.
t1 = a.a_func()
if t1 != e1:
    print "expected:", e1
    print "     got:", t1
t2 = b.b_func()
if t2 != e2:
    print "expected:", e2
    print "     got:", t2
t3 = notamodule()
if t3 != e3:
    print "expected:", e3
    print "     got:", t3
| artistic-2.0 |
maclandrol/ete | ete2/evol/parser/__init__.py | 3 | 1603 | # #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
#!/usr/bin/python
# Author: Francois-Jose Serra
# Creation Date: 2010/11/03 21:20:05
from codemlparser import *
from slrparser import *
| gpl-3.0 |
chris-wood/SCoNet | ns-3-dev/src/visualizer/visualizer/plugins/ndnsim_pit.py | 8 | 2831 | import gtk
import ns.core
import ns.network
import ns.internet
import ns.ndnSIM
from visualizer.base import InformationWindow
class ShowNdnPit(InformationWindow):
    """Dialog listing the ndnSIM PIT (pending interest table) of one node."""

    # Column indices into the ListStore.
    # NOTE(review): the ListStore below is created with THREE column types
    # (str, str, int) but only these two columns are ever set/rendered.
    (
        COLUMN_PREFIX,
        COLUMN_FACE
    ) = range(2)

    def __init__(self, visualizer, node_index):
        """Build the dialog for the node at *node_index* and register it
        with the visualizer so update() is called on refresh."""
        InformationWindow.__init__(self)
        self.win = gtk.Dialog(parent=visualizer.window,
                              flags=gtk.DIALOG_DESTROY_WITH_PARENT|gtk.DIALOG_NO_SEPARATOR,
                              buttons=(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
        self.win.connect("response", self._response_cb)
        self.node = ns.network.NodeList.GetNode (node_index)
        node_name = ns.core.Names.FindName (self.node)
        title = "Ndn PIT for node %i" % node_index
        if len(node_name) != 0:
            title += " (" + str(node_name) + ")"
        self.win.set_title (title)
        self.visualizer = visualizer
        self.node_index = node_index

        self.table_model = gtk.ListStore(str, str, int)

        treeview = gtk.TreeView(self.table_model)
        treeview.show()
        sw = gtk.ScrolledWindow()
        sw.set_properties(hscrollbar_policy=gtk.POLICY_AUTOMATIC,
                          vscrollbar_policy=gtk.POLICY_AUTOMATIC)
        sw.show()
        sw.add(treeview)
        self.win.vbox.add(sw)
        self.win.set_default_size(600, 300)

        # Dest.
        column = gtk.TreeViewColumn('Prefix', gtk.CellRendererText(),
                                    text=self.COLUMN_PREFIX)
        treeview.append_column(column)

        # Interface
        column = gtk.TreeViewColumn('Info', gtk.CellRendererText(),
                                    text=self.COLUMN_FACE)
        treeview.append_column(column)

        self.visualizer.add_information_window(self)
        self.win.show()

    def _response_cb(self, win, response):
        # Any response closes the dialog and deregisters it.
        self.win.destroy()
        self.visualizer.remove_information_window(self)

    def update(self):
        """Repopulate the table from the node's current PIT contents."""
        ndnPit = ns.ndnSIM.ndn.Pit.GetPit (self.node)

        if ndnPit is None:
            return

        self.table_model.clear()

        item = ndnPit.Begin ()
        while (item != ndnPit.End ()):
            tree_iter = self.table_model.append()
            # COLUMN_FACE shows str(item), i.e. the PIT entry's own
            # string form, not a face object.
            self.table_model.set(tree_iter,
                                 self.COLUMN_PREFIX, str(item.GetPrefix()),
                                 self.COLUMN_FACE, str(item))
            item = ndnPit.Next (item)
def populate_node_menu(viz, node, menu):
    """Append a 'Show NDN PIT' item to a node's context menu."""
    def _activate(_item):
        # Opening the window is enough: it registers itself.
        ShowNdnPit(viz, node.node_index)
    item = gtk.MenuItem("Show NDN PIT")
    item.connect("activate", _activate)
    item.show()
    menu.add(item)
def register(viz):
    # Hook the menu-entry factory into the visualizer's per-node
    # context-menu construction signal.
    viz.connect("populate-node-menu", populate_node_menu)
| gpl-2.0 |
wkritzinger/asuswrt-merlin | release/src/router/nfs-utils/tools/nfs-iostat/nfs-iostat.py | 16 | 20312 | #!/usr/bin/env python
# -*- python-mode -*-
"""Emulate iostat for NFS mount points using /proc/self/mountstats
"""
__copyright__ = """
Copyright (C) 2005, Chuck Lever <cel@netapp.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys, os, time
Iostats_version = '0.2'
def difference(x, y):
    """Return ``x - y``; a named subtraction for use with map()."""
    delta = x - y
    return delta
# Counter names, in the order their values appear on the "events:" line
# of a /proc/self/mountstats entry.
NfsEventCounters = [
    'inoderevalidates',
    'dentryrevalidates',
    'datainvalidates',
    'attrinvalidates',
    'vfsopen',
    'vfslookup',
    'vfspermission',
    'vfsupdatepage',
    'vfsreadpage',
    'vfsreadpages',
    'vfswritepage',
    'vfswritepages',
    'vfsreaddir',
    'vfssetattr',
    'vfsflush',
    'vfsfsync',
    'vfslock',
    'vfsrelease',
    'congestionwait',
    'setattrtrunc',
    'extendwrite',
    'sillyrenames',
    'shortreads',
    'shortwrites',
    'delay'
]
# Counter names, in the order their values appear on the "bytes:" line.
NfsByteCounters = [
    'normalreadbytes',
    'normalwritebytes',
    'directreadbytes',
    'directwritebytes',
    'serverreadbytes',
    'serverwritebytes',
    'readpages',
    'writepages'
]
class DeviceData:
    """DeviceData objects provide methods for parsing and displaying
    data for a single mount grabbed from /proc/self/mountstats
    """
    def __init__(self):
        # NFS-level counters and RPC transport/per-op counters, both
        # keyed by counter name.  'ops' lists the RPC op names seen.
        self.__nfs_data = dict()
        self.__rpc_data = dict()
        self.__rpc_data['ops'] = []
    def __parse_nfs_line(self, words):
        # Parse one line of the NFS section (everything before the
        # "RPC" header) of a mountstats entry.
        if words[0] == 'device':
            self.__nfs_data['export'] = words[1]
            self.__nfs_data['mountpoint'] = words[4]
            self.__nfs_data['fstype'] = words[7]
            if words[7] == 'nfs':
                self.__nfs_data['statvers'] = words[8]
        elif words[0] == 'age:':
            self.__nfs_data['age'] = long(words[1])
        elif words[0] == 'opts:':
            self.__nfs_data['mountoptions'] = ''.join(words[1:]).split(',')
        elif words[0] == 'caps:':
            self.__nfs_data['servercapabilities'] = ''.join(words[1:]).split(',')
        elif words[0] == 'nfsv4:':
            self.__nfs_data['nfsv4flags'] = ''.join(words[1:]).split(',')
        elif words[0] == 'sec:':
            keys = ''.join(words[1:]).split(',')
            self.__nfs_data['flavor'] = int(keys[0].split('=')[1])
            self.__nfs_data['pseudoflavor'] = 0
            # flavor 6 is RPCSEC_GSS; the pseudoflavor follows it
            if self.__nfs_data['flavor'] == 6:
                self.__nfs_data['pseudoflavor'] = int(keys[1].split('=')[1])
        elif words[0] == 'events:':
            i = 1
            for key in NfsEventCounters:
                self.__nfs_data[key] = int(words[i])
                i += 1
        elif words[0] == 'bytes:':
            i = 1
            for key in NfsByteCounters:
                self.__nfs_data[key] = long(words[i])
                i += 1
    def __parse_rpc_line(self, words):
        # Parse one line of the RPC section.  The xprt: line layout
        # depends on the transport protocol (udp/tcp/rdma).
        if words[0] == 'RPC':
            self.__rpc_data['statsvers'] = float(words[3])
            self.__rpc_data['programversion'] = words[5]
        elif words[0] == 'xprt:':
            self.__rpc_data['protocol'] = words[1]
            if words[1] == 'udp':
                self.__rpc_data['port'] = int(words[2])
                self.__rpc_data['bind_count'] = int(words[3])
                self.__rpc_data['rpcsends'] = int(words[4])
                self.__rpc_data['rpcreceives'] = int(words[5])
                self.__rpc_data['badxids'] = int(words[6])
                self.__rpc_data['inflightsends'] = long(words[7])
                self.__rpc_data['backlogutil'] = long(words[8])
            elif words[1] == 'tcp':
                self.__rpc_data['port'] = words[2]
                self.__rpc_data['bind_count'] = int(words[3])
                self.__rpc_data['connect_count'] = int(words[4])
                self.__rpc_data['connect_time'] = int(words[5])
                self.__rpc_data['idle_time'] = int(words[6])
                self.__rpc_data['rpcsends'] = int(words[7])
                self.__rpc_data['rpcreceives'] = int(words[8])
                self.__rpc_data['badxids'] = int(words[9])
                self.__rpc_data['inflightsends'] = long(words[10])
                self.__rpc_data['backlogutil'] = long(words[11])
            elif words[1] == 'rdma':
                self.__rpc_data['port'] = words[2]
                self.__rpc_data['bind_count'] = int(words[3])
                self.__rpc_data['connect_count'] = int(words[4])
                self.__rpc_data['connect_time'] = int(words[5])
                self.__rpc_data['idle_time'] = int(words[6])
                self.__rpc_data['rpcsends'] = int(words[7])
                self.__rpc_data['rpcreceives'] = int(words[8])
                self.__rpc_data['badxids'] = int(words[9])
                self.__rpc_data['backlogutil'] = int(words[10])
                self.__rpc_data['read_chunks'] = int(words[11])
                self.__rpc_data['write_chunks'] = int(words[12])
                self.__rpc_data['reply_chunks'] = int(words[13])
                self.__rpc_data['total_rdma_req'] = int(words[14])
                self.__rpc_data['total_rdma_rep'] = int(words[15])
                self.__rpc_data['pullup'] = int(words[16])
                self.__rpc_data['fixup'] = int(words[17])
                self.__rpc_data['hardway'] = int(words[18])
                self.__rpc_data['failed_marshal'] = int(words[19])
                self.__rpc_data['bad_reply'] = int(words[20])
        elif words[0] == 'per-op':
            self.__rpc_data['per-op'] = words
        else:
            # A per-op statistics line: "<OP>: n n n ..."
            op = words[0][:-1]
            self.__rpc_data['ops'] += [op]
            self.__rpc_data[op] = [long(word) for word in words[1:]]
    def parse_stats(self, lines):
        """Turn a list of lines from a mount stat file into a
        dictionary full of stats, keyed by name
        """
        # 'found' flips to True once the RPC section header is seen.
        found = False
        for line in lines:
            words = line.split()
            if len(words) == 0:
                continue
            if (not found and words[0] != 'RPC'):
                self.__parse_nfs_line(words)
                continue
            found = True
            self.__parse_rpc_line(words)
    def is_nfs_mountpoint(self):
        """Return True if this is an NFS or NFSv4 mountpoint,
        otherwise return False
        """
        if self.__nfs_data['fstype'] == 'nfs':
            return True
        elif self.__nfs_data['fstype'] == 'nfs4':
            return True
        return False
    def compare_iostats(self, old_stats):
        """Return the difference between two sets of stats
        """
        result = DeviceData()
        # copy self into result
        for key, value in self.__nfs_data.iteritems():
            result.__nfs_data[key] = value
        for key, value in self.__rpc_data.iteritems():
            result.__rpc_data[key] = value
        # compute the difference of each item in the list
        # note the copy loop above does not copy the lists, just
        # the reference to them.  so we build new lists here
        # for the result object.
        for op in result.__rpc_data['ops']:
            result.__rpc_data[op] = map(difference, self.__rpc_data[op], old_stats.__rpc_data[op])
        # update the remaining keys we care about
        result.__rpc_data['rpcsends'] -= old_stats.__rpc_data['rpcsends']
        result.__rpc_data['backlogutil'] -= old_stats.__rpc_data['backlogutil']
        for key in NfsEventCounters:
            result.__nfs_data[key] -= old_stats.__nfs_data[key]
        for key in NfsByteCounters:
            result.__nfs_data[key] -= old_stats.__nfs_data[key]
        return result
    def __print_data_cache_stats(self):
        """Print the data cache hit rate
        """
        # NOTE(review): this method is not called from display_iostats
        # in this file; it appears to be kept for ad-hoc use.
        nfs_stats = self.__nfs_data
        app_bytes_read = float(nfs_stats['normalreadbytes'])
        if app_bytes_read != 0:
            client_bytes_read = float(nfs_stats['serverreadbytes'] - nfs_stats['directreadbytes'])
            ratio = ((app_bytes_read - client_bytes_read) * 100) / app_bytes_read
            print
            print 'app bytes: %f  client bytes %f' % (app_bytes_read, client_bytes_read)
            print 'Data cache hit ratio: %4.2f%%' % ratio
    def __print_attr_cache_stats(self, sample_time):
        """Print attribute cache efficiency stats
        """
        nfs_stats = self.__nfs_data
        getattr_stats = self.__rpc_data['GETATTR']
        if nfs_stats['inoderevalidates'] != 0:
            getattr_ops = float(getattr_stats[1])
            opens = float(nfs_stats['vfsopen'])
            # revalidations not forced by an open()
            revalidates = float(nfs_stats['inoderevalidates']) - opens
            if revalidates != 0:
                ratio = ((revalidates - getattr_ops) * 100) / revalidates
            else:
                ratio = 0.0
            data_invalidates = float(nfs_stats['datainvalidates'])
            attr_invalidates = float(nfs_stats['attrinvalidates'])
            print
            print '%d inode revalidations, hitting in cache %4.2f%% of the time' % \
                (revalidates, ratio)
            print '%d open operations (mandatory GETATTR requests)' % opens
            if getattr_ops != 0:
                print '%4.2f%% of GETATTRs resulted in data cache invalidations' % \
                   ((data_invalidates * 100) / getattr_ops)
    def __print_dir_cache_stats(self, sample_time):
        """Print directory stats
        """
        nfs_stats = self.__nfs_data
        lookup_ops = self.__rpc_data['LOOKUP'][0]
        readdir_ops = self.__rpc_data['READDIR'][0]
        # READDIRPLUS is only present for NFSv3 mounts
        if self.__rpc_data.has_key('READDIRPLUS'):
            readdir_ops += self.__rpc_data['READDIRPLUS'][0]
        dentry_revals = nfs_stats['dentryrevalidates']
        opens = nfs_stats['vfsopen']
        lookups = nfs_stats['vfslookup']
        getdents = nfs_stats['vfsreaddir']
        print
        print '%d open operations (pathname lookups)' % opens
        print '%d dentry revalidates and %d vfs lookup requests' % \
            (dentry_revals, lookups),
        print 'resulted in %d LOOKUPs on the wire' % lookup_ops
        print '%d vfs getdents calls resulted in %d READDIRs on the wire' % \
            (getdents, readdir_ops)
    def __print_page_stats(self, sample_time):
        """Print page cache stats
        """
        nfs_stats = self.__nfs_data
        vfsreadpage = nfs_stats['vfsreadpage']
        vfsreadpages = nfs_stats['vfsreadpages']
        pages_read = nfs_stats['readpages']
        vfswritepage = nfs_stats['vfswritepage']
        vfswritepages = nfs_stats['vfswritepages']
        pages_written = nfs_stats['writepages']
        print
        # NOTE(review): nfs_readpage()/nfs_writepage() handle one page
        # per call, hence the same counter is printed for both fields.
        print '%d nfs_readpage() calls read %d pages' % \
            (vfsreadpage, vfsreadpage)
        print '%d nfs_readpages() calls read %d pages' % \
            (vfsreadpages, pages_read - vfsreadpage),
        if vfsreadpages != 0:
            print '(%.1f pages per call)' % \
                (float(pages_read - vfsreadpage) / vfsreadpages)
        else:
            print
        print
        print '%d nfs_updatepage() calls' % nfs_stats['vfsupdatepage']
        print '%d nfs_writepage() calls wrote %d pages' % \
            (vfswritepage, vfswritepage)
        print '%d nfs_writepages() calls wrote %d pages' % \
            (vfswritepages, pages_written - vfswritepage),
        if (vfswritepages) != 0:
            print '(%.1f pages per call)' % \
                (float(pages_written - vfswritepage) / vfswritepages)
        else:
            print
        congestionwaits = nfs_stats['congestionwait']
        if congestionwaits != 0:
            print
            print '%d congestion waits' % congestionwaits
    def __print_rpc_op_stats(self, op, sample_time):
        """Print generic stats for one RPC op
        """
        if not self.__rpc_data.has_key(op):
            return
        rpc_stats = self.__rpc_data[op]
        # rpc_stats fields: presumably [ops, transmissions, timeouts,
        # bytes sent, bytes received, queue, rtt, execute] -- TODO
        # confirm against the kernel's per-op mountstats format.
        ops = float(rpc_stats[0])
        retrans = float(rpc_stats[1] - rpc_stats[0])
        kilobytes = float(rpc_stats[3] + rpc_stats[4]) / 1024
        rtt = float(rpc_stats[6])
        exe = float(rpc_stats[7])
        # prevent floating point exceptions
        if ops != 0:
            kb_per_op = kilobytes / ops
            retrans_percent = (retrans * 100) / ops
            rtt_per_op = rtt / ops
            exe_per_op = exe / ops
        else:
            kb_per_op = 0.0
            retrans_percent = 0.0
            rtt_per_op = 0.0
            exe_per_op = 0.0
        op += ':'
        print '%s' % op.lower().ljust(15),
        print '  ops/s\t\t   kB/s\t\t  kB/op\t\tretrans\t\tavg RTT (ms)\tavg exe (ms)'
        print '\t\t%7.3f' % (ops / sample_time),
        print '\t%7.3f' % (kilobytes / sample_time),
        print '\t%7.3f' % kb_per_op,
        print ' %7d (%3.1f%%)' % (retrans, retrans_percent),
        print '\t%7.3f' % rtt_per_op,
        print '\t%7.3f' % exe_per_op
    def display_iostats(self, sample_time, which):
        """Display NFS and RPC stats in an iostat-like way
        """
        sends = float(self.__rpc_data['rpcsends'])
        if sample_time == 0:
            # first report: cover the whole time since the mount
            sample_time = float(self.__nfs_data['age'])
        if sends != 0:
            backlog = (float(self.__rpc_data['backlogutil']) / sends) / sample_time
        else:
            backlog = 0.0
        print
        print '%s mounted on %s:' % \
            (self.__nfs_data['export'], self.__nfs_data['mountpoint'])
        print
        print '   op/s\t\trpc bklog'
        print '%7.2f' % (sends / sample_time),
        print '\t%7.2f' % backlog
        # 'which' selects the report flavor: 0 I/O, 1 attr cache,
        # 2 directories, 3 page cache (see iostat_command options).
        if which == 0:
            self.__print_rpc_op_stats('READ', sample_time)
            self.__print_rpc_op_stats('WRITE', sample_time)
        elif which == 1:
            self.__print_rpc_op_stats('GETATTR', sample_time)
            self.__print_rpc_op_stats('ACCESS', sample_time)
            self.__print_attr_cache_stats(sample_time)
        elif which == 2:
            self.__print_rpc_op_stats('LOOKUP', sample_time)
            self.__print_rpc_op_stats('READDIR', sample_time)
            if self.__rpc_data.has_key('READDIRPLUS'):
                self.__print_rpc_op_stats('READDIRPLUS', sample_time)
            self.__print_dir_cache_stats(sample_time)
        elif which == 3:
            self.__print_rpc_op_stats('READ', sample_time)
            self.__print_rpc_op_stats('WRITE', sample_time)
            self.__print_page_stats(sample_time)
#
# Functions
#
def print_iostat_help(name):
    """Print the usage message, substituting *name* as the program name."""
    print 'usage: %s [ <interval> [ <count> ] ] [ <options> ] [ <mount point> ] ' % name
    print
    print ' Version %s' % Iostats_version
    print
    print ' Sample iostat-like program to display NFS client per-mount statistics.'
    print
    print ' The <interval> parameter specifies the amount of time in seconds between'
    print ' each report.  The first report contains statistics for the time since each'
    print ' file system was mounted.  Each subsequent report contains statistics'
    print ' collected during the interval since the previous report.'
    print
    print ' If the <count> parameter is specified, the value of <count> determines the'
    print ' number of reports generated at <interval> seconds apart.  If the interval'
    print ' parameter is specified without the <count> parameter, the command generates'
    print ' reports continuously.'
    print
    print ' Options include "--attr", which displays statistics related to the attribute'
    print ' cache, "--dir", which displays statistics related to directory operations,'
    print ' and "--page", which displays statistics related to the page cache.'
    print ' By default, if no option is specified, statistics related to file I/O are'
    print ' displayed.'
    print
    print ' If one or more <mount point> names are specified, statistics for only these'
    print ' mount points will be displayed.  Otherwise, all NFS mount points on the'
    print ' client are listed.'
def parse_stats_file(filename):
    """Pop the contents of a mountstats file into a dictionary,
    keyed by mount point.  Each value object is a list of the
    lines in the mountstats file corresponding to the mount
    point named in the key.
    """
    ms_dict = dict()
    key = ''
    # Initialise the accumulator so a malformed file whose first entry
    # lines precede any 'device' line cannot raise NameError.
    new = []
    # 'with' guarantees the file is closed (the original called
    # 'f.close' without parentheses, which never closed it); open()
    # replaces the Python-2-only file() builtin.
    with open(filename) as f:
        for line in f:
            words = line.split()
            if len(words) == 0:
                continue
            if words[0] == 'device':
                # A 'device' line starts a new mount entry; words[4]
                # is the mount point.
                key = words[4]
                new = [ line.strip() ]
            else:
                new += [ line.strip() ]
            ms_dict[key] = new
    return ms_dict
def print_iostat_summary(old, new, devices, time, ac):
    """Print one report per device, diffing against *old* when given.

    *old* and *new* are mountstats dictionaries (mount point -> lines),
    *time* is the sample interval and *ac* selects the report flavor.
    """
    for mountpoint in devices:
        current = DeviceData()
        current.parse_stats(new[mountpoint])
        if old:
            previous = DeviceData()
            previous.parse_stats(old[mountpoint])
            current.compare_iostats(previous).display_iostats(time, ac)
        else:
            # No baseline yet: report totals since mount time.
            current.display_iostats(time, ac)
def iostat_command(name):
    """iostat-like command for NFS mount points
    """
    mountstats = parse_stats_file('/proc/self/mountstats')
    devices = []
    # 'which' selects the report flavor passed down to display_iostats:
    # 0 = file I/O (default), 1 = attr cache, 2 = directories, 3 = pages.
    which = 0
    interval_seen = False
    count_seen = False
    for arg in sys.argv:
        if arg in ['-h', '--help', 'help', 'usage']:
            print_iostat_help(name)
            return
        if arg in ['-v', '--version', 'version']:
            print '%s version %s' % (name, Iostats_version)
            return
        if arg in ['-a', '--attr']:
            which = 1
            continue
        if arg in ['-d', '--dir']:
            which = 2
            continue
        if arg in ['-p', '--page']:
            which = 3
            continue
        if arg == sys.argv[0]:
            continue
        # NOTE(review): any argument that is not a known mount point is
        # fed to int(); a misspelled mount point therefore raises an
        # unhandled ValueError instead of a friendly message.
        if arg in mountstats:
            devices += [arg]
        elif not interval_seen:
            interval = int(arg)
            if interval > 0:
                interval_seen = True
            else:
                print 'Illegal <interval> value'
                return
        elif not count_seen:
            count = int(arg)
            if count > 0:
                count_seen = True
            else:
                print 'Illegal <count> value'
                return
    # make certain devices contains only NFS mount points
    if len(devices) > 0:
        check = []
        for device in devices:
            stats = DeviceData()
            stats.parse_stats(mountstats[device])
            if stats.is_nfs_mountpoint():
                check += [device]
        devices = check
    else:
        # No explicit mount points: report on every NFS mount found.
        for device, descr in mountstats.iteritems():
            stats = DeviceData()
            stats.parse_stats(descr)
            if stats.is_nfs_mountpoint():
                devices += [device]
    if len(devices) == 0:
        print 'No NFS mount points were found'
        return
    # First report covers the time since mount (sample_time == 0 makes
    # display_iostats fall back to the mount age).
    old_mountstats = None
    sample_time = 0.0
    if not interval_seen:
        print_iostat_summary(old_mountstats, mountstats, devices, sample_time, which)
        return
    if count_seen:
        while count != 0:
            print_iostat_summary(old_mountstats, mountstats, devices, sample_time, which)
            old_mountstats = mountstats
            time.sleep(interval)
            sample_time = interval
            mountstats = parse_stats_file('/proc/self/mountstats')
            count -= 1
    else:
        # No <count>: keep reporting until interrupted.
        while True:
            print_iostat_summary(old_mountstats, mountstats, devices, sample_time, which)
            old_mountstats = mountstats
            time.sleep(interval)
            sample_time = interval
            mountstats = parse_stats_file('/proc/self/mountstats')
#
# Main
#
# Entry point: run the iostat-like command, exiting cleanly on Ctrl-C.
prog = os.path.basename(sys.argv[0])
try:
    iostat_command(prog)
except KeyboardInterrupt:
    print 'Caught ^C... exiting'
    sys.exit(1)
sys.exit(0)
| gpl-2.0 |
Jgarcia-IAS/SAT | openerp/addons-extra/odoo-pruebas/odoo-server/addons-extra/l10n_co_account_extra/account_voucher.py | 5 | 2058 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
from openerp.tools.translate import _
class account_voucher(osv.osv):
    """Extend account.voucher with a defaulted partner bank account."""
    _inherit = 'account.voucher'

    def _get_partner_bank(self, cr, uid, context=None):
        """Default value for ``partner_bank_id``.

        Old-API ``_defaults`` callbacks receive only ``(cr, uid,
        context)``: no record exists yet, so the partner -- when known
        at creation time -- must come from the context.  Returns the id
        of the partner's first bank account, or False when no partner
        or bank account is available.

        The previous implementation referenced several undefined names
        (``self.partner_id``, ``partner_bank_id``, ``result``, ``ids``)
        and raised AttributeError/NameError whenever it ran.
        """
        if context is None:
            context = {}
        partner_id = context.get('partner_id') or context.get('default_partner_id')
        if partner_id:
            partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
            if partner.bank_ids:
                # Use the partner's first declared bank account.
                return partner.bank_ids[0].id
        return False

    _columns = {
        'partner_bank_id': fields.many2one('res.partner.bank', 'Bank Account',
        help='Bank Account Number to which the invoice will be paid. A Company bank account if this is a Customer Invoice or Supplier Refund, otherwise a Partner bank account number.', readonly=True, states={'draft':[('readonly',False)]}),
    }
    _defaults = {
        'partner_bank_id': _get_partner_bank,
    }
| agpl-3.0 |
francisco-dlp/hyperspy | hyperspy/_components/eels_cl_edge.py | 2 | 14796 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import math
import logging
import numpy as np
from scipy.interpolate import splev
from hyperspy.component import Component
from hyperspy.misc.eels.hartree_slater_gos import HartreeSlaterGOS
from hyperspy.misc.eels.hydrogenic_gos import HydrogenicGOS
from hyperspy.misc.eels.effective_angle import effective_angle
from hyperspy.ui_registry import add_gui_method
_logger = logging.getLogger(__name__)
@add_gui_method(toolkey="EELSCLEdge_Component")
class EELSCLEdge(Component):
    """EELS core loss ionisation edge from hydrogenic or tabulated
    Hartree-Slater GOS with splines for fine structure fitting.
    Hydrogenic GOS are limited to K and L shells.
    Currently it only supports Peter Rez's Hartree Slater cross sections
    parametrised as distributed by Gatan in their Digital Micrograph (DM)
    software. If Digital Micrograph is installed in the system HyperSpy in the
    standard location HyperSpy should find the path to the HS GOS folder.
    Otherwise, the location of the folder can be defined in HyperSpy
    preferences, which can be done through hs.preferences.gui() or the
    hs.preferences.EELS.eels_gos_files_path variable.
    Parameters
    ----------
    element_subshell : {str, dict}
        Usually a string, for example, 'Ti_L3' for the GOS of the titanium L3
        subshell. If a dictionary is passed, it is assumed that Hartree Slater
        GOS was exported using `GOS.as_dictionary`, and will be reconstructed.
    GOS : {'hydrogenic', 'Hartree-Slater', None}
        The GOS to use. If None it will use the Hartree-Slater GOS if
        they are available, otherwise it will use the hydrogenic GOS.
    Attributes
    ----------
    onset_energy : Parameter
        The edge onset position
    intensity : Parameter
        The factor by which the cross section is multiplied, what in
        favourable cases is proportional to the number of atoms of
        the element. It is a component.Parameter instance.
        It is fixed by default.
    fine_structure_coeff : Parameter
        The coefficients of the spline that fits the fine structure.
        Fix this parameter to fix the fine structure. It is a
        component.Parameter instance.
    effective_angle : Parameter
        The effective collection semi-angle. It is automatically
        calculated by set_microscope_parameters. It is a
        component.Parameter instance. It is fixed by default.
    fine_structure_smoothing : float between 0 and 1
        Controls the level of smoothing of the fine structure model.
        Decreasing the value increases the level of smoothing.
    fine_structure_active : bool
        Activates/deactivates the fine structure feature.
    """
    # Class-level default for the smoothing factor (see the
    # fine_structure_smoothing property below).
    _fine_structure_smoothing = 0.3
    def __init__(self, element_subshell, GOS=None):
        # Declare the parameters
        Component.__init__(self,
                           ['intensity',
                            'fine_structure_coeff',
                            'effective_angle',
                            'onset_energy'])
        # A dict means a previously exported Hartree-Slater GOS.
        if isinstance(element_subshell, dict):
            self.element = element_subshell['element']
            self.subshell = element_subshell['subshell']
        else:
            self.element, self.subshell = element_subshell.split('_')
        self.name = "_".join([self.element, self.subshell])
        self.energy_scale = None
        self.effective_angle.free = False
        self.fine_structure_active = False
        self.fine_structure_width = 30.
        self.fine_structure_coeff.ext_force_positive = False
        self.GOS = None
        # Set initial actions
        if GOS is None:
            # Prefer Hartree-Slater; fall back to hydrogenic when the
            # tabulated files cannot be found.
            try:
                self.GOS = HartreeSlaterGOS(element_subshell)
                GOS = 'Hartree-Slater'
            except IOError:
                GOS = 'hydrogenic'
                _logger.info(
                    'Hartree-Slater GOS not available. '
                    'Using hydrogenic GOS')
        if self.GOS is None:
            if GOS == 'Hartree-Slater':
                self.GOS = HartreeSlaterGOS(element_subshell)
            elif GOS == 'hydrogenic':
                self.GOS = HydrogenicGOS(element_subshell)
            else:
                raise ValueError(
                    'gos must be one of: None, \'hydrogenic\''
                    ' or \'Hartree-Slater\'')
        self.onset_energy.value = self.GOS.onset_energy
        self.onset_energy.free = False
        self._position = self.onset_energy
        self.free_onset_energy = False
        self.intensity.grad = self.grad_intensity
        self.intensity.value = 1
        self.intensity.bmin = 0.
        self.intensity.bmax = None
        # Record constructor arguments so the component can be
        # serialised/reconstructed.
        self._whitelist['GOS'] = ('init', GOS)
        if GOS == 'Hartree-Slater':
            self._whitelist['element_subshell'] = (
                'init',
                self.GOS.as_dictionary(True))
        elif GOS == 'hydrogenic':
            self._whitelist['element_subshell'] = ('init', element_subshell)
        self._whitelist['fine_structure_active'] = None
        self._whitelist['fine_structure_width'] = None
        self._whitelist['fine_structure_smoothing'] = None
        # Re-integrate the GOS whenever the effective angle or the
        # onset energy changes; the knot grid also follows the onset.
        self.effective_angle.events.value_changed.connect(
            self._integrate_GOS, [])
        self.onset_energy.events.value_changed.connect(self._integrate_GOS, [])
        self.onset_energy.events.value_changed.connect(
            self._calculate_knots, [])
    # Automatically fix the fine structure when the fine structure is
    # disable.
    # In this way we avoid a common source of problems when fitting
    # However the fine structure must be *manually* freed when we
    # reactivate the fine structure.
    def _get_fine_structure_active(self):
        return self.__fine_structure_active
    def _set_fine_structure_active(self, arg):
        if arg is False:
            self.fine_structure_coeff.free = False
        self.__fine_structure_active = arg
        # Force replot
        self.intensity.value = self.intensity.value
    fine_structure_active = property(_get_fine_structure_active,
                                     _set_fine_structure_active)
    def _get_fine_structure_width(self):
        return self.__fine_structure_width
    def _set_fine_structure_width(self, arg):
        # Changing the width changes the number of spline coefficients.
        self.__fine_structure_width = arg
        self._set_fine_structure_coeff()
    fine_structure_width = property(_get_fine_structure_width,
                                    _set_fine_structure_width)
    # E0
    def _get_E0(self):
        return self.__E0
    def _set_E0(self, arg):
        self.__E0 = arg
        self._calculate_effective_angle()
    E0 = property(_get_E0, _set_E0)
    # Collection semi-angle
    def _get_collection_angle(self):
        return self.__collection_angle
    def _set_collection_angle(self, arg):
        self.__collection_angle = arg
        self._calculate_effective_angle()
    collection_angle = property(_get_collection_angle,
                                _set_collection_angle)
    # Convergence semi-angle
    def _get_convergence_angle(self):
        return self.__convergence_angle
    def _set_convergence_angle(self, arg):
        self.__convergence_angle = arg
        self._calculate_effective_angle()
    convergence_angle = property(_get_convergence_angle,
                                 _set_convergence_angle)
    def _calculate_effective_angle(self):
        try:
            self.effective_angle.value = effective_angle(
                self.E0,
                self.GOS.onset_energy,
                self.convergence_angle,
                self.collection_angle)
        except BaseException:
            # All the parameters may not be defined yet...
            pass
    @property
    def fine_structure_smoothing(self):
        """Controls the level of the smoothing of the fine structure.
        It must a real number between 0 and 1. The higher close to 0
        the higher the smoothing.
        """
        return self._fine_structure_smoothing
    @fine_structure_smoothing.setter
    def fine_structure_smoothing(self, value):
        if 0 <= value <= 1:
            self._fine_structure_smoothing = value
            self._set_fine_structure_coeff()
        else:
            raise ValueError(
                "The value must be a number between 0 and 1")
    # It is needed because the property cannot be used to sort the edges
    def _onset_energy(self):
        return self.onset_energy.value
    def _set_fine_structure_coeff(self):
        # Resize the spline coefficient vector to match the current
        # width, smoothing level and energy step.
        if self.energy_scale is None:
            return
        self.fine_structure_coeff._number_of_elements = int(
            round(self.fine_structure_smoothing *
                  self.fine_structure_width /
                  self.energy_scale)) + 4
        self.fine_structure_coeff.bmin = None
        self.fine_structure_coeff.bmax = None
        self._calculate_knots()
        if self.fine_structure_coeff.map is not None:
            self.fine_structure_coeff._create_array()
    def set_microscope_parameters(self, E0, alpha, beta, energy_scale):
        """
        Parameters
        ----------
        E0 : float
            Electron beam energy in keV.
        alpha: float
            Convergence semi-angle in mrad.
        beta: float
            Collection semi-angle in mrad.
        energy_scale : float
            The energy step in eV.
        """
        # Relativistic correction factors
        # Suppress the per-attribute callback so the (expensive) GOS
        # integration runs at most once for the whole update.
        old = self.effective_angle.value
        with self.effective_angle.events.value_changed.suppress_callback(
                self._integrate_GOS):
            self.convergence_angle = alpha
            self.collection_angle = beta
            self.energy_scale = energy_scale
            self.E0 = E0
        if self.effective_angle.value != old:
            self._integrate_GOS()
    def _integrate_GOS(self):
        # Integration over q using splines
        angle = self.effective_angle.value * 1e-3  # in rad
        self.tab_xsection = self.GOS.integrateq(
            self.onset_energy.value, angle, self.E0)
        # Calculate extrapolation powerlaw extrapolation parameters
        E1 = self.GOS.energy_axis[-2] + self.GOS.energy_shift
        E2 = self.GOS.energy_axis[-1] + self.GOS.energy_shift
        y1 = self.GOS.qint[-2]  # in m**2/bin */
        y2 = self.GOS.qint[-1]  # in m**2/bin */
        self.r = math.log(y2 / y1) / math.log(E1 / E2)
        self.A = y1 / E1 ** -self.r
    def _calculate_knots(self):
        # Cubic-spline knot vector over the fine-structure window;
        # end knots are repeated 4 times as required for degree 3.
        start = self.onset_energy.value
        stop = start + self.fine_structure_width
        self.__knots = np.r_[
            [start] * 4,
            np.linspace(
                start,
                stop,
                self.fine_structure_coeff._number_of_elements)[
                2:-2],
            [stop] * 4]
    def function(self, E):
        """Returns the number of counts in barns
        """
        shift = self.onset_energy.value - self.GOS.onset_energy
        if shift != self.GOS.energy_shift:
            # Because hspy Events are not executed in any given order,
            # an external function could be in the same event execution list
            # as _integrate_GOS and be executed first. That can potentially
            # cause an error that enforcing _integrate_GOS here prevents. Note
            # that this is suboptimal because _integrate_GOS is computed twice
            # unnecessarily.
            self._integrate_GOS()
        Emax = self.GOS.energy_axis[-1] + self.GOS.energy_shift
        cts = np.zeros((len(E)))
        bsignal = (E >= self.onset_energy.value)
        if self.fine_structure_active is True:
            # Spline region: onset -> onset + fine_structure_width.
            bfs = bsignal * (
                E < (self.onset_energy.value + self.fine_structure_width))
            cts[bfs] = splev(
                E[bfs], (
                    self.__knots,
                    self.fine_structure_coeff.value + (0,) * 4,
                    3))
            bsignal[bfs] = False
        # Tabulated region up to Emax, then power-law extrapolation.
        itab = bsignal * (E <= Emax)
        cts[itab] = self.tab_xsection(E[itab])
        bsignal[itab] = False
        cts[bsignal] = self.A * E[bsignal] ** -self.r
        return cts * self.intensity.value
    def grad_intensity(self, E):
        # d(function)/d(intensity): the cross section itself.
        return self.function(E) / self.intensity.value
    def fine_structure_coeff_to_txt(self, filename):
        # Dump the spline coefficients to '<filename>.dat'.
        np.savetxt(filename + '.dat', self.fine_structure_coeff.value,
                   fmt="%12.6G")
    def txt_to_fine_structure_coeff(self, filename):
        # Load spline coefficients previously saved with
        # fine_structure_coeff_to_txt; sizes must match.
        fs = np.loadtxt(filename)
        self._calculate_knots()
        if len(fs) == len(self.__knots):
            self.fine_structure_coeff.value = fs
        else:
            raise ValueError(
                "The provided fine structure file "
                "doesn't match the size of the current fine structure")
    def get_fine_structure_as_signal1D(self):
        """Returns a spectrum containing the fine structure.
        Notes
        -----
        The fine structure is corrected from multiple scattering if
        the model was convolved with a low-loss spectrum
        """
        from hyperspy._signals.eels import EELSSpectrum
        channels = int(np.floor(
            self.fine_structure_width / self.energy_scale))
        data = np.zeros(self.fine_structure_coeff.map.shape +
                        (channels,))
        s = EELSSpectrum(
            data,
            axes=self.intensity._axes_manager._get_axes_dicts())
        s.get_dimensions_from_data()
        s.axes_manager.signal_axes[0].offset = self.onset_energy.value
        # Backup the axes_manager
        original_axes_manager = self._axes_manager
        self._axes_manager = s.axes_manager
        for spectrum in s:
            self.fetch_stored_values()
            spectrum.data[:] = self.function(
                s.axes_manager.signal_axes[0].axis)
        # Restore the axes_manager and the values
        self._axes_manager = original_axes_manager
        self.fetch_stored_values()
        s.metadata.General.title = self.name.replace(
            '_', ' ') + ' fine structure'
        return s
| gpl-3.0 |
sameer2800/sam-s-club-auctions | flask/lib/python3.6/site-packages/setuptools/site-patch.py | 356 | 2307 | def __boot():
import sys
import os
PYTHONPATH = os.environ.get('PYTHONPATH')
if PYTHONPATH is None or (sys.platform == 'win32' and not PYTHONPATH):
PYTHONPATH = []
else:
PYTHONPATH = PYTHONPATH.split(os.pathsep)
pic = getattr(sys, 'path_importer_cache', {})
stdpath = sys.path[len(PYTHONPATH):]
mydir = os.path.dirname(__file__)
for item in stdpath:
if item == mydir or not item:
continue # skip if current dir. on Windows, or my own directory
importer = pic.get(item)
if importer is not None:
loader = importer.find_module('site')
if loader is not None:
# This should actually reload the current module
loader.load_module('site')
break
else:
try:
import imp # Avoid import loop in Python >= 3.3
stream, path, descr = imp.find_module('site', [item])
except ImportError:
continue
if stream is None:
continue
try:
# This should actually reload the current module
imp.load_module('site', stream, path, descr)
finally:
stream.close()
break
else:
raise ImportError("Couldn't find the real 'site' module")
known_paths = dict([(makepath(item)[1], 1) for item in sys.path]) # 2.2 comp
oldpos = getattr(sys, '__egginsert', 0) # save old insertion position
sys.__egginsert = 0 # and reset the current one
for item in PYTHONPATH:
addsitedir(item)
sys.__egginsert += oldpos # restore effective old position
d, nd = makepath(stdpath[0])
insert_at = None
new_path = []
for item in sys.path:
p, np = makepath(item)
if np == nd and insert_at is None:
# We've hit the first 'system' path entry, so added entries go here
insert_at = len(new_path)
if np in known_paths or insert_at is None:
new_path.append(item)
else:
# new path after the insert point, back-insert it
new_path.insert(insert_at, item)
insert_at += 1
sys.path[:] = new_path
# When this stub shadows the stdlib 'site' module (the normal case), run the
# bootstrap immediately; in either case drop the helper from the namespace.
if __name__ == 'site':
    __boot()
del __boot
| apache-2.0 |
ucsb-seclab/ictf-framework | database/ictf-db-export-s3.py | 1 | 4447 | #!/usr/bin/env python
import sys
from subprocess import check_output, CalledProcessError, Popen
import logging
import os
import signal
import re
import boto3
import yaml
import glob
from os.path import join, basename
import threading
from time import sleep
from filecmp import cmp as fcmp
sys.path.append(os.path.abspath(join(os.path.dirname(__file__), '..')))
from common.world_constants import CfgKey
# Polling interval and on-disk locations used by the copier loop.
SLEEP_TIME = 30
BACKUP_DIR = "/media/backups/ictf/"
BACKUPS_COPIED = join(BACKUP_DIR, "copies.dat")
DEBUG_LOG_FILENAME = '/var/log/ictf-db-export-s3.log'
# set up formatting
formatter = logging.Formatter('[%(asctime)s] %(levelno)s (%(process)d) %(module)s: %(message)s')
# set up logging to STDOUT for all levels INFO and higher
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.INFO)
sh.setFormatter(formatter)
# set up logging to a file for all levels INFO and higher.
# BUG FIX: the original created a second, identical FileHandler and
# immediately rebound the name, leaking the first handler's open file
# descriptor; only one handler is created here.
fh = logging.FileHandler(DEBUG_LOG_FILENAME)
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
log = logging.getLogger('dbexportlogger')
log.setLevel(logging.INFO)
log.addHandler(sh)
log.addHandler(fh)
class InterruptException(Exception):
    """Raised from the signal handler to unwind the main loop cleanly."""
    pass


def signal_handler(signal, frame):
    """Translate SIGTERM/SIGINT/SIGABRT into an InterruptException."""
    raise InterruptException("SIG-INT received, sending exception")
def main():
    """Poll BACKUP_DIR for gzipped DB dumps and upload the newest to S3.

    Runs forever, sleeping SLEEP_TIME seconds between iterations. Uploads
    are skipped when the two most recent dumps are identical or when the
    newest dump is already recorded in copies.dat. Exits cleanly when a
    termination signal raises InterruptException.
    """
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGABRT, signal_handler)
    with open(BACKUPS_COPIED, "a+") as marker:
        marker.write("Starting up process\n")
    while True:
        try:
            log.info("Starting DB Dump to s3 copier ")
            # NOTE(review): yaml.load on the operator-provided config file;
            # prefer yaml.safe_load unless custom YAML tags are required.
            with open("aws_config.yml") as f:
                cfg = yaml.load(f)
            region = cfg["region"]
            access_key = cfg["access_key"]
            secret_key = cfg["secret_key"]
            bucket_name = cfg["bucket_name"]
            # Newest dump first, ordered by file creation time.
            dumps = sorted(glob.iglob(BACKUP_DIR + "*.gz"),
                           key=os.path.getctime, reverse=True)
            file_compare = False
            if len(dumps) > 0:
                if len(dumps) > 1:
                    file_compare = fcmp(dumps[0], dumps[1])
                if file_compare:
                    log.info("the two most recent files are identical, skipping copy. ")
                else:
                    newest_dump = dumps[0]
                    # copies.dat records one already-uploaded path per line.
                    with open(BACKUPS_COPIED, "r") as copied:
                        copied_files = copied.read().split("\n")
                    if newest_dump in copied_files:
                        log.info("skipping because already copied. ")
                    else:
                        s3 = boto3.resource('s3', region_name=region,
                                            aws_access_key_id=access_key,
                                            aws_secret_access_key=secret_key)
                        log.info("copying file " + str(newest_dump) + " to bucket " + str(bucket_name))
                        s3.meta.client.upload_file(newest_dump, bucket_name, basename(newest_dump),
                                                   Callback=ProgressPercentage(newest_dump))
        except CalledProcessError as cpe:
            log.exception(cpe)
        except InterruptException:
            log.info("Received Signal interrupt")
            sys.exit(0)
        except Exception as e:
            # BUG FIX: the original format string had no placeholder, so the
            # exception text was silently dropped from the message.
            log.exception("Caught exception: {}".format(e))
        log.info("Sleeping for " + str(SLEEP_TIME) + " seconds")
        sleep(SLEEP_TIME)
# taken from http://takwatanabe.me/boto3/generated/boto3.s3.transfer.html
class ProgressPercentage(object):
    """Upload-progress callback for boto3's transfer manager.

    An instance is passed as ``Callback=`` to ``upload_file`` and is invoked
    (possibly from multiple worker threads) with the number of bytes
    transferred since the previous call. When the whole file has been sent,
    the file name is appended to copies.dat so it is not uploaded again.
    """

    def __init__(self, filename):
        self._filename = filename
        self._size = float(os.path.getsize(filename))
        self._seen_so_far = 0
        # boto3 may call back from several threads concurrently.
        self._lock = threading.Lock()

    def __call__(self, bytes_amount):
        # To simplify we'll assume this is hooked up
        # to a single filename.
        with self._lock:
            self._seen_so_far += bytes_amount
            if self._size > 0:
                percentage = (self._seen_so_far / self._size) * 100
            else:
                # BUG FIX: a zero-byte file used to raise ZeroDivisionError.
                percentage = 100.0
            log.info(
                "\r%s %s / %s (%.2f%%)" % (
                    self._filename, self._seen_so_far, self._size,
                    percentage))
            if self._size == self._seen_so_far:
                log.info("Saving copied filename (" + self._filename + ")to copies.dat. ")
                #os.remove(self._filename)
                open(BACKUPS_COPIED,"a+").write(self._filename + "\n")
# Entry point: start the forever-looping copier when run as a script.
if __name__ == "__main__":
    main()
| gpl-2.0 |
mikefullerton/Piecemeal-Scripts | Scripts/pieces/commands/install-xcode-configs.py | 1 | 1975 | #!/usr/bin/python
# pieces.py
# fishlamp-install
#
# Created by Mike Fullerton on 8/3/13.
#
import sys
import os
# Make the shared helper package (../../shared relative to this script)
# importable before the project-local imports below.
scriptName = os.path.basename(sys.argv[0])
scriptPath = os.path.dirname(sys.argv[0])
sharedPath = os.path.join(scriptPath, "../../shared/")
sys.path.append(os.path.abspath(sharedPath))
import Utils
import Pieces;
import Scripts
import datetime
import time
class Script(Scripts.Script):
    """Installs or refreshes the FishLamp Xcode config folder and generates
    an .xcconfig that lists the search paths of every installed piece."""

    def helpString(self):
        return "installs or updates Xcode configs for Utils"

    def destinationPath(self):
        """Return <working-or-first-arg dir>/FishLampXcodeConfigs."""
        destinationDir = Utils.workingDirectory()
        if self.hasParameterAtIndex(1):
            destinationDir = self.parameterAtIndex(1)
        # BUG FIX: was an unqualified assertPathExists() (NameError at
        # runtime); the helper lives in the Utils module (see run()).
        Utils.assertPathExists(destinationDir)
        return os.path.join(destinationDir, "FishLampXcodeConfigs")

    def arrayOfPaths(self):
        """Collect every path contributed by every installed piece."""
        paths = []
        for piece in Pieces.allPieces():
            for aPath in piece.allPaths():
                paths.append(aPath)
        return paths

    def generateFileWithPath(self, destFolder, path):
        """Write FISHLAMP_PIECES_PATHS.xcconfig containing `path`."""
        filePath = os.path.join(destFolder, "FISHLAMP_PIECES_PATHS.xcconfig")
        f = open(filePath, 'w')
        try:
            # BUG FIX: datetime.datetime.now() must be converted to str
            # before concatenation (was a TypeError).
            f.write("# generated on " + str(datetime.datetime.now()) + "\n\n")
            f.write(path)
        finally:
            f.close()
        Utils.assertPathExists(filePath)

    def run(self):
        destDir = self.destinationPath()
        Utils.deleteDirectory(destDir)
        srcPath = self.templatePath("XcodeConfigs")
        Utils.copyFileOrDirectory(srcPath, destDir)
        # BUG FIX: 'FishLampPieces' was never imported; the module is
        # imported as 'Pieces' above -- TODO confirm it exposes
        # relativePathToPiecesFolder().
        fl = Pieces.relativePathToPiecesFolder()
        configPath = "FISHLAMP_PIECES_PATHS = "
        for path in self.arrayOfPaths():
            relativePathToPiecesFolder = os.path.join(fl, path)
            print("# added path: \"" + relativePathToPiecesFolder + "\"")
            configPath += relativePathToPiecesFolder + "/** "
        self.generateFileWithPath(destDir, configPath)
        print("# updated folder: " + destDir)


Script().run()
| mit |
stevehof/CouchPotatoServer | libs/suds/xsd/schema.py | 192 | 14328 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{schema} module provides a intelligent representation of
an XSD schema. The I{raw} model is the XML tree and the I{model}
is the denormalized, objectified and intelligent view of the schema.
Most of the I{value-add} provided by the model is centered around
tranparent referenced type resolution and targeted denormalization.
"""
import suds.metrics
from suds import *
from suds.xsd import *
from suds.xsd.sxbuiltin import *
from suds.xsd.sxbasic import Factory as BasicFactory
from suds.xsd.sxbuiltin import Factory as BuiltinFactory
from suds.xsd.sxbase import SchemaObject
from suds.xsd.deplist import DepList
from suds.sax.element import Element
from suds.sax import splitPrefix, Namespace
from logging import getLogger
log = getLogger(__name__)
class SchemaCollection:
    """
    A collection of schema objects. This class is needed because WSDLs
    may contain more than one <schema/> node.
    @ivar wsdl: A wsdl object.
    @type wsdl: L{suds.wsdl.Definitions}
    @ivar children: A list of contained schemas.
    @type children: [L{Schema},...]
    @ivar namespaces: A dictionary of contained schemas by namespace.
    @type namespaces: {str:L{Schema}}
    """
    def __init__(self, wsdl):
        """
        @param wsdl: A wsdl object.
        @type wsdl: L{suds.wsdl.Definitions}
        """
        self.wsdl = wsdl
        self.children = []
        self.namespaces = {}
    def add(self, schema):
        """
        Add a schema node to the collection. Schema(s) within the same target
        namespace are consolidated.
        @param schema: A schema object.
        @type schema: (L{Schema})
        """
        key = schema.tns[1]
        existing = self.namespaces.get(key)
        if existing is None:
            self.children.append(schema)
            self.namespaces[key] = schema
        else:
            # Same targetNamespace: fold the new root's children and prefixes
            # into the schema already registered for that namespace.
            existing.root.children += schema.root.children
            existing.root.nsprefixes.update(schema.root.nsprefixes)
    def load(self, options):
        """
        Load the schema objects for the root nodes.
            - de-references schemas
            - merge schemas
        @param options: An options dictionary.
        @type options: L{options.Options}
        @return: The merged schema.
        @rtype: L{Schema}
        """
        if options.autoblend:
            self.autoblend()
        # Multi-pass: build every object graph first, then open imports,
        # then dereference, so cross-schema references can resolve.
        for child in self.children:
            child.build()
        for child in self.children:
            child.open_imports(options)
        for child in self.children:
            child.dereference()
        log.debug('loaded:\n%s', self)
        merged = self.merge()
        log.debug('MERGED:\n%s', merged)
        return merged
    def autoblend(self):
        """
        Ensure that all schemas within the collection
        import each other which has a blending effect.
        @return: self
        @rtype: L{SchemaCollection}
        """
        namespaces = self.namespaces.keys()
        for s in self.children:
            for ns in namespaces:
                tns = s.root.get('targetNamespace')
                if tns == ns:
                    continue
                # Skip namespaces this schema already imports.
                for imp in s.root.getChildren('import'):
                    if imp.get('namespace') == ns:
                        continue
                imp = Element('import', ns=Namespace.xsdns)
                imp.set('namespace', ns)
                s.root.append(imp)
        return self
    def locate(self, ns):
        """
        Find a schema by namespace.  Only the URI portion of
        the namespace is compared to each schema's I{targetNamespace}
        @param ns: A namespace.
        @type ns: (prefix,URI)
        @return: The schema matching the namesapce, else None.
        @rtype: L{Schema}
        """
        return self.namespaces.get(ns[1])
    def merge(self):
        """
        Merge the contained schemas into one.
        @return: The merged schema.
        @rtype: L{Schema}
        """
        if len(self):
            # Fold every other schema into the first one.
            schema = self.children[0]
            for s in self.children[1:]:
                schema.merge(s)
            return schema
        else:
            return None
    def __len__(self):
        return len(self.children)
    def __str__(self):
        return unicode(self).encode('utf-8')
    def __unicode__(self):
        result = ['\nschema collection']
        for s in self.children:
            result.append(s.str(1))
        return '\n'.join(result)
class Schema:
    """
    The schema is an objectification of a <schema/> (xsd) definition.
    It provides inspection, lookup and type resolution.
    @ivar root: The root node.
    @type root: L{sax.element.Element}
    @ivar baseurl: The I{base} URL for this schema.
    @type baseurl: str
    @ivar container: A schema collection containing this schema.
    @type container: L{SchemaCollection}
    @ivar children: A list of direct top level children.
    @type children: [L{SchemaObject},...]
    @ivar all: A list of all (includes imported) top level children.
    @type all: [L{SchemaObject},...]
    @ivar types: A schema types cache.
    @type types: {name:L{SchemaObject}}
    @ivar imports: A list of import objects.
    @type imports: [L{SchemaObject},...]
    @ivar elements: A list of <element/> objects.
    @type elements: [L{SchemaObject},...]
    @ivar attributes: A list of <attribute/> objects.
    @type attributes: [L{SchemaObject},...]
    @ivar groups: A list of group objects.
    @type groups: [L{SchemaObject},...]
    @ivar agrps: A list of attribute group objects.
    @type agrps: [L{SchemaObject},...]
    @ivar form_qualified: The flag indicating:
        (@elementFormDefault).
    @type form_qualified: bool
    """
    Tag = 'schema'
    def __init__(self, root, baseurl, options, container=None):
        """
        @param root: The xml root.
        @type root: L{sax.element.Element}
        @param baseurl: The base url used for importing.
        @type baseurl: basestring
        @param options: An options dictionary.
        @type options: L{options.Options}
        @param container: An optional container.
        @type container: L{SchemaCollection}
        """
        self.root = root
        self.id = objid(self)
        self.tns = self.mktns()
        self.baseurl = baseurl
        self.container = container
        self.children = []
        self.all = []
        self.types = {}
        self.imports = []
        self.elements = {}
        self.attributes = {}
        self.groups = {}
        self.agrps = {}
        if options.doctor is not None:
            options.doctor.examine(root)
        form = self.root.get('elementFormDefault')
        if form is None:
            self.form_qualified = False
        else:
            self.form_qualified = ( form == 'qualified' )
        # Stand-alone schemas are built immediately; schemas owned by a
        # SchemaCollection are built by SchemaCollection.load() instead.
        if container is None:
            self.build()
            self.open_imports(options)
            log.debug('built:\n%s', self)
            self.dereference()
            log.debug('dereferenced:\n%s', self)
    def mktns(self):
        """
        Make the schema's target namespace.
        @return: The namespace representation of the schema's
            targetNamespace value.
        @rtype: (prefix, uri)
        """
        tns = [None, self.root.get('targetNamespace')]
        if tns[1] is not None:
            tns[0] = self.root.findPrefix(tns[1])
        return tuple(tns)
    def build(self):
        """
        Build the schema (object graph) using the root node
        using the factory.
            - Build the graph.
            - Collate the children.
        """
        self.children = BasicFactory.build(self.root, self)
        collated = BasicFactory.collate(self.children)
        self.children = collated[0]
        self.attributes = collated[2]
        self.imports = collated[1]
        self.elements = collated[3]
        self.types = collated[4]
        self.groups = collated[5]
        self.agrps = collated[6]
    def merge(self, schema):
        """
        Merge the contents from the schema.  Only objects not already contained
        in this schema's collections are merged.  This is to provide for bidirectional
        import which produce cyclic includes.
        @returns: self
        @rtype: L{Schema}
        """
        for item in schema.attributes.items():
            if item[0] in self.attributes:
                continue
            self.all.append(item[1])
            self.attributes[item[0]] = item[1]
        for item in schema.elements.items():
            if item[0] in self.elements:
                continue
            self.all.append(item[1])
            self.elements[item[0]] = item[1]
        for item in schema.types.items():
            if item[0] in self.types:
                continue
            self.all.append(item[1])
            self.types[item[0]] = item[1]
        for item in schema.groups.items():
            if item[0] in self.groups:
                continue
            self.all.append(item[1])
            self.groups[item[0]] = item[1]
        for item in schema.agrps.items():
            if item[0] in self.agrps:
                continue
            self.all.append(item[1])
            self.agrps[item[0]] = item[1]
        schema.merged = True
        return self
    def open_imports(self, options):
        """
        Instruct all contained L{sxbasic.Import} children to import
        the schema's which they reference.  The contents of the
        imported schema are I{merged} in.
        @param options: An options dictionary.
        @type options: L{options.Options}
        """
        for imp in self.imports:
            imported = imp.open(options)
            if imported is None:
                continue
            imported.open_imports(options)
            log.debug('imported:\n%s', imported)
            self.merge(imported)
    def dereference(self):
        """
        Instruct all children to perform dereferencing.
        """
        all = []
        indexes = {}
        for child in self.children:
            child.content(all)
        # Sort by dependency so referenced objects are merged before the
        # objects that reference them.
        deplist = DepList()
        for x in all:
            x.qualify()
            midx, deps = x.dependencies()
            item = (x, tuple(deps))
            deplist.add(item)
            indexes[x] = midx
        for x, deps in deplist.sort():
            midx = indexes.get(x)
            if midx is None: continue
            d = deps[midx]
            log.debug('(%s) merging %s <== %s', self.tns[1], Repr(x), Repr(d))
            x.merge(d)
    def locate(self, ns):
        """
        Find a schema by namespace.  Only the URI portion of
        the namespace is compared to each schema's I{targetNamespace}.
        The request is passed to the container.
        @param ns: A namespace.
        @type ns: (prefix,URI)
        @return: The schema matching the namesapce, else None.
        @rtype: L{Schema}
        """
        if self.container is not None:
            return self.container.locate(ns)
        else:
            return None
    def custom(self, ref, context=None):
        """
        Get whether the specified reference is B{not} an (xs) builtin.
        @param ref: A str or qref.
        @type ref: (str|qref)
        @return: True if B{not} a builtin, else False.
        @rtype: bool
        """
        if ref is None:
            return True
        else:
            return ( not self.builtin(ref, context) )
    def builtin(self, ref, context=None):
        """
        Get whether the specified reference is an (xs) builtin.
        @param ref: A str or qref.
        @type ref: (str|qref)
        @return: True if builtin, else False.
        @rtype: bool
        """
        w3 = 'http://www.w3.org'
        try:
            if isqref(ref):
                ns = ref[1]
                return ( ref[0] in Factory.tags and ns.startswith(w3) )
            if context is None:
                context = self.root
            prefix = splitPrefix(ref)[0]
            prefixes = context.findPrefixes(w3, 'startswith')
            return ( prefix in prefixes and ref[0] in Factory.tags )
        except Exception:
            # BUG FIX: was a bare 'except:' which also swallowed
            # KeyboardInterrupt/SystemExit; any lookup failure still means
            # the reference is not a builtin.
            return False
    def instance(self, root, baseurl, options):
        """
        Create and return an new schema object using the
        specified I{root} and I{url}.
        @param root: A schema root node.
        @type root: L{sax.element.Element}
        @param baseurl: A base URL.
        @type baseurl: str
        @param options: An options dictionary.
        @type options: L{options.Options}
        @return: The newly created schema object.
        @rtype: L{Schema}
        @note: This is only used by Import children.
        """
        return Schema(root, baseurl, options)
    def str(self, indent=0):
        tab = '%*s'%(indent*3, '')
        result = []
        result.append('%s%s' % (tab, self.id))
        result.append('%s(raw)' % tab)
        result.append(self.root.str(indent+1))
        result.append('%s(model)' % tab)
        for c in self.children:
            result.append(c.str(indent+1))
        result.append('')
        return '\n'.join(result)
    def __repr__(self):
        myrep = '<%s tns="%s"/>' % (self.id, self.tns[1])
        return myrep.encode('utf-8')
    def __str__(self):
        return unicode(self).encode('utf-8')
    def __unicode__(self):
        return self.str()
| gpl-3.0 |
RaumZeit/gdesklets-core | display/TargetDisplay.py | 2 | 1317 | from TargetGroup import TargetGroup
from utils.datatypes import *
from layout import Unit
class TargetDisplay(TargetGroup):
    """Top-level display target: exposes window-level properties and
    forwards them to the underlying display object."""

    def __init__(self, name, parent):
        TargetGroup.__init__(self, name, parent)
        # Window-level properties are both stored locally and pushed to the
        # display object (see _setp_container_stuff).
        self._register_property("desktop-borders", TYPE_UNIT_LIST,
                                self._setp_container_stuff, self._getp)
        self._register_property("window-flags", TYPE_LIST,
                                self._setp_container_stuff, self._getp)
        self._register_property("shape", TYPE_STRING,
                                self._setp_container_stuff, self._getp)
        self._register_property("icon", TYPE_STRING,
                                self._setp_container_stuff, self._getp)
        self._register_property("title", TYPE_STRING,
                                self._setp_container_stuff, self._getp)
        # "path" is read-only (no setter) and initialized from the display.
        self._register_property("path", TYPE_STRING, None, self._getp)
        self._setp("path", self._get_display().get_path())
        self.set_prop("x", Unit.Unit())
        self.set_prop("y", Unit.Unit())
        # Windows are decorated by default.
        self._setp_container_stuff("window-flags", ["decorated"])
    #
    # Container properties.
    #
    def _setp_container_stuff(self, key, value):
        # Forward the property to the display object, then record it locally.
        self._get_display().set_prop(key, value)
        self._setp(key, value)
| gpl-2.0 |
Godiyos/python-for-android | python-modules/twisted/twisted/internet/error.py | 49 | 8649 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Exceptions and errors for use in twisted.internet modules.
Maintainer: Itamar Shtull-Trauring
"""
import socket
from twisted.python import deprecate
from twisted.python.versions import Version
class BindError(Exception):
    """An error occurred binding to an interface"""

    def __str__(self):
        # The docstring is deliberately reused as the message prefix.
        text = self.__doc__
        if self.args:
            text = '%s: %s' % (text, ' '.join(self.args))
        return '%s.' % text


class CannotListenError(BindError):
    """This gets raised by a call to startListening, when the object cannot start listening.
    @ivar interface: the interface I tried to listen on
    @ivar port: the port I tried to listen on
    @ivar socketError: the exception I got when I tried to listen
    @type socketError: L{socket.error}
    """

    def __init__(self, interface, port, socketError):
        BindError.__init__(self, interface, port, socketError)
        self.interface = interface
        self.port = port
        self.socketError = socketError

    def __str__(self):
        # Fall back to 'any' when no interface was specified.
        where = self.interface or 'any'
        return "Couldn't listen on %s:%s: %s." % (where, self.port,
                                                  self.socketError)
class MulticastJoinError(Exception):
    """
    An attempt to join a multicast group failed.
    """


class MessageLengthError(Exception):
    """Message is too long to send"""

    def __str__(self):
        # The docstring is deliberately reused as the message prefix.
        text = self.__doc__
        if self.args:
            text = '%s: %s' % (text, ' '.join(self.args))
        return '%s.' % text


class DNSLookupError(IOError):
    """DNS lookup failed"""

    def __str__(self):
        # The docstring is deliberately reused as the message prefix.
        text = self.__doc__
        if self.args:
            text = '%s: %s' % (text, ' '.join(self.args))
        return '%s.' % text


class ConnectInProgressError(Exception):
    """A connect operation was started and isn't done yet."""
# connection errors

class ConnectError(Exception):
    """An error occurred while connecting"""

    def __init__(self, osError=None, string=""):
        self.osError = osError
        Exception.__init__(self, string)

    def __str__(self):
        # Each subclass docstring below doubles as the message prefix here.
        text = self.__doc__ or self.__class__.__name__
        if self.osError:
            text = '%s: %s' % (text, self.osError)
        if self[0]:
            text = '%s: %s' % (text, self[0])
        return '%s.' % text


class ConnectBindError(ConnectError):
    """Couldn't bind"""


class UnknownHostError(ConnectError):
    """Hostname couldn't be looked up"""


class NoRouteError(ConnectError):
    """No route to host"""


class ConnectionRefusedError(ConnectError):
    """Connection was refused by other side"""


class TCPTimedOutError(ConnectError):
    """TCP connection timed out"""


class BadFileError(ConnectError):
    """File used for UNIX socket is no good"""


class ServiceNameUnknownError(ConnectError):
    """Service name given as port is unknown"""


class UserError(ConnectError):
    """User aborted connection"""


class TimeoutError(UserError):
    """User timeout caused connection failure"""


class SSLError(ConnectError):
    """An SSL error occurred"""
class VerifyError(Exception):
    """Raised when something that was supposed to be signed fails
    verification."""


class PeerVerifyError(VerifyError):
    """Raised when the remote peer rejects our verification attempt."""


class CertificateError(Exception):
    """Raised when a certificate is missing where one was expected."""
try:
    import errno
except ImportError:
    # No errno module: fall back to mapping nothing.
    errnoMapping = {}
else:
    # Map well-known POSIX (and Windows WSA*) error numbers to the
    # corresponding ConnectError subclass.
    errnoMapping = {
        errno.ENETUNREACH: NoRouteError,
        errno.ECONNREFUSED: ConnectionRefusedError,
        errno.ETIMEDOUT: TCPTimedOutError,
    }
    if hasattr(errno, "WSAECONNREFUSED"):
        errnoMapping[errno.WSAECONNREFUSED] = ConnectionRefusedError
        errnoMapping[errno.WSAENETUNREACH] = NoRouteError


def getConnectError(e):
    """Given a socket exception, return connection error."""
    try:
        number, string = e
    except ValueError:
        # Not a (number, string) pair: wrap it verbatim.
        return ConnectError(string=e)

    if hasattr(socket, 'gaierror') and isinstance(e, socket.gaierror):
        # only works in 2.2
        klass = UnknownHostError
    else:
        klass = errnoMapping.get(number, ConnectError)
    return klass(number, string)
class ConnectionClosed(Exception):
    """
    Connection was closed, whether cleanly or non-cleanly.
    """


class ConnectionLost(ConnectionClosed):
    """Connection to the other side was lost in a non-clean fashion"""

    def __str__(self):
        # The docstring is deliberately reused as the message prefix.
        text = self.__doc__
        if self.args:
            text = '%s: %s' % (text, ' '.join(self.args))
        return '%s.' % text


class ConnectionDone(ConnectionClosed):
    """Connection was closed cleanly"""

    def __str__(self):
        # The docstring is deliberately reused as the message prefix.
        text = self.__doc__
        if self.args:
            text = '%s: %s' % (text, ' '.join(self.args))
        return '%s.' % text


class ConnectionFdescWentAway(ConnectionLost):
    """Uh""" #TODO
class AlreadyCalled(ValueError):
    """Tried to cancel an already-called event"""

    def __str__(self):
        # The docstring is deliberately reused as the message prefix.
        text = self.__doc__
        if self.args:
            text = '%s: %s' % (text, ' '.join(self.args))
        return '%s.' % text


class AlreadyCancelled(ValueError):
    """Tried to cancel an already-cancelled event"""

    def __str__(self):
        # The docstring is deliberately reused as the message prefix.
        text = self.__doc__
        if self.args:
            text = '%s: %s' % (text, ' '.join(self.args))
        return '%s.' % text
class PotentialZombieWarning(Warning):
    """
    Emitted when L{IReactorProcess.spawnProcess} is called in a way which may
    result in termination of the created child process not being reported.

    Deprecated in Twisted 10.0.
    """
    MESSAGE = (
        "spawnProcess called, but the SIGCHLD handler is not installed. "
        "This probably means you have not yet called reactor.run, or "
        "called reactor.run(installSignalHandler=0). You will probably "
        "never see this process finish, and it may become a zombie "
        "process.")
# Module-level deprecation marker: accessing error.PotentialZombieWarning
# now emits a DeprecationWarning (zombie processes can no longer occur).
deprecate.deprecatedModuleAttribute(
    Version("Twisted", 10, 0, 0),
    "There is no longer any potential for zombie process.",
    __name__,
    "PotentialZombieWarning")
class ProcessDone(ConnectionDone):
    """A process has ended without apparent errors"""

    def __init__(self, status):
        Exception.__init__(self, "process finished with exit code 0")
        # A clean exit always reports code 0 and no signal.
        self.exitCode = 0
        self.signal = None
        self.status = status


class ProcessTerminated(ConnectionLost):
    """A process has ended with a probable error condition"""

    def __init__(self, exitCode=None, signal=None, status=None):
        self.exitCode = exitCode
        self.signal = signal
        self.status = status
        message = "process ended"
        if exitCode is not None:
            message = "%s with exit code %s" % (message, exitCode)
        if signal is not None:
            message = "%s by signal %s" % (message, signal)
        Exception.__init__(self, message)


class ProcessExitedAlready(Exception):
    """
    The process has already exited and the operation requested can no longer
    be performed.
    """
class NotConnectingError(RuntimeError):
    """The Connector was not connecting when it was asked to stop connecting"""

    def __str__(self):
        # The docstring is deliberately reused as the message prefix.
        text = self.__doc__
        if self.args:
            text = '%s: %s' % (text, ' '.join(self.args))
        return '%s.' % text


class NotListeningError(RuntimeError):
    """The Port was not listening when it was asked to stop listening"""

    def __str__(self):
        # The docstring is deliberately reused as the message prefix.
        text = self.__doc__
        if self.args:
            text = '%s: %s' % (text, ' '.join(self.args))
        return '%s.' % text
class ReactorNotRunning(RuntimeError):
    """Raised when trying to stop a reactor which is not running."""


class ReactorAlreadyRunning(RuntimeError):
    """Raised when trying to start the reactor multiple times."""


class ReactorAlreadyInstalledError(AssertionError):
    """Raised when installing a reactor while one is already installed."""


class ConnectingCancelledError(Exception):
    """
    An C{Exception} that will be raised when an L{IStreamClientEndpoint} is
    cancelled before it connects.

    @ivar address: The L{IAddress} that is the destination of the
        cancelled L{IStreamClientEndpoint}.
    """

    def __init__(self, address):
        """
        @param address: The L{IAddress} that is the destination of the
            L{IStreamClientEndpoint} that was cancelled.
        """
        self.address = address
        Exception.__init__(self, address)
| apache-2.0 |
glove747/liberty-neutron | neutron/tests/unit/extensions/test_dns.py | 5 | 22551 | # Copyright 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import netaddr
from oslo_config import cfg
from neutron.common import constants
from neutron.common import utils
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.extensions import dns
from neutron.tests.unit.db import test_db_base_plugin_v2
class DnsExtensionManager(object):
    """Minimal extension manager exposing only the DNS attribute maps."""

    def get_resources(self):
        # This extension adds no new top-level API resources.
        return []

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []

    def get_extended_resources(self, version):
        # Delegate to the dns extension definition for the attribute maps.
        return dns.get_extended_resources(version)
class DnsExtensionTestPlugin(db_base_plugin_v2.NeutronDbPluginV2):
    """Test plugin to mixin the DNS Integration extensions.
    """
    # Advertising the alias makes the extension framework load the
    # dns-integration extension for this plugin.
    supported_extension_aliases = ["dns-integration"]
class DnsExtensionTestCase(test_db_base_plugin_v2.TestNetworksV2):
"""Test API extension dns attributes.
"""
    def setUp(self):
        # Use the stub plugin and extension manager defined above so only
        # the dns-integration attributes are exercised by these tests.
        plugin = ('neutron.tests.unit.extensions.test_dns.' +
                  'DnsExtensionTestPlugin')
        ext_mgr = DnsExtensionManager()
        super(DnsExtensionTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
    def _create_port(self, fmt, net_id, expected_res_status=None,
                     arg_list=None, **kwargs):
        """Build and submit a port-create request; return the raw response.

        Overrides the base helper to forward 'dns_name' (plus any extra
        names in arg_list) from kwargs into the request body.
        """
        data = {'port': {'network_id': net_id,
                         'tenant_id': self._tenant_id}}
        for arg in (('admin_state_up', 'device_id',
                    'mac_address', 'name', 'fixed_ips',
                    'tenant_id', 'device_owner', 'security_groups',
                    'dns_name') + (arg_list or ())):
            # Arg must be present
            if arg in kwargs:
                data['port'][arg] = kwargs[arg]
        # create a dhcp port device id if one hasn't been supplied
        if ('device_owner' in kwargs and
            kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and
            'host' in kwargs and
            'device_id' not in kwargs):
            device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host'])
            data['port']['device_id'] = device_id
        port_req = self.new_create_request('ports', data, fmt)
        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
            # create a specific auth context for this request
            port_req.environ['neutron.context'] = context.Context(
                '', kwargs['tenant_id'])
        port_res = port_req.get_response(self.api)
        if expected_res_status:
            self.assertEqual(port_res.status_int, expected_res_status)
        return port_res
    def _test_list_resources(self, resource, items, neutron_context=None,
                             query_params=None):
        """List `resource`s and check the ids match `items`; return the
        deserialized response so callers can make further assertions."""
        res = self._list('%ss' % resource,
                         neutron_context=neutron_context,
                         query_params=query_params)
        # API resource names use dashes; response keys use underscores.
        resource = resource.replace('-', '_')
        self.assertItemsEqual([i['id'] for i in res['%ss' % resource]],
                              [i[resource]['id'] for i in items])
        return res
    def test_create_port_json(self):
        # A freshly created port must carry a dns_assignment entry for its
        # single fixed IP (and an empty dns_name by default).
        keys = [('admin_state_up', True), ('status', self.port_create_status)]
        with self.port(name='myname') as port:
            for k, v in keys:
                self.assertEqual(port['port'][k], v)
            self.assertIn('mac_address', port['port'])
            ips = port['port']['fixed_ips']
            self.assertEqual(len(ips), 1)
            self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
            self.assertEqual('myname', port['port']['name'])
            self._verify_dns_assigment(port['port'],
                                       ips_list=['10.0.0.2'])
    def test_list_ports(self):
        # for this test we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with self.port() as v1, self.port() as v2, self.port() as v3:
            ports = (v1, v2, v3)
            res = self._test_list_resources('port', ports)
            # Every listed port must expose a dns_assignment for its IP.
            for port in res['ports']:
                self._verify_dns_assigment(
                    port, ips_list=[port['fixed_ips'][0]['ip_address']])
    def test_show_port(self):
        # Showing a single port must include its dns_assignment as well.
        with self.port() as port:
            req = self.new_show_request('ports', port['port']['id'], self.fmt)
            sport = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(port['port']['id'], sport['port']['id'])
            self._verify_dns_assigment(
                sport['port'],
                ips_list=[sport['port']['fixed_ips'][0]['ip_address']])
    def test_update_port_non_default_dns_domain_with_dns_name(self):
        # With a non-default dns_domain configured, an updated dns_name must
        # be reflected in the port's dns_assignment.
        with self.port() as port:
            cfg.CONF.set_override('dns_domain', 'example.com')
            data = {'port': {'admin_state_up': False, 'dns_name': 'vm1'}}
            req = self.new_update_request('ports', data, port['port']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(res['port']['admin_state_up'],
                             data['port']['admin_state_up'])
            self._verify_dns_assigment(res['port'],
                                       ips_list=['10.0.0.2'],
                                       dns_name='vm1')
    def test_update_port_default_dns_domain_with_dns_name(self):
        # With the default dns_domain, the requested dns_name is not
        # published, so the expected dns_name stays empty.
        with self.port() as port:
            data = {'port': {'admin_state_up': False, 'dns_name': 'vm1'}}
            req = self.new_update_request('ports', data, port['port']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(res['port']['admin_state_up'],
                             data['port']['admin_state_up'])
            self._verify_dns_assigment(res['port'],
                                       ips_list=['10.0.0.2'])
    def _verify_dns_assigment(self, port, ips_list=None, exp_ips_ipv4=0,
                              exp_ips_ipv6=0, ipv4_cidrs=None, ipv6_cidrs=None,
                              dns_name=''):
        """Assert that a port's dns_assignment matches expectations.

        Two verification modes:
        * ``ips_list`` given: every assignment's IP must be exactly one of
          those addresses (checked as a set, consumed on match).
        * otherwise: count assignments per IP version and check them against
          ``exp_ips_ipv4``/``exp_ips_ipv6``, verifying each address falls in
          one of ``ipv4_cidrs``/``ipv6_cidrs``.
        In both modes the hostname/fqdn of each assignment is checked against
        the value derived from ``dns_name`` and the configured dns_domain.
        """
        ips_list = ips_list or []
        ipv4_cidrs = ipv4_cidrs or []
        ipv6_cidrs = ipv6_cidrs or []
        self.assertEqual(port['dns_name'], dns_name)
        dns_assignment = port['dns_assignment']
        if ips_list:
            self.assertEqual(len(dns_assignment), len(ips_list))
            ips_set = set(ips_list)
        else:
            self.assertEqual(len(dns_assignment), exp_ips_ipv4 + exp_ips_ipv6)
            ipv4_count = 0
            ipv6_count = 0
            subnets_v4 = [netaddr.IPNetwork(cidr) for cidr in ipv4_cidrs]
            subnets_v6 = [netaddr.IPNetwork(cidr) for cidr in ipv6_cidrs]
        request_dns_name, request_fqdn = self._get_request_hostname_and_fqdn(
            dns_name)
        for assignment in dns_assignment:
            if ips_list:
                self.assertIn(assignment['ip_address'], ips_set)
                # Remove so duplicate assignments for one IP fail the test.
                ips_set.remove(assignment['ip_address'])
            else:
                ip = netaddr.IPAddress(assignment['ip_address'])
                if ip.version == 4:
                    self.assertTrue(self._verify_ip_in_subnet(ip, subnets_v4))
                    ipv4_count += 1
                else:
                    self.assertTrue(self._verify_ip_in_subnet(ip, subnets_v6))
                    ipv6_count += 1
            hostname, fqdn = self._get_hostname_and_fqdn(request_dns_name,
                                                         request_fqdn,
                                                         assignment)
            self.assertEqual(assignment['hostname'], hostname)
            self.assertEqual(assignment['fqdn'], fqdn)
        if ips_list:
            # Every expected IP must have been consumed exactly once.
            self.assertFalse(ips_set)
        else:
            self.assertEqual(ipv4_count, exp_ips_ipv4)
            self.assertEqual(ipv6_count, exp_ips_ipv6)
def _get_dns_domain(self):
if not cfg.CONF.dns_domain:
return ''
if cfg.CONF.dns_domain.endswith('.'):
return cfg.CONF.dns_domain
return '%s.' % cfg.CONF.dns_domain
def _get_request_hostname_and_fqdn(self, dns_name):
request_dns_name = ''
request_fqdn = ''
dns_domain = self._get_dns_domain()
if dns_name and dns_domain and dns_domain != 'openstacklocal.':
request_dns_name = dns_name
request_fqdn = request_dns_name
if not request_dns_name.endswith('.'):
request_fqdn = '%s.%s' % (dns_name, dns_domain)
return request_dns_name, request_fqdn
def _get_hostname_and_fqdn(self, request_dns_name, request_fqdn,
assignment):
dns_domain = self._get_dns_domain()
if request_dns_name:
hostname = request_dns_name
fqdn = request_fqdn
else:
hostname = 'host-%s' % assignment['ip_address'].replace(
'.', '-').replace(':', '-')
fqdn = hostname
if dns_domain:
fqdn = '%s.%s' % (hostname, dns_domain)
return hostname, fqdn
def _verify_ip_in_subnet(self, ip, subnets_list):
for subnet in subnets_list:
if ip in subnet:
return True
return False
    def test_update_port_update_ip(self):
        """Test update of port IP.

        Check that a configured IP 10.0.0.2 is replaced by 10.0.0.10,
        and that the DNS assignment follows the new address.
        """
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                # The port starts with the first allocatable address.
                ips = port['port']['fixed_ips']
                self.assertEqual(len(ips), 1)
                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                data = {'port': {'fixed_ips': [{'subnet_id':
                                                subnet['subnet']['id'],
                                                'ip_address': "10.0.0.10"}]}}
                req = self.new_update_request('ports', data,
                                              port['port']['id'])
                res = self.deserialize(self.fmt, req.get_response(self.api))
                ips = res['port']['fixed_ips']
                self.assertEqual(len(ips), 1)
                self.assertEqual(ips[0]['ip_address'], '10.0.0.10')
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                self._verify_dns_assigment(res['port'], ips_list=['10.0.0.10'])
    def test_update_port_update_ip_address_only(self):
        """Adding a second fixed IP keeps both addresses in the assignment."""
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(len(ips), 1)
                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                # Keep the original 10.0.0.2 and add 10.0.0.10 in one update.
                data = {'port': {'fixed_ips': [{'subnet_id':
                                                subnet['subnet']['id'],
                                                'ip_address': "10.0.0.10"},
                                               {'ip_address': "10.0.0.2"}]}}
                req = self.new_update_request('ports', data,
                                              port['port']['id'])
                res = self.deserialize(self.fmt, req.get_response(self.api))
                ips = res['port']['fixed_ips']
                self.assertEqual(len(ips), 2)
                self.assertIn({'ip_address': '10.0.0.2',
                               'subnet_id': subnet['subnet']['id']}, ips)
                self.assertIn({'ip_address': '10.0.0.10',
                               'subnet_id': subnet['subnet']['id']}, ips)
                self._verify_dns_assigment(res['port'],
                                           ips_list=['10.0.0.10',
                                                     '10.0.0.2'])
def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self):
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets()
self.assertEqual(res.status_code, 201)
    def test_create_port_multiple_v4_v6_subnets_pqdn_and_dns_domain_no_period(
            self):
        """PQDN dns_name + dns_domain without trailing '.' is accepted."""
        cfg.CONF.set_override('dns_domain', 'example.com')
        res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
            dns_name='vm1')
        self.assertEqual(res.status_code, 201)
    def test_create_port_multiple_v4_v6_subnets_pqdn_and_dns_domain_period(
            self):
        """PQDN dns_name + dns_domain with trailing '.' is accepted."""
        cfg.CONF.set_override('dns_domain', 'example.com.')
        res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
            dns_name='vm1')
        self.assertEqual(res.status_code, 201)
    def test_create_port_multiple_v4_v6_subnets_pqdn_and_no_dns_domain(
            self):
        """Port creation succeeds when dns_domain is empty."""
        cfg.CONF.set_override('dns_domain', '')
        res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets()
        self.assertEqual(res.status_code, 201)
    def test_create_port_multiple_v4_v6_subnets_fqdn_and_dns_domain_no_period(
            self):
        """FQDN dns_name matching dns_domain (no trailing '.') is accepted."""
        cfg.CONF.set_override('dns_domain', 'example.com')
        res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
            dns_name='vm1.example.com.')
        self.assertEqual(res.status_code, 201)
    def test_create_port_multiple_v4_v6_subnets_fqdn_and_dns_domain_period(
            self):
        """FQDN dns_name matching dns_domain (with trailing '.') is accepted."""
        cfg.CONF.set_override('dns_domain', 'example.com.')
        res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
            dns_name='vm1.example.com.')
        self.assertEqual(res.status_code, 201)
    def test_create_port_multiple_v4_v6_subnets_fqdn_default_domain_period(
            self):
        """Port creation succeeds with the default 'openstacklocal.' domain."""
        cfg.CONF.set_override('dns_domain', 'openstacklocal.')
        res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets()
        self.assertEqual(res.status_code, 201)
    def test_create_port_multiple_v4_v6_subnets_bad_fqdn_and_dns_domain(
            self):
        """A FQDN whose higher labels differ from dns_domain is rejected."""
        cfg.CONF.set_override('dns_domain', 'example.com')
        res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
            dns_name='vm1.bad-domain.com.')
        self.assertEqual(res.status_code, 400)
        expected_error = ('The dns_name passed is a FQDN. Its higher level '
                          'labels must be equal to the dns_domain option in '
                          'neutron.conf')
        self.assertIn(expected_error, res.text)
    def test_create_port_multiple_v4_v6_subnets_bad_pqdn_and_dns_domain(
            self):
        """A PQDN that would exceed FQDN_MAX_LEN once qualified is rejected."""
        cfg.CONF.set_override('dns_domain', 'example.com')
        # Build a name of exactly FQDN_MAX_LEN characters: appending the
        # dns_domain then pushes the resulting FQDN over the limit.
        num_labels = int(
            math.floor(dns.FQDN_MAX_LEN / dns.DNS_LABEL_MAX_LEN))
        filler_len = int(
            math.floor(dns.FQDN_MAX_LEN % dns.DNS_LABEL_MAX_LEN))
        dns_name = (('a' * (dns.DNS_LABEL_MAX_LEN - 1) + '.') *
                    num_labels + 'a' * filler_len)
        res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
            dns_name=dns_name)
        self.assertEqual(res.status_code, 400)
        expected_error = ("When the two are concatenated to form a FQDN "
                          "(with a '.' at the end), the resulting length "
                          "exceeds the maximum size")
        self.assertIn(expected_error, res.text)
    def _test_create_port_with_multiple_ipv4_and_ipv6_subnets(self,
                                                              dns_name=''):
        """Test port create with multiple IPv4, IPv6 DHCP/SLAAC subnets.

        Builds one network with two IPv4, two SLAAC IPv6 and two stateful
        DHCPv6 subnets, creates a port (optionally with ``dns_name``) and,
        when creation succeeds, verifies the address allocation pattern and
        the resulting DNS assignment. Returns the raw create response so
        callers can assert on the status code or error text.
        """
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        sub_dicts = [
            {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24',
             'ip_version': 4, 'ra_addr_mode': None},
            {'gateway': '10.0.1.1', 'cidr': '10.0.1.0/24',
             'ip_version': 4, 'ra_addr_mode': None},
            {'gateway': 'fe80::1', 'cidr': 'fe80::/64',
             'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC},
            {'gateway': 'fe81::1', 'cidr': 'fe81::/64',
             'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC},
            {'gateway': 'fe82::1', 'cidr': 'fe82::/64',
             'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL},
            {'gateway': 'fe83::1', 'cidr': 'fe83::/64',
             'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}]
        subnets = {}
        for sub_dict in sub_dicts:
            subnet = self._make_subnet(
                self.fmt, network,
                gateway=sub_dict['gateway'],
                cidr=sub_dict['cidr'],
                ip_version=sub_dict['ip_version'],
                ipv6_ra_mode=sub_dict['ra_addr_mode'],
                ipv6_address_mode=sub_dict['ra_addr_mode'])
            subnets[subnet['subnet']['id']] = sub_dict
        res = self._create_port(self.fmt, net_id=network['network']['id'],
                                dns_name=dns_name)
        # Early return on failure so callers can inspect the error response.
        if res.status_code != 201:
            return res
        port = self.deserialize(self.fmt, res)
        # Since the create port request was made without a list of fixed IPs,
        # the port should be associated with addresses for one of the
        # IPv4 subnets, one of the DHCPv6 subnets, and both of the IPv6
        # SLAAC subnets.
        self.assertEqual(4, len(port['port']['fixed_ips']))
        addr_mode_count = {None: 0, constants.DHCPV6_STATEFUL: 0,
                           constants.IPV6_SLAAC: 0}
        for fixed_ip in port['port']['fixed_ips']:
            subnet_id = fixed_ip['subnet_id']
            if subnet_id in subnets:
                addr_mode_count[subnets[subnet_id]['ra_addr_mode']] += 1
        self.assertEqual(1, addr_mode_count[None])
        self.assertEqual(1, addr_mode_count[constants.DHCPV6_STATEFUL])
        self.assertEqual(2, addr_mode_count[constants.IPV6_SLAAC])
        self._verify_dns_assigment(port['port'], exp_ips_ipv4=1,
                                   exp_ips_ipv6=3,
                                   ipv4_cidrs=[sub_dicts[0]['cidr'],
                                               sub_dicts[1]['cidr']],
                                   ipv6_cidrs=[sub_dicts[2]['cidr'],
                                               sub_dicts[3]['cidr'],
                                               sub_dicts[4]['cidr'],
                                               sub_dicts[5]['cidr']],
                                   dns_name=dns_name)
        return res
    def test_api_extension_validation_with_bad_dns_names(self):
        """Invalid dns_name values are rejected with HTTP 400."""
        num_labels = int(
            math.floor(dns.FQDN_MAX_LEN / dns.DNS_LABEL_MAX_LEN))
        filler_len = int(
            math.floor(dns.FQDN_MAX_LEN % dns.DNS_LABEL_MAX_LEN))
        # Non-string, control chars, bad label punctuation, over-long label,
        # and an over-long total name.
        dns_names = [555, '\f\n\r', '.', '-vm01', '_vm01', 'vm01-',
                     '-vm01.test1', 'vm01.-test1', 'vm01._test1',
                     'vm01.test1-', 'vm01.te$t1', 'vm0#1.test1.',
                     'vm01.123.', '-' + 'a' * dns.DNS_LABEL_MAX_LEN,
                     'a' * (dns.DNS_LABEL_MAX_LEN + 1),
                     ('a' * (dns.DNS_LABEL_MAX_LEN - 1) + '.') *
                     num_labels + 'a' * (filler_len + 1)]
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        sub_dict = {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24',
                    'ip_version': 4, 'ra_addr_mode': None}
        self._make_subnet(self.fmt, network, gateway=sub_dict['gateway'],
                          cidr=sub_dict['cidr'],
                          ip_version=sub_dict['ip_version'],
                          ipv6_ra_mode=sub_dict['ra_addr_mode'],
                          ipv6_address_mode=sub_dict['ra_addr_mode'])
        for dns_name in dns_names:
            res = self._create_port(self.fmt, net_id=network['network']['id'],
                                    dns_name=dns_name)
            self.assertEqual(res.status_code, 400)
            # Either of these validation messages is acceptable, depending on
            # whether the failure was type conversion or name syntax.
            is_expected_message = (
                'cannot be converted to lowercase string' in res.text or
                'not a valid PQDN or FQDN. Reason:' in res.text)
            self.assertTrue(is_expected_message)
    def test_api_extension_validation_with_good_dns_names(self):
        """Valid dns_name values (including edge lengths) are accepted."""
        cfg.CONF.set_override('dns_domain', 'example.com')
        higher_labels_len = len('example.com.')
        # Longest PQDN that still fits once 'example.com.' is appended.
        num_labels = int(
            math.floor((dns.FQDN_MAX_LEN - higher_labels_len) /
                       dns.DNS_LABEL_MAX_LEN))
        filler_len = int(
            math.floor((dns.FQDN_MAX_LEN - higher_labels_len) %
                       dns.DNS_LABEL_MAX_LEN))
        dns_names = ['', 'www.1000.com', 'vM01', 'vm01.example.com.',
                     '8vm01', 'vm-01.example.com.', 'vm01.test',
                     'vm01.test.example.com.', 'vm01.test-100',
                     'vm01.test-100.example.com.',
                     'a' * dns.DNS_LABEL_MAX_LEN,
                     ('a' * dns.DNS_LABEL_MAX_LEN) + '.example.com.',
                     ('a' * (dns.DNS_LABEL_MAX_LEN - 1) + '.') *
                     num_labels + 'a' * (filler_len - 1)]
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        sub_dict = {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24',
                    'ip_version': 4, 'ra_addr_mode': None}
        self._make_subnet(self.fmt, network, gateway=sub_dict['gateway'],
                          cidr=sub_dict['cidr'],
                          ip_version=sub_dict['ip_version'],
                          ipv6_ra_mode=sub_dict['ra_addr_mode'],
                          ipv6_address_mode=sub_dict['ra_addr_mode'])
        for dns_name in dns_names:
            res = self._create_port(self.fmt, net_id=network['network']['id'],
                                    dns_name=dns_name)
            self.assertEqual(res.status_code, 201)
| apache-2.0 |
marinho/geraldo | site/newsite/site-geraldo/django/contrib/webdesign/lorem_ipsum.py | 439 | 4872 | """
Utility functions for generating "lorem ipsum" Latin text.
"""
import random
COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
'maxime', 'corrupti')
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
    """
    Returns a randomly generated sentence of lorem ipsum text.

    The first word is capitalized, and the sentence ends in either a period or
    question mark. Commas are added at random.
    """
    # Build 1-5 comma-separated sections, each of 3-12 random words.
    section_count = random.randint(1, 5)
    sections = []
    for _ in range(section_count):
        sections.append(u' '.join(random.sample(WORDS, random.randint(3, 12))))
    body = u', '.join(sections)
    # Capitalize the first character and append random end punctuation.
    return u'%s%s%s' % (body[0].upper(), body[1:], random.choice('?.'))
def paragraph():
    """
    Returns a randomly generated paragraph of lorem ipsum text.

    The paragraph consists of between 1 and 4 sentences, inclusive.
    """
    sentence_count = random.randint(1, 4)
    return u' '.join([sentence() for _ in range(sentence_count)])
def paragraphs(count, common=True):
    """
    Returns a list of paragraphs as returned by paragraph().

    If `common` is True, then the first paragraph will be the standard
    'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
    Latin text. Either way, subsequent paragraphs will be random Latin text.
    """
    result = []
    for index in range(count):
        use_common = common and index == 0
        result.append(COMMON_P if use_common else paragraph())
    return result
def words(count, common=True):
    """
    Returns a string of `count` lorem ipsum words separated by a single space.

    If `common` is True, then the first 19 words will be the standard
    'lorem ipsum' words. Otherwise, all words will be selected randomly.
    """
    chosen = list(COMMON_WORDS) if common else []
    if count <= len(chosen):
        # Already enough common words; keep only the first `count`.
        chosen = chosen[:count]
    else:
        remaining = count - len(chosen)
        # Top up with random words, re-sampling WORDS as often as needed.
        while remaining > 0:
            batch = min(remaining, len(WORDS))
            remaining -= batch
            chosen += random.sample(WORDS, batch)
    return u' '.join(chosen)
| lgpl-3.0 |
e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/pip/operations/check.py | 342 | 1590 |
def check_requirements(installed_dists):
    """Scan installed distributions for missing or conflicting requirements.

    Returns a pair of dicts keyed by 'name==version': the first maps to the
    requirements that are absent, the second to (requirement, installed_dist)
    pairs with incompatible versions.
    """
    missing = {}
    incompatible = {}
    for dist in installed_dists:
        label = '%s==%s' % (dist.project_name, dist.version)
        absent = list(get_missing_reqs(dist, installed_dists))
        if absent:
            missing[label] = absent
        conflicts = list(get_incompatible_reqs(dist, installed_dists))
        if conflicts:
            incompatible[label] = conflicts
    return (missing, incompatible)
def get_missing_reqs(dist, installed_dists):
    """Return all of the requirements of `dist` that aren't present in
    `installed_dists`.

    Matching is by case-insensitive project name only; version conflicts
    are reported separately by get_incompatible_reqs().
    """
    installed_names = set(d.project_name.lower() for d in installed_dists)
    for requirement in dist.requires():
        # Fix: the original also accumulated results in a local set that was
        # never read (dead code); the generator alone is sufficient.
        if requirement.project_name.lower() not in installed_names:
            yield requirement
def get_incompatible_reqs(dist, installed_dists):
    """Return all of the requirements of `dist` that are present in
    `installed_dists`, but have incompatible versions.

    Yields (requirement, installed_dist) pairs.
    """
    by_name = dict((d.project_name, d) for d in installed_dists)
    for requirement in dist.requires():
        installed = by_name.get(requirement.project_name)
        # `dist in requirement` is the version-containment test provided by
        # the requirement object.
        if installed and installed not in requirement:
            yield (requirement, installed)
| bsd-3-clause |
towerjoo/mindsbook | djangoappengine/storage.py | 110 | 5599 | import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.files.base import File
from django.core.files.storage import Storage
from django.core.files.uploadedfile import UploadedFile
from django.core.files.uploadhandler import FileUploadHandler, \
StopFutureHandlers
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.utils.encoding import smart_str, force_unicode
from google.appengine.ext.blobstore import BlobInfo, BlobKey, delete, \
create_upload_url, BLOB_KEY_HEADER, BLOB_RANGE_HEADER, BlobReader
def prepare_upload(request, url, **kwargs):
    """Return the Blobstore upload URL for ``url`` plus an empty params dict."""
    upload_url = create_upload_url(url)
    return upload_url, {}
def serve_file(request, file, save_as, content_type, **kwargs):
    """Build an HttpResponse that makes App Engine serve ``file`` from the
    Blobstore.

    The blob key is placed in the BLOB_KEY_HEADER; the App Engine runtime
    replaces the (empty) response body with the blob's content. Raises
    ValueError if ``file`` is not backed by the Blobstore.
    """
    # Accept both a wrapper File (file.file.blobstore_info) and a
    # BlobstoreFile directly (file.blobstore_info).
    if hasattr(file, 'file') and hasattr(file.file, 'blobstore_info'):
        blobkey = file.file.blobstore_info.key()
    elif hasattr(file, 'blobstore_info'):
        blobkey = file.blobstore_info.key()
    else:
        raise ValueError("The provided file can't be served via the "
                         "Google App Engine Blobstore.")
    response = HttpResponse(content_type=content_type)
    response[BLOB_KEY_HEADER] = str(blobkey)
    response['Accept-Ranges'] = 'bytes'
    # Forward any client range request so the Blobstore honors partial GETs.
    http_range = request.META.get('HTTP_RANGE')
    if http_range is not None:
        response[BLOB_RANGE_HEADER] = http_range
    if save_as:
        response['Content-Disposition'] = smart_str(u'attachment; filename=%s' % save_as)
    if file.size is not None:
        response['Content-Length'] = file.size
    return response
class BlobstoreStorage(Storage):
    """Google App Engine Blobstore storage backend.

    Stored names have the form '<blobkey>/<original-name>'; the key part is
    recovered with _get_key() for all Blobstore operations.
    """

    def _open(self, name, mode='rb'):
        """Return a BlobstoreFile wrapping the blob stored under ``name``."""
        return BlobstoreFile(name, mode, self)

    def _save(self, name, content):
        """Record an already-uploaded blob under ``name``.

        Data is not written here: the Blobstore upload handler has already
        stored it, so ``content`` must carry a BlobInfo/BlobKey.
        """
        name = name.replace('\\', '/')
        if hasattr(content, 'file') and hasattr(content.file, 'blobstore_info'):
            data = content.file.blobstore_info
        elif hasattr(content, 'blobstore_info'):
            data = content.blobstore_info
        else:
            raise ValueError("The App Engine storage backend only supports "
                             "BlobstoreFile instances or File instances "
                             "whose file attribute is a BlobstoreFile.")
        if isinstance(data, (BlobInfo, BlobKey)):
            # We change the file name to the BlobKey's str() value
            if isinstance(data, BlobInfo):
                data = data.key()
            return '%s/%s' % (data, name.lstrip('/'))
        else:
            raise ValueError("The App Engine Blobstore only supports "
                             "BlobInfo values. Data can't be uploaded "
                             "directly. You have to use the file upload "
                             "handler.")

    def delete(self, name):
        """Delete the blob referenced by ``name`` from the Blobstore."""
        delete(self._get_key(name))

    def exists(self, name):
        """Return True when a BlobInfo for ``name`` can still be fetched."""
        return self._get_blobinfo(name) is not None

    def size(self, name):
        """Return the stored blob's size in bytes."""
        return self._get_blobinfo(name).size

    def url(self, name):
        # Blobs have no public URL; serve them via serve_file() instead.
        raise NotImplementedError()

    def get_valid_name(self, name):
        """Normalize a candidate name (strip whitespace, use '/' separators)."""
        return force_unicode(name).strip().replace('\\', '/')

    def get_available_name(self, name):
        # Names embed a unique BlobKey, so collisions can't happen; only
        # normalize the separators.
        return name.replace('\\', '/')

    def _get_key(self, name):
        """Extract the BlobKey portion from a stored '<key>/<name>' value."""
        return BlobKey(name.split('/', 1)[0])

    def _get_blobinfo(self, name):
        """Fetch the BlobInfo for ``name`` (None when the blob is gone)."""
        return BlobInfo.get(self._get_key(name))
class BlobstoreFile(File):
    """Read-only Django File backed by a Blobstore blob.

    The underlying BlobReader is created lazily on first access to ``file``.
    """

    def __init__(self, name, mode, storage):
        self.name = name
        self._storage = storage
        self._mode = mode  # kept for API symmetry; reads are the only support
        self.blobstore_info = storage._get_blobinfo(name)

    @property
    def size(self):
        """Size in bytes as recorded by the Blobstore."""
        return self.blobstore_info.size

    def write(self, content):
        # Blobstore blobs are immutable once uploaded.
        raise NotImplementedError()

    @property
    def file(self):
        """Lazily-opened BlobReader over the blob's content."""
        if not hasattr(self, '_file'):
            self._file = BlobReader(self.blobstore_info.key())
        return self._file
class BlobstoreFileUploadHandler(FileUploadHandler):
    """
    File upload handler for the Google App Engine Blobstore.

    Activates only when the request carries a 'blob-key' in the content-type
    extras, i.e. when the Blobstore has already stored the upload itself.
    """

    def new_file(self, *args, **kwargs):
        super(BlobstoreFileUploadHandler, self).new_file(*args, **kwargs)
        blobkey = self.content_type_extra.get('blob-key')
        self.active = blobkey is not None
        if self.active:
            self.blobkey = BlobKey(blobkey)
            # Stop Django's other handlers: the data already lives in the
            # Blobstore and must not be buffered again.
            raise StopFutureHandlers()

    def receive_data_chunk(self, raw_data, start):
        """
        Pass chunks through untouched when inactive; when active, discard
        them, since the Blobstore has already persisted the upload.
        """
        if not self.active:
            return raw_data

    def file_complete(self, file_size):
        """
        Return a file object if we're activated.
        """
        if not self.active:
            return
        return BlobstoreUploadedFile(
            blobinfo=BlobInfo(self.blobkey),
            charset=self.charset)
class BlobstoreUploadedFile(UploadedFile):
    """
    An uploaded file whose content already resides in the Blobstore and is
    read back through a BlobReader.
    """

    def __init__(self, blobinfo, charset):
        super(BlobstoreUploadedFile, self).__init__(
            BlobReader(blobinfo.key()), blobinfo.filename,
            blobinfo.content_type, blobinfo.size, charset)
        self.blobstore_info = blobinfo

    def open(self, mode=None):
        # The BlobReader is always readable; nothing to (re)open.
        pass

    def chunks(self, chunk_size=1024*128):
        """Yield the blob content in ``chunk_size`` pieces from the start."""
        self.file.seek(0)
        while True:
            content = self.read(chunk_size)
            if not content:
                break
            yield content

    def multiple_chunks(self, chunk_size=1024*128):
        # Always chunked: blobs may be large, so never load them whole.
        return True
sarakha63/persomov | libs/enzyme/core.py | 180 | 15208 | # -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
# Copyright 2003-2006 Thomas Schueppel <stain@acm.org>
# Copyright 2003-2006 Dirk Meyer <dischi@freevo.org>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
import re
import logging
import fourcc
import language
from strutils import str_to_unicode, unicode_to_str
UNPRINTABLE_KEYS = ['thumbnail', 'url', 'codec_private']
MEDIACORE = ['title', 'caption', 'comment', 'size', 'type', 'subtype', 'timestamp',
'keywords', 'country', 'language', 'langcode', 'url', 'artist',
'mime', 'datetime', 'tags', 'hash']
AUDIOCORE = ['channels', 'samplerate', 'length', 'encoder', 'codec', 'format',
'samplebits', 'bitrate', 'fourcc', 'trackno', 'id', 'userdate',
'enabled', 'default', 'codec_private']
MUSICCORE = ['trackof', 'album', 'genre', 'discs', 'thumbnail']
VIDEOCORE = ['length', 'encoder', 'bitrate', 'samplerate', 'codec', 'format',
'samplebits', 'width', 'height', 'fps', 'aspect', 'trackno',
'fourcc', 'id', 'enabled', 'default', 'codec_private']
AVCORE = ['length', 'encoder', 'trackno', 'trackof', 'copyright', 'product',
'genre', 'writer', 'producer', 'studio', 'rating', 'actors', 'thumbnail',
'delay', 'image', 'video', 'audio', 'subtitles', 'chapters', 'software',
'summary', 'synopsis', 'season', 'episode', 'series']
# get logging object
log = logging.getLogger(__name__)
class Media(object):
    """
    Media is the base class to all Media Metadata Containers. It defines
    the basic structures that handle metadata. Media and its derivates
    contain a common set of metadata attributes that is listed in keys.
    Specific derivates contain additional keys to the dublin core set that is
    defined in Media.
    """
    media = None
    # Default attribute key set; subclasses extend this with their own lists.
    _keys = MEDIACORE
    # Maps table names to {table-key: attribute-name} dicts; consumed by
    # _finalize() to promote table entries to attributes.
    table_mapping = {}

    def __init__(self, hash=None):
        # When a dict is given, rebuild the Media from its converted form
        # (inverse of convert()); nested dict lists become nested Media.
        if hash is not None:
            # create Media based on dict
            for key, value in hash.items():
                if isinstance(value, list) and value and isinstance(value[0], dict):
                    value = [Media(x) for x in value]
                self._set(key, value)
            return
        # Copy the class-level key list so instance mutations stay local.
        self._keys = self._keys[:]
        self.tables = {}
        # Tags, unlike tables, are more well-defined dicts whose values are
        # either Tag objects, other dicts (for nested tags), or lists of either
        # (for multiple instances of the tag, e.g. actor). Where possible,
        # parsers should transform tag names to conform to the Official
        # Matroska tags defined at http://www.matroska.org/technical/specs/tagging/index.html
        # All tag names will be lower-cased.
        self.tags = Tags()
        for key in set(self._keys) - set(['media', 'tags']):
            setattr(self, key, None)

    #
    # unicode and string convertion for debugging
    #
    #TODO: Fix that mess
    def __unicode__(self):
        """Render a human-readable multi-line dump of all set attributes."""
        result = u''
        # print normal attributes
        lists = []
        for key in self._keys:
            value = getattr(self, key, None)
            if value == None or key == 'url':
                continue
            if isinstance(value, list):
                if not value:
                    continue
                elif isinstance(value[0], basestring):
                    # Just a list of strings (keywords?), so don't treat it specially.
                    value = u', '.join(value)
                else:
                    lists.append((key, value))
                    continue
            elif isinstance(value, dict):
                # Tables or tags treated separately.
                continue
            if key in UNPRINTABLE_KEYS:
                value = '<unprintable data, size=%d>' % len(value)
            result += u'| %10s: %s\n' % (unicode(key), unicode(value))
        # print tags (recursively, to support nested tags).
        def print_tags(tags, suffix, show_label):
            # Local helper; `suffix` carries the indentation for nesting.
            result = ''
            for n, (name, tag) in enumerate(tags.items()):
                result += u'| %12s%s%s = ' % (u'tags: ' if n == 0 and show_label else '', suffix, name)
                if isinstance(tag, list):
                    # TODO: doesn't support lists/dicts within lists.
                    result += u'%s\n' % ', '.join(subtag.value for subtag in tag)
                else:
                    result += u'%s\n' % (tag.value or '')
                if isinstance(tag, dict):
                    result += print_tags(tag, ' ', False)
            return result
        result += print_tags(self.tags, '', True)
        # print lists
        for key, l in lists:
            for n, item in enumerate(l):
                label = '+-- ' + key.rstrip('s').capitalize()
                if key not in ['tracks', 'subtitles', 'chapters']:
                    label += ' Track'
                result += u'%s #%d\n' % (label, n + 1)
                result += '| ' + re.sub(r'\n(.)', r'\n| \1', unicode(item))
        # print tables
        #FIXME: WTH?
        # if log.level >= 10:
        #     for name, table in self.tables.items():
        #         result += '+-- Table %s\n' % str(name)
        #         for key, value in table.items():
        #             try:
        #                 value = unicode(value)
        #                 if len(value) > 50:
        #                     value = u'<unprintable data, size=%d>' % len(value)
        #             except (UnicodeDecodeError, TypeError):
        #                 try:
        #                     value = u'<unprintable data, size=%d>' % len(value)
        #                 except AttributeError:
        #                     value = u'<unprintable data>'
        #             result += u'| | %s: %s\n' % (unicode(key), value)
        return result

    def __str__(self):
        """Byte-string form of __unicode__ (default encoding)."""
        return unicode(self).encode()

    def __repr__(self):
        """Short repr including the source URL when one is set."""
        if hasattr(self, 'url'):
            return '<%s %s>' % (str(self.__class__)[8:-2], self.url)
        else:
            return '<%s>' % (str(self.__class__)[8:-2])

    #
    # internal functions
    #
    def _appendtable(self, name, hashmap):
        """
        Appends a tables of additional metadata to the Object.
        If such a table already exists, the given tables items are
        added to the existing one.
        """
        if name not in self.tables:
            self.tables[name] = hashmap
        else:
            # Append to the already existing table
            for k in hashmap.keys():
                self.tables[name][k] = hashmap[k]

    def _set(self, key, value):
        """
        Set key to value and add the key to the internal keys list if
        missing.
        """
        # Skip no-op None assignments so _keys is not polluted.
        if value is None and getattr(self, key, None) is None:
            return
        if isinstance(value, str):
            value = str_to_unicode(value)
        setattr(self, key, value)
        if not key in self._keys:
            self._keys.append(key)

    def _set_url(self, url):
        """
        Set the URL of the source
        """
        self.url = url

    def _finalize(self):
        """
        Correct same data based on specific rules (normalization pass run
        once parsing is complete).
        """
        # make sure all strings are unicode
        for key in self._keys:
            if key in UNPRINTABLE_KEYS:
                continue
            value = getattr(self, key)
            if value is None:
                continue
            if key == 'image':
                # Images are raw bytes; force unicode values back to str.
                if isinstance(value, unicode):
                    setattr(self, key, unicode_to_str(value))
                continue
            if isinstance(value, str):
                setattr(self, key, str_to_unicode(value))
            if isinstance(value, unicode):
                setattr(self, key, value.strip().rstrip().replace(u'\0', u''))
            if isinstance(value, list) and value and isinstance(value[0], Media):
                # Recurse into nested Media (tracks, chapters, ...).
                for submenu in value:
                    submenu._finalize()
        # copy needed tags from tables
        for name, table in self.tables.items():
            mapping = self.table_mapping.get(name, {})
            for tag, attr in mapping.items():
                # Existing attribute values win over table entries.
                if self.get(attr):
                    continue
                value = table.get(tag, None)
                if value is not None:
                    if not isinstance(value, (str, unicode)):
                        value = str_to_unicode(str(value))
                    elif isinstance(value, str):
                        value = str_to_unicode(value)
                    value = value.strip().rstrip().replace(u'\0', u'')
                    setattr(self, attr, value)
        if 'fourcc' in self._keys and 'codec' in self._keys and self.codec is not None:
            # Codec may be a fourcc, in which case we resolve it to its actual
            # name and set the fourcc attribute.
            self.fourcc, self.codec = fourcc.resolve(self.codec)
        if 'language' in self._keys:
            self.langcode, self.language = language.resolve(self.language)

    #
    # data access
    #
    def __contains__(self, key):
        """
        Test if key exists in the dict
        """
        return hasattr(self, key)

    def get(self, attr, default=None):
        """
        Returns the given attribute. If the attribute is not set by
        the parser return 'default'.
        """
        return getattr(self, attr, default)

    def __getitem__(self, attr):
        """
        Get the value of the given attribute
        """
        return getattr(self, attr, None)

    def __setitem__(self, key, value):
        """
        Set the value of 'key' to 'value'
        """
        setattr(self, key, value)

    def has_key(self, key):
        """
        Check if the object has an attribute 'key'
        """
        return hasattr(self, key)

    def convert(self):
        """
        Convert Media to dict.
        """
        result = {}
        for k in self._keys:
            value = getattr(self, k, None)
            # Nested Media lists are converted recursively.
            if isinstance(value, list) and value and isinstance(value[0], Media):
                value = [x.convert() for x in value]
            result[k] = value
        return result

    def keys(self):
        """
        Return all keys for the attributes set by the parser.
        """
        return self._keys
class Collection(Media):
    """
    Collection of digital media, e.g. a CD, DVD, directory or playlist.
    """
    _keys = Media._keys + ['id', 'tracks']

    def __init__(self):
        """Initialize an empty collection with no tracks."""
        super(Collection, self).__init__()
        self.tracks = []
class Tag(object):
    """
    A single tag value stored inside a Tags mapping.

    Values are strings (binary data), unicode objects, or datetime objects
    for tags representing dates or times. Setting ``langcode`` also resolves
    and stores the matching ``language`` name.
    """

    def __init__(self, value=None, langcode='und', binary=False):
        super(Tag, self).__init__()
        self.value = value
        # Goes through the property setter below, which also sets language.
        self.langcode = langcode
        self.binary = binary

    def __unicode__(self):
        return unicode(self.value)

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        if self.binary:
            return '<Binary Tag object: size=%d>' % len(self.value)
        return '<Tag object: %s>' % repr(self.value)

    @property
    def langcode(self):
        return self._langcode

    @langcode.setter
    def langcode(self, code):
        self._langcode, self.language = language.resolve(code)
class Tags(dict, Tag):
    """
    A dictionary containing Tag objects. Values can be other Tags objects
    (for nested tags), lists, or Tag objects.

    A Tags object is more or less a dictionary but it also contains a value.
    This is necessary in order to represent this kind of tag specification
    (e.g. for Matroska)::

        <Simple>
          <Name>LAW_RATING</Name>
          <String>PG</String>
          <Simple>
            <Name>COUNTRY</Name>
            <String>US</String>
          </Simple>
        </Simple>

    The attribute RATING has a value (PG), but it also has a child tag
    COUNTRY that specifies the country code the rating belongs to.
    """
    def __init__(self, value=None, langcode='und', binary=False):
        super(Tags, self).__init__()
        self.value = value
        self.langcode = langcode
        # Bug fix: this was hard-coded to False, silently discarding the
        # caller's `binary` argument. Tag.__init__ stores it correctly;
        # this class now does the same.
        self.binary = binary
class AudioStream(Media):
    """
    Audio track in a multiplexed container.

    Extends the base attribute set with the audio-specific keys in
    AUDIOCORE (a module-level list).
    """
    _keys = Media._keys + AUDIOCORE
class Music(AudioStream):
    """
    Digital music track.
    """
    _keys = AudioStream._keys + MUSICCORE

    def _finalize(self):
        """
        Correct some data based on specific rules: zero-pad single-digit
        track numbers when the total track count is known.
        """
        AudioStream._finalize(self)
        if self.trackof:
            try:
                number = int(self.trackno)
            except (AttributeError, ValueError):
                pass
            else:
                # XXX Why is this needed anyway?
                if number < 10:
                    self.trackno = u'0%s' % number
class VideoStream(Media):
    """
    Video track in a multiplexed container.

    Extends the base attribute set with the video-specific keys in
    VIDEOCORE (a module-level list).
    """
    _keys = Media._keys + VIDEOCORE
class Chapter(Media):
    """
    A single chapter mark inside a multiplexed container.
    """
    _keys = ['enabled', 'name', 'pos', 'id']

    def __init__(self, name=None, pos=0):
        Media.__init__(self)
        # Chapters default to enabled; `pos` is the chapter position.
        self.enabled = True
        self.name = name
        self.pos = pos
class Subtitle(Media):
    """
    Subtitle track in a multiplexed container.
    """
    # NOTE(review): the `language` __init__ parameter shadows the
    # module-level `language` helper used elsewhere in this file; only
    # the instance attribute is set here.
    _keys = ['enabled', 'default', 'langcode', 'language', 'trackno', 'title',
             'id', 'codec']
    def __init__(self, language=None):
        Media.__init__(self)
        self.language = language
class AVContainer(Media):
    """
    Container for Audio and Video streams. This is the container type for
    all media that contain more than one stream.
    """
    _keys = Media._keys + AVCORE

    def __init__(self):
        Media.__init__(self)
        self.audio = []
        self.video = []
        self.subtitles = []
        self.chapters = []

    def _finalize(self):
        """
        Correct some data based on specific rules: if the container itself
        has no length, derive it from the longest audio/video track.
        """
        Media._finalize(self)
        if not self.length and len(self.video) and self.video[0].length:
            # Length not specified for container, so use the largest length
            # of its tracks as container length (0 if no track has one).
            self.length = max(
                [0] + [t.length for t in self.video + self.audio if t.length])
| gpl-3.0 |
kageiit/buck | python-dsl/buck_parser/buck_test.py | 5 | 25737 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import shutil
import tempfile
import unittest
from pathlib import Path, PurePosixPath, PureWindowsPath
from .buck import (
BuildFileContext,
LazyBuildEnvPartial,
flatten_dicts,
get_mismatched_args,
host_info,
subdir_glob,
)
from .glob_internal import glob_internal, path_component_starts_with_dot
from .glob_watchman import format_watchman_query_params
class FakePathMixin(object):
    """Mixin that fakes Path.glob()/is_file() for tests.

    Glob results are looked up in the ``glob_results`` dict attached by
    ``fake_path``, keyed by the unix form of the pattern.
    """

    def glob(self, pattern):
        # Python glob supports unix paths on windows out of the box, so
        # normalise the pattern to forward slashes before the lookup.
        unix_pattern = pattern.replace("\\", "/")
        return self.glob_results.get(unix_pattern)

    def is_file(self):
        return True
class FakePosixPath(FakePathMixin, PurePosixPath):
    # Posix-flavoured fake path with canned glob results.
    pass
class FakeWindowsPath(FakePathMixin, PureWindowsPath):
    # Windows-flavoured fake path with canned glob results.
    pass
def fake_path(fake_path_class, path, glob_results=None):
    """Build a fake path object with canned glob results.

    Path does magic in __new__ with its args; it's hard to add more without
    changing that class. So we use a wrapper function to diddle with
    FakePath's members.

    :param fake_path_class: FakePosixPath or FakeWindowsPath.
    :param path: the path string for the new fake path object.
    :param glob_results: optional mapping of glob pattern -> list of
        relative path strings the pattern should "match".
    :return: a fake_path_class instance whose ``glob_results`` maps each
        pattern to child paths of the new instance.
    """
    result = fake_path_class(path)
    result.glob_results = {}
    # A `None` default replaces the old mutable `{}` default (the classic
    # shared-default pitfall); caller-visible behavior is unchanged.
    for pattern, paths in (glob_results or {}).items():
        result.glob_results[pattern] = [result / fake_path_class(p) for p in paths]
    return result
class TestBuckPlatform(unittest.TestCase):
    """Tests for LazyBuildEnvPartial's deferred build_env injection."""

    def test_lazy_build_env_partial(self):
        def cobol_binary(name, deps=[], build_env=None):
            return (name, deps, build_env)

        lazy_partial = LazyBuildEnvPartial(cobol_binary)
        lazy_partial.build_env = {}
        self.assertEqual(
            ("HAL", [1, 2, 3], {}),
            lazy_partial.invoke(name="HAL", deps=[1, 2, 3]),
        )
        lazy_partial.build_env = {"abc": 789}
        self.assertEqual(
            ("HAL", [1, 2, 3], {"abc": 789}),
            lazy_partial.invoke(name="HAL", deps=[1, 2, 3]),
        )
class TestBuckGlobMixin(object):
    """Shared glob() tests: include/exclude patterns, sorting, dotfiles.

    Concrete subclasses provide ``fake_path`` (posix vs windows flavour)
    and ``assertGlobMatches`` (separator-aware comparison).
    """
    def do_glob(self, *args, **kwargs):
        # subclasses can override this to test a different glob implementation
        return glob_internal(*args, **kwargs)
    def test_glob_includes_simple(self):
        search_base = self.fake_path(
            "foo", glob_results={"*.java": ["A.java", "B.java"]}
        )
        self.assertGlobMatches(
            ["A.java", "B.java"],
            self.do_glob(
                includes=["*.java"],
                excludes=[],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root=".",
            ),
        )
    def test_glob_includes_sort(self):
        # Results must come back sorted regardless of the order the
        # underlying glob returned them in.
        search_base = self.fake_path(
            "foo",
            glob_results={"*.java": ["A.java", "E.java", "D.java", "C.java", "B.java"]},
        )
        self.assertGlobMatches(
            ["A.java", "B.java", "C.java", "D.java", "E.java"],
            self.do_glob(
                includes=["*.java"],
                excludes=[],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root=".",
            ),
        )
    def test_glob_includes_multi(self):
        search_base = self.fake_path(
            "foo",
            glob_results={
                "bar/*.java": ["bar/A.java", "bar/B.java"],
                "baz/*.java": ["baz/C.java", "baz/D.java"],
            },
        )
        self.assertGlobMatches(
            ["bar/A.java", "bar/B.java", "baz/C.java", "baz/D.java"],
            self.do_glob(
                includes=["bar/*.java", "baz/*.java"],
                excludes=[],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root=".",
            ),
        )
    def test_glob_excludes_double_star(self):
        search_base = self.fake_path(
            "foo", glob_results={"**/*.java": ["A.java", "B.java", "Test.java"]}
        )
        self.assertGlobMatches(
            ["A.java", "B.java"],
            self.do_glob(
                includes=["**/*.java"],
                excludes=["**/*Test.java"],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root=".",
            ),
        )
    def test_glob_excludes_multi(self):
        search_base = self.fake_path(
            "foo",
            glob_results={
                "bar/*.java": ["bar/A.java", "bar/B.java"],
                "baz/*.java": ["baz/C.java", "baz/D.java"],
            },
        )
        self.assertGlobMatches(
            ["bar/B.java", "baz/D.java"],
            self.do_glob(
                includes=["bar/*.java", "baz/*.java"],
                excludes=["*/[AC].java"],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root=".",
            ),
        )
    def test_glob_excludes_relative(self):
        # Excludes are interpreted relative to the search base, not
        # anywhere a path component happens to match.
        search_base = self.fake_path(
            "foo",
            glob_results={"**/*.java": ["foo/A.java", "foo/bar/B.java", "bar/C.java"]},
        )
        self.assertGlobMatches(
            ["foo/A.java", "foo/bar/B.java"],
            self.do_glob(
                includes=["**/*.java"],
                excludes=["bar/*.java"],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root=".",
            ),
        )
    def test_glob_project_root_relative_excludes_relative(self):
        search_base = self.fake_path(
            "foo",
            glob_results={"**/*.java": ["foo/A.java", "foo/bar/B.java", "bar/C.java"]},
        )
        self.assertGlobMatches(
            ["bar/C.java"],
            self.do_glob(
                includes=["**/*.java"],
                excludes=[],
                project_root_relative_excludes=["foo/foo/**"],
                include_dotfiles=False,
                search_base=search_base,
                project_root=".",
            ),
        )
    def test_glob_includes_skips_dotfiles(self):
        search_base = self.fake_path(
            "foo", glob_results={"*.java": ["A.java", ".B.java"]}
        )
        self.assertGlobMatches(
            ["A.java"],
            self.do_glob(
                includes=["*.java"],
                excludes=[],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root=".",
            ),
        )
    def test_glob_includes_skips_dot_directories(self):
        search_base = self.fake_path(
            "foo", glob_results={"*.java": ["A.java", ".test/B.java"]}
        )
        self.assertGlobMatches(
            ["A.java"],
            self.do_glob(
                includes=["*.java"],
                excludes=[],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root=".",
            ),
        )
    def test_glob_includes_does_not_skip_dotfiles_if_include_dotfiles(self):
        search_base = self.fake_path(
            "foo", glob_results={"*.java": ["A.java", ".B.java"]}
        )
        self.assertGlobMatches(
            [".B.java", "A.java"],
            self.do_glob(
                includes=["*.java"],
                excludes=[],
                project_root_relative_excludes=[],
                include_dotfiles=True,
                search_base=search_base,
                project_root=".",
            ),
        )
    def test_explicit_exclude_with_file_separator_excludes(self):
        search_base = self.fake_path(
            "foo",
            glob_results={"java/**/*.java": ["java/Include.java", "java/Exclude.java"]},
        )
        self.assertGlobMatches(
            ["java/Include.java"],
            self.do_glob(
                includes=["java/**/*.java"],
                excludes=["java/Exclude.java"],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root=".",
            ),
        )
class TestBuckSubdirGlobMixin(object):
    """Shared subdir_glob() tests, parameterized over the path flavour.

    subdir_glob returns a mapping of relative output path -> matched
    input path; the optional ``prefix`` is prepended to each key.
    """
    def do_subdir_glob(self, *args, **kwargs):
        # subclasses can override this to test a different glob implementation
        return subdir_glob(*args, **kwargs)
    def test_subdir_glob(self):
        # BuildFileContext takes many positional arguments; only the
        # base path (first) matters for these tests, the rest are stubs.
        build_env = BuildFileContext(
            self.fake_path(""),
            None,
            "BUCK",
            None,
            None,
            None,
            [],
            None,
            None,
            None,
            None,
            False,
            False,
            {},
        )
        search_base = self.fake_path(
            "foo",
            glob_results={
                "lib/bar/*.h": ["lib/bar/A.h", "lib/bar/B.h"],
                "lib/baz/*.h": ["lib/baz/C.h", "lib/baz/D.h"],
            },
        )
        self.assertGlobMatches(
            {
                "bar/B.h": "lib/bar/B.h",
                "bar/A.h": "lib/bar/A.h",
                "baz/D.h": "lib/baz/D.h",
                "baz/C.h": "lib/baz/C.h",
            },
            self.do_subdir_glob(
                [("lib", "bar/*.h"), ("lib", "baz/*.h")],
                build_env=build_env,
                search_base=search_base,
            ),
        )
    def test_subdir_glob_with_prefix(self):
        build_env = BuildFileContext(
            self.fake_path(""),
            None,
            "BUCK",
            None,
            None,
            None,
            [],
            None,
            None,
            None,
            None,
            False,
            False,
            {},
        )
        search_base = self.fake_path(
            "foo", glob_results={"lib/bar/*.h": ["lib/bar/A.h", "lib/bar/B.h"]}
        )
        self.assertGlobMatches(
            {"Prefix/bar/B.h": "lib/bar/B.h", "Prefix/bar/A.h": "lib/bar/A.h"},
            self.do_subdir_glob(
                [("lib", "bar/*.h")],
                prefix="Prefix",
                build_env=build_env,
                search_base=search_base,
            ),
        )
class TestBuckPosix(TestBuckGlobMixin, TestBuckSubdirGlobMixin, unittest.TestCase):
    """Runs the shared glob/subdir_glob tests with posix-style fake paths."""
    @staticmethod
    def fake_path(*args, **kwargs):
        return fake_path(FakePosixPath, *args, **kwargs)
    def assertGlobMatches(self, expected, actual):
        # Posix: expectations are already written with '/', compare as-is.
        self.assertEqual(expected, actual)
class TestBuckWindows(TestBuckGlobMixin, TestBuckSubdirGlobMixin, unittest.TestCase):
    """Runs the shared glob/subdir_glob tests with windows-style fake paths."""

    @staticmethod
    def fake_path(*args, **kwargs):
        return fake_path(FakeWindowsPath, *args, **kwargs)

    def assertGlobMatches(self, expected, actual):
        # Tests spell their expectations with '/' to make them easier to
        # write; convert to backslashes before comparing against the
        # windows-flavoured actual results.
        if isinstance(expected, list):
            fixed_expected = [path.replace("/", "\\") for path in expected]
        else:
            fixed_expected = {
                key.replace("/", "\\"): value.replace("/", "\\")
                for key, value in expected.items()
            }
        self.assertEqual(fixed_expected, actual)
class TestBuck(unittest.TestCase):
    """Integration tests for glob_internal against a real filesystem,
    plus tests for the watchman query builder and small helpers.

    Fix: all ``assertEquals`` calls replaced with ``assertEqual`` — the
    former is a deprecated alias that was removed in Python 3.12.
    """
    def test_glob_double_star_integration(self):
        d = tempfile.mkdtemp()
        try:
            subdir = os.path.join(d, "b", "a", "c", "a")
            os.makedirs(subdir)
            f = open(os.path.join(subdir, "A.java"), "w")
            f.close()
            f = open(os.path.join(subdir, "B.java"), "w")
            f.close()
            f = open(os.path.join(subdir, "Test.java"), "w")
            f.close()
            f = open(os.path.join(subdir, ".tmp.java"), "w")
            f.close()
            # A directory whose name matches the pattern must NOT be
            # reported as a file.
            os.makedirs(os.path.join(subdir, "NotAFile.java"))
            self.assertEqual(
                [
                    os.path.join("b", "a", "c", "a", "A.java"),
                    os.path.join("b", "a", "c", "a", "B.java"),
                ],
                glob_internal(
                    includes=["b/a/**/*.java"],
                    excludes=["**/*Test.java"],
                    project_root_relative_excludes=[],
                    include_dotfiles=False,
                    search_base=Path(d),
                    project_root=Path(d),
                ),
            )
        finally:
            shutil.rmtree(d)
    def test_glob_star_does_not_zero_match_dotfile(self):
        # Verify the behavior of the "*." pattern. Note that in the shell,
        # "*." does not match dotfiles by default:
        #
        #   $ ls ~/*.gitconfig
        #   ls: /Users/mbolin/*.gitconfig: No such file or directory
        #
        # By comparison, glob() from the pathlib code in Python 3 will:
        #
        #   >>> list(pathlib.Path(os.getenv('HOME')).glob('*.gitconfig'))
        #   [PosixPath('/Users/mbolin/.gitconfig')]
        #
        # Buck should follow what the shell does here. Note this must also
        # hold true when Watchman is used to implement glob().
        d = tempfile.mkdtemp()
        try:
            a_subdir = os.path.join(d, "a")
            os.makedirs(a_subdir)
            f = open(os.path.join(a_subdir, ".project.toml"), "w")
            f.close()
            f = open(os.path.join(a_subdir, "..project.toml"), "w")
            f.close()
            f = open(os.path.join(a_subdir, ".foo.project.toml"), "w")
            f.close()
            f = open(os.path.join(a_subdir, "Buck.project.toml"), "w")
            f.close()
            b_subdir = os.path.join(d, "b")
            os.makedirs(b_subdir)
            f = open(os.path.join(b_subdir, "B.project.toml"), "w")
            f.close()
            f = open(os.path.join(b_subdir, "B..project.toml"), "w")
            f.close()
            f = open(os.path.join(b_subdir, "Buck.project.toml"), "w")
            f.close()
            def do_glob(pattern, include_dotfiles):
                return glob_internal(
                    includes=[pattern],
                    excludes=[],
                    project_root_relative_excludes=[],
                    include_dotfiles=include_dotfiles,
                    search_base=Path(d),
                    project_root=Path(d),
                )
            # Note that if include_dotfiles=False, the "*" in "*." will never
            # do a zero-length match or match a sequence that starts with "."
            # because "*." appears at the start of a path component boundary.
            self.assertEqual(
                [os.path.join("a", "Buck.project.toml")],
                do_glob("a/*.project.toml", include_dotfiles=False),
            )
            self.assertEqual(
                [
                    os.path.join("a", "..project.toml"),
                    os.path.join("a", ".foo.project.toml"),
                    os.path.join("a", ".project.toml"),
                    os.path.join("a", "Buck.project.toml"),
                ],
                do_glob("a/*.project.toml", include_dotfiles=True),
            )
            # Note that "*." behaves differently if it is not at the start of a
            # path component boundary.
            self.assertEqual(
                [
                    os.path.join("b", "B..project.toml"),
                    os.path.join("b", "B.project.toml"),
                    os.path.join("b", "Buck.project.toml"),
                ],
                do_glob("b/B*.project.toml", include_dotfiles=False),
            )
            self.assertEqual(
                [
                    os.path.join("b", "B..project.toml"),
                    os.path.join("b", "B.project.toml"),
                    os.path.join("b", "Buck.project.toml"),
                ],
                do_glob("b/B*.project.toml", include_dotfiles=True),
            )
        finally:
            shutil.rmtree(d)
    def test_case_preserved(self):
        d = tempfile.mkdtemp()
        try:
            subdir = os.path.join(d, "java")
            os.makedirs(subdir)
            open(os.path.join(subdir, "Main.java"), "w").close()
            self.assertEqual(
                [os.path.join("java", "Main.java")],
                glob_internal(
                    includes=["java/Main.java"],
                    excludes=[],
                    project_root_relative_excludes=[],
                    include_dotfiles=False,
                    search_base=Path(d),
                    project_root=Path(d),
                ),
            )
        finally:
            shutil.rmtree(d)
    def test_watchman_query_params_includes(self):
        query_params = format_watchman_query_params(
            ["**/*.java"], [], False, "/path/to/glob", False
        )
        self.assertEqual(
            {
                "relative_root": "/path/to/glob",
                "path": [""],
                "fields": ["name"],
                "expression": [
                    "allof",
                    ["anyof", ["type", "f"], ["type", "l"]],
                    "exists",
                    ["anyof", ["match", "**/*.java", "wholename", {}]],
                ],
            },
            query_params,
        )
    def test_watchman_query_params_includes_and_excludes(self):
        query_params = format_watchman_query_params(
            ["**/*.java"], ["**/*Test.java"], False, "/path/to/glob", False
        )
        self.assertEqual(
            {
                "relative_root": "/path/to/glob",
                "path": [""],
                "fields": ["name"],
                "expression": [
                    "allof",
                    ["anyof", ["type", "f"], ["type", "l"]],
                    ["not", ["anyof", ["match", "**/*Test.java", "wholename", {}]]],
                    "exists",
                    ["anyof", ["match", "**/*.java", "wholename", {}]],
                ],
            },
            query_params,
        )
    def test_watchman_query_params_glob_generator(self):
        query_params = format_watchman_query_params(
            ["**/*.java"], ["**/*Test.java"], False, "/path/to/glob", True
        )
        self.assertEqual(
            {
                "relative_root": "/path/to/glob",
                "glob": ["**/*.java"],
                "glob_includedotfiles": False,
                "fields": ["name"],
                "expression": [
                    "allof",
                    ["anyof", ["type", "f"], ["type", "l"]],
                    ["not", ["anyof", ["match", "**/*Test.java", "wholename", {}]]],
                ],
            },
            query_params,
        )
    def test_flatten_dicts_overrides_earlier_keys_with_later_ones(self):
        base = {"a": "foo", "b": "bar"}
        override = {"a": "baz"}
        override2 = {"a": 42, "c": "new"}
        self.assertEqual({"a": "baz", "b": "bar"}, flatten_dicts(base, override))
        self.assertEqual(
            {"a": 42, "b": "bar", "c": "new"}, flatten_dicts(base, override, override2)
        )
        # assert none of the input dicts were changed:
        self.assertEqual({"a": "foo", "b": "bar"}, base)
        self.assertEqual({"a": "baz"}, override)
        self.assertEqual({"a": 42, "c": "new"}, override2)
    def test_path_component_starts_with_dot(self):
        self.assertFalse(path_component_starts_with_dot(Path("")))
        self.assertFalse(path_component_starts_with_dot(Path("foo")))
        self.assertFalse(path_component_starts_with_dot(Path("foo/bar")))
        self.assertTrue(path_component_starts_with_dot(Path(".foo/bar")))
        self.assertTrue(path_component_starts_with_dot(Path("foo/.bar")))
        self.assertTrue(path_component_starts_with_dot(Path(".foo/.bar")))
class TestHostInfo(unittest.TestCase):
    """Tests that host_info() maps platform.system()/machine() values to
    the expected single true flag in the returned struct."""
    def test_returns_correct_os(self):
        test_data = {
            "Darwin": "is_macos",
            "Windows": "is_windows",
            "Linux": "is_linux",
            "FreeBSD": "is_freebsd",
            "blarg": "is_unknown",
            "unknown": "is_unknown",
        }
        for platform_value, expected_true_field in test_data.items():
            struct = host_info(
                platform_system=lambda: platform_value,
                platform_machine=lambda: "x86_64",
            )
            self.validate_host_info_struct(
                struct, "os", expected_true_field, "platform.system", platform_value
            )
    def test_returns_correct_arch(self):
        test_data = {
            "aarch64": "is_aarch64",
            "arm": "is_arm",
            "armeb": "is_armeb",
            "i386": "is_i386",
            "mips": "is_mips",
            "mips64": "is_mips64",
            "mipsel": "is_mipsel",
            "mipsel64": "is_mipsel64",
            "powerpc": "is_powerpc",
            "ppc64": "is_ppc64",
            "unknown": "is_unknown",
            "blarg": "is_unknown",
            "x86_64": "is_x86_64",
            "amd64": "is_x86_64",
            "arm64": "is_aarch64",
        }
        for platform_value, expected_true_field in test_data.items():
            struct = host_info(
                platform_system=lambda: "Darwin",
                platform_machine=lambda: platform_value,
            )
            self.validate_host_info_struct(
                struct, "arch", expected_true_field, "platform.machine", platform_value
            )
    def validate_host_info_struct(
        self, struct, top_level, true_key, platform_func, platform_value
    ):
        # Helper: exactly one flag (true_key) must be set in the
        # struct's `top_level` sub-struct; every other flag must be false.
        top_level_struct = getattr(struct, top_level)
        for field in top_level_struct._fields:
            if field == true_key:
                continue
            self.assertFalse(
                getattr(top_level_struct, field),
                "Expected {}.{} to be false in {} with {} returning "
                "value {}".format(
                    top_level, field, struct, platform_func, platform_value
                ),
            )
        self.assertTrue(
            getattr(top_level_struct, true_key),
            "Expected {}.{} to be false in {} with {} returning "
            "value {}".format(
                top_level, true_key, struct, platform_func, platform_value
            ),
        )
class TestMemoized(unittest.TestCase):
    """Tests for util.memoized (caching, deepcopy, cache keys).

    NOTE(review): the last three methods exercise get_mismatched_args,
    not memoized — they appear to be misplaced in this class.
    """
    def _makeone(self, func, *args, **kwargs):
        from .util import memoized
        return memoized(*args, **kwargs)(func)
    def test_cache_none(self):
        # A cached None must be returned as-is, not treated as a miss.
        decorated = self._makeone(lambda _retval=iter([None, "foo"]): next(_retval))
        uncached = decorated()
        cached = decorated()
        self.assertEqual(uncached, cached)
        self.assertTrue(cached is None)
    def test_no_deepcopy(self):
        decorated = self._makeone(lambda: [], deepcopy=False)
        initial = decorated()
        cached = decorated()
        self.assertTrue(initial is cached)
    def test_deepcopy(self):
        decorated = self._makeone(lambda: [{}])
        initial = decorated()
        cached = decorated()
        self.assertTrue(initial is not cached)
        initial[0]["foo"] = "bar"
        self.assertTrue(cached[0] == {})
    def test_cachekey(self):
        decorated = self._makeone(
            # note that in Python 2 without hash randomisation, 'bar' and 'baz' will collide in
            # a small dictionary, as their hash keys differ by 8.
            lambda foo, bar="baz", baz="bar", _retval=itertools.count(): next(_retval)
        )
        initial = decorated(42, baz="spam", bar="eggs")
        cached = decorated(42, bar="eggs", baz="spam")
        different_keyword_values = decorated(42, bar="eric", baz="idle")
        self.assertEqual(initial, cached)
        self.assertNotEqual(initial, different_keyword_values)
    def test_custom_cachekey(self):
        decorated = self._makeone(
            lambda foo, bar="baz", _retval=itertools.count(): next(_retval),
            keyfunc=lambda foo, **kwargs: foo,
        )
        initial = decorated(42, bar="spam")
        cached = decorated(42, bar="ignored")
        different_foo = decorated(81, bar="spam")
        self.assertEqual(initial, cached)
        self.assertNotEqual(initial, different_foo)
    def test_missing_foo(self):
        def fn(foo, bar=1, baz=None):
            pass
        missing, extra = get_mismatched_args(fn, [], {})
        self.assertEqual(missing, ["foo"])
        self.assertEqual(extra, [])
    def test_extra_kwargs(self):
        def fn(foo, bar=1, baz=None):
            pass
        missing, extra = get_mismatched_args(
            fn, [], {"parrot": "dead", "trout": "slapped"}
        )
        self.assertEqual(missing, ["foo"])
        self.assertEqual(extra, ["parrot", "trout"])
    def test_foo_as_kwarg(self):
        def fn(foo, bar=1, baz=None):
            pass
        missing, extra = get_mismatched_args(fn, [], {"foo": "value"})
        self.assertEqual(missing, [])
        self.assertEqual(extra, [])
# Support running this module's tests directly: `python buck_test.py`.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
flyher/pymo | android/pgs4a-0.9.6/python-install/bin/smtpd.py | 4 | 18597 | #!/home/tom/ab/android/python-for-android/build/python-install/bin/python2.7
"""An RFC 2821 smtp proxy.
Usage: %(program)s [options] [localhost:localport [remotehost:remoteport]]
Options:
--nosetuid
-n
This program generally tries to setuid `nobody', unless this flag is
set. The setuid call will fail if this program is not run as root (in
which case, use this flag).
--version
-V
Print the version number and exit.
--class classname
-c classname
Use `classname' as the concrete SMTP proxy class. Uses `PureProxy' by
default.
--debug
-d
Turn on debugging prints.
--help
-h
Print this message and exit.
Version: %(__version__)s
If localhost is not given then `localhost' is used, and if localport is not
given then 8025 is used. If remotehost is not given then `localhost' is used,
and if remoteport is not given, then 25 is used.
"""
# Overview:
#
# This file implements the minimal SMTP protocol as defined in RFC 821. It
# has a hierarchy of classes which implement the backend functionality for the
# smtpd. A number of classes are provided:
#
# SMTPServer - the base class for the backend. Raises NotImplementedError
# if you try to use it.
#
# DebuggingServer - simply prints each message it receives on stdout.
#
# PureProxy - Proxies all messages to a real smtpd which does final
# delivery. One known problem with this class is that it doesn't handle
# SMTP errors from the backend server at all. This should be fixed
# (contributions are welcome!).
#
# MailmanProxy - An experimental hack to work with GNU Mailman
# <www.list.org>. Using this server as your real incoming smtpd, your
# mailhost will automatically recognize and accept mail destined to Mailman
# lists when those lists are created. Every message not destined for a list
# gets forwarded to a real backend smtpd, as with PureProxy. Again, errors
# are not handled correctly yet.
#
# Please note that this script requires Python 2.0
#
# Author: Barry Warsaw <barry@python.org>
#
# TODO:
#
# - support mailbox delivery
# - alias files
# - ESMTP
# - handle error codes from the backend smtpd
import sys
import os
import errno
import getopt
import time
import socket
import asyncore
import asynchat
__all__ = ["SMTPServer","DebuggingServer","PureProxy","MailmanProxy"]
program = sys.argv[0]
__version__ = 'Python SMTP proxy version 0.2'
class Devnull:
    """Sink that silently discards all writes (the default DEBUGSTREAM)."""
    def write(self, msg): pass
    def flush(self): pass
# Debug output sink; replaced with sys.stderr when -d/--debug is given.
DEBUGSTREAM = Devnull()
NEWLINE = '\n'
EMPTYSTRING = ''
COMMASPACE = ', '
def usage(code, msg=''):
    """Print the module docstring (usage text) plus an optional message
    to stderr and exit with the given status code."""
    print >> sys.stderr, __doc__ % globals()
    if msg:
        print >> sys.stderr, msg
    sys.exit(code)
class SMTPChannel(asynchat.async_chat):
    """Async channel handling one SMTP client connection.

    Implements a two-state machine: COMMAND (line-oriented SMTP verbs,
    terminator '\\r\\n') and DATA (message body, terminator '\\r\\n.\\r\\n').
    Completed messages are handed to the owning server's process_message().
    """
    COMMAND = 0
    DATA = 1
    def __init__(self, server, conn, addr):
        asynchat.async_chat.__init__(self, conn)
        self.__server = server
        self.__conn = conn
        self.__addr = addr
        self.__line = []
        self.__state = self.COMMAND
        self.__greeting = 0
        self.__mailfrom = None
        self.__rcpttos = []
        self.__data = ''
        self.__fqdn = socket.getfqdn()
        try:
            self.__peer = conn.getpeername()
        except socket.error, err:
            # a race condition may occur if the other end is closing
            # before we can get the peername
            self.close()
            if err[0] != errno.ENOTCONN:
                raise
            return
        print >> DEBUGSTREAM, 'Peer:', repr(self.__peer)
        # RFC 821 greeting: 220 <fqdn> <server version>
        self.push('220 %s %s' % (self.__fqdn, __version__))
        self.set_terminator('\r\n')
    # Overrides base class for convenience
    def push(self, msg):
        # Every SMTP response line is CRLF-terminated.
        asynchat.async_chat.push(self, msg + '\r\n')
    # Implementation of base class abstract method
    def collect_incoming_data(self, data):
        self.__line.append(data)
    # Implementation of base class abstract method
    def found_terminator(self):
        """Dispatch a complete command line, or finish a DATA payload."""
        line = EMPTYSTRING.join(self.__line)
        print >> DEBUGSTREAM, 'Data:', repr(line)
        self.__line = []
        if self.__state == self.COMMAND:
            if not line:
                self.push('500 Error: bad syntax')
                return
            method = None
            i = line.find(' ')
            if i < 0:
                command = line.upper()
                arg = None
            else:
                command = line[:i].upper()
                arg = line[i+1:].strip()
            # Commands dispatch to smtp_<VERB> methods; unknown verbs 502.
            method = getattr(self, 'smtp_' + command, None)
            if not method:
                self.push('502 Error: command "%s" not implemented' % command)
                return
            method(arg)
            return
        else:
            if self.__state != self.DATA:
                self.push('451 Internal confusion')
                return
            # Remove extraneous carriage returns and de-transparency according
            # to RFC 821, Section 4.5.2.
            data = []
            for text in line.split('\r\n'):
                if text and text[0] == '.':
                    data.append(text[1:])
                else:
                    data.append(text)
            self.__data = NEWLINE.join(data)
            status = self.__server.process_message(self.__peer,
                                                   self.__mailfrom,
                                                   self.__rcpttos,
                                                   self.__data)
            # Reset the envelope for the next message on this connection.
            self.__rcpttos = []
            self.__mailfrom = None
            self.__state = self.COMMAND
            self.set_terminator('\r\n')
            if not status:
                self.push('250 Ok')
            else:
                self.push(status)
    # SMTP and ESMTP commands
    def smtp_HELO(self, arg):
        if not arg:
            self.push('501 Syntax: HELO hostname')
            return
        if self.__greeting:
            self.push('503 Duplicate HELO/EHLO')
        else:
            self.__greeting = arg
            self.push('250 %s' % self.__fqdn)
    def smtp_NOOP(self, arg):
        if arg:
            self.push('501 Syntax: NOOP')
        else:
            self.push('250 Ok')
    def smtp_QUIT(self, arg):
        # args is ignored
        self.push('221 Bye')
        self.close_when_done()
    # factored
    def __getaddr(self, keyword, arg):
        """Extract the address following `keyword` (e.g. 'FROM:') from
        `arg`, stripping optional angle brackets; return None on a
        keyword mismatch."""
        address = None
        keylen = len(keyword)
        if arg[:keylen].upper() == keyword:
            address = arg[keylen:].strip()
            if not address:
                pass
            elif address[0] == '<' and address[-1] == '>' and address != '<>':
                # Addresses can be in the form <person@dom.com> but watch out
                # for null address, e.g. <>
                address = address[1:-1]
        return address
    def smtp_MAIL(self, arg):
        print >> DEBUGSTREAM, '===> MAIL', arg
        address = self.__getaddr('FROM:', arg) if arg else None
        if not address:
            self.push('501 Syntax: MAIL FROM:<address>')
            return
        if self.__mailfrom:
            self.push('503 Error: nested MAIL command')
            return
        self.__mailfrom = address
        print >> DEBUGSTREAM, 'sender:', self.__mailfrom
        self.push('250 Ok')
    def smtp_RCPT(self, arg):
        print >> DEBUGSTREAM, '===> RCPT', arg
        if not self.__mailfrom:
            self.push('503 Error: need MAIL command')
            return
        address = self.__getaddr('TO:', arg) if arg else None
        if not address:
            self.push('501 Syntax: RCPT TO: <address>')
            return
        self.__rcpttos.append(address)
        print >> DEBUGSTREAM, 'recips:', self.__rcpttos
        self.push('250 Ok')
    def smtp_RSET(self, arg):
        if arg:
            self.push('501 Syntax: RSET')
            return
        # Resets the sender, recipients, and data, but not the greeting
        self.__mailfrom = None
        self.__rcpttos = []
        self.__data = ''
        self.__state = self.COMMAND
        self.push('250 Ok')
    def smtp_DATA(self, arg):
        if not self.__rcpttos:
            self.push('503 Error: need RCPT command')
            return
        if arg:
            self.push('501 Syntax: DATA')
            return
        self.__state = self.DATA
        self.set_terminator('\r\n.\r\n')
        self.push('354 End data with <CR><LF>.<CR><LF>')
class SMTPServer(asyncore.dispatcher):
    """Abstract async SMTP server.

    Listens on `localaddr` and spawns an SMTPChannel per connection;
    subclasses implement process_message() to handle completed messages
    (optionally relaying them to `remoteaddr`).
    """
    def __init__(self, localaddr, remoteaddr):
        self._localaddr = localaddr
        self._remoteaddr = remoteaddr
        asyncore.dispatcher.__init__(self)
        try:
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            # try to re-use a server port if possible
            self.set_reuse_addr()
            self.bind(localaddr)
            self.listen(5)
        except:
            # cleanup asyncore.socket_map before raising
            self.close()
            raise
        else:
            print >> DEBUGSTREAM, \
                  '%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (
                self.__class__.__name__, time.ctime(time.time()),
                localaddr, remoteaddr)
    def handle_accept(self):
        pair = self.accept()
        if pair is not None:
            conn, addr = pair
            print >> DEBUGSTREAM, 'Incoming connection from %s' % repr(addr)
            channel = SMTPChannel(self, conn, addr)
    # API for "doing something useful with the message"
    def process_message(self, peer, mailfrom, rcpttos, data):
        """Override this abstract method to handle messages from the client.

        peer is a tuple containing (ipaddr, port) of the client that made the
        socket connection to our smtp port.

        mailfrom is the raw address the client claims the message is coming
        from.

        rcpttos is a list of raw addresses the client wishes to deliver the
        message to.

        data is a string containing the entire full text of the message,
        headers (if supplied) and all.  It has been `de-transparencied'
        according to RFC 821, Section 4.5.2.  In other words, a line
        containing a `.' followed by other text has had the leading dot
        removed.

        This function should return None, for a normal `250 Ok' response;
        otherwise it returns the desired response string in RFC 821 format.
        """
        raise NotImplementedError
class DebuggingServer(SMTPServer):
    """SMTP server that just prints each received message to stdout."""
    # Do something with the gathered message
    def process_message(self, peer, mailfrom, rcpttos, data):
        inheaders = 1
        lines = data.split('\n')
        print '---------- MESSAGE FOLLOWS ----------'
        for line in lines:
            # headers first
            if inheaders and not line:
                # First blank line ends the headers; inject X-Peer there.
                print 'X-Peer:', peer[0]
                inheaders = 0
            print line
        print '------------ END MESSAGE ------------'
class PureProxy(SMTPServer):
    """SMTP server that relays every message to the configured remote
    smtpd, adding an X-Peer header identifying the original client."""
    def process_message(self, peer, mailfrom, rcpttos, data):
        lines = data.split('\n')
        # Look for the last header
        i = 0
        for line in lines:
            if not line:
                break
            i += 1
        lines.insert(i, 'X-Peer: %s' % peer[0])
        data = NEWLINE.join(lines)
        refused = self._deliver(mailfrom, rcpttos, data)
        # TBD: what to do with refused addresses?
        print >> DEBUGSTREAM, 'we got some refusals:', refused
    def _deliver(self, mailfrom, rcpttos, data):
        """Relay the message via smtplib; return the dict of refused
        recipients (address -> (code, message))."""
        import smtplib
        refused = {}
        try:
            s = smtplib.SMTP()
            s.connect(self._remoteaddr[0], self._remoteaddr[1])
            try:
                refused = s.sendmail(mailfrom, rcpttos, data)
            finally:
                s.quit()
        except smtplib.SMTPRecipientsRefused, e:
            print >> DEBUGSTREAM, 'got SMTPRecipientsRefused'
            refused = e.recipients
        except (socket.error, smtplib.SMTPException), e:
            print >> DEBUGSTREAM, 'got', e.__class__
            # All recipients were refused.  If the exception had an associated
            # error code, use it.  Otherwise,fake it with a non-triggering
            # exception code.
            errcode = getattr(e, 'smtp_code', -1)
            errmsg = getattr(e, 'smtp_error', 'ignore')
            for r in rcpttos:
                refused[r] = (errcode, errmsg)
        return refused
class MailmanProxy(PureProxy):
    """Experimental proxy that delivers list-addressed mail straight into
    GNU Mailman queues and forwards everything else like PureProxy."""
    def process_message(self, peer, mailfrom, rcpttos, data):
        from cStringIO import StringIO
        from Mailman import Utils
        from Mailman import Message
        from Mailman import MailList
        # If the message is to a Mailman mailing list, then we'll invoke the
        # Mailman script directly, without going through the real smtpd.
        # Otherwise we'll forward it to the local proxy for disposition.
        listnames = []
        for rcpt in rcpttos:
            local = rcpt.lower().split('@')[0]
            # We allow the following variations on the theme
            #   listname
            #   listname-admin
            #   listname-owner
            #   listname-request
            #   listname-join
            #   listname-leave
            parts = local.split('-')
            if len(parts) > 2:
                continue
            listname = parts[0]
            if len(parts) == 2:
                command = parts[1]
            else:
                command = ''
            if not Utils.list_exists(listname) or command not in (
                    '', 'admin', 'owner', 'request', 'join', 'leave'):
                continue
            listnames.append((rcpt, listname, command))
        # Remove all list recipients from rcpttos and forward what we're not
        # going to take care of ourselves.  Linear removal should be fine
        # since we don't expect a large number of recipients.
        for rcpt, listname, command in listnames:
            rcpttos.remove(rcpt)
        # If there's any non-list destined recipients left,
        print >> DEBUGSTREAM, 'forwarding recips:', ' '.join(rcpttos)
        if rcpttos:
            refused = self._deliver(mailfrom, rcpttos, data)
            # TBD: what to do with refused addresses?
            print >> DEBUGSTREAM, 'we got refusals:', refused
        # Now deliver directly to the list commands
        mlists = {}
        s = StringIO(data)
        msg = Message.Message(s)
        # These headers are required for the proper execution of Mailman.  All
        # MTAs in existence seem to add these if the original message doesn't
        # have them.
        if not msg.getheader('from'):
            msg['From'] = mailfrom
        if not msg.getheader('date'):
            msg['Date'] = time.ctime(time.time())
        for rcpt, listname, command in listnames:
            print >> DEBUGSTREAM, 'sending message to', rcpt
            # Cache MailList objects so each list is opened at most once.
            mlist = mlists.get(listname)
            if not mlist:
                mlist = MailList.MailList(listname, lock=0)
                mlists[listname] = mlist
            # dispatch on the type of command
            if command == '':
                # post
                msg.Enqueue(mlist, tolist=1)
            elif command == 'admin':
                msg.Enqueue(mlist, toadmin=1)
            elif command == 'owner':
                msg.Enqueue(mlist, toowner=1)
            elif command == 'request':
                msg.Enqueue(mlist, torequest=1)
            elif command in ('join', 'leave'):
                # TBD: this is a hack!
                if command == 'join':
                    msg['Subject'] = 'subscribe'
                else:
                    msg['Subject'] = 'unsubscribe'
                msg.Enqueue(mlist, torequest=1)
class Options:
    """Runtime options for the proxy, with defaults; overridden by parseargs()."""
    setuid = 1  # drop privileges to user 'nobody' after startup (see __main__ block)
    classname = 'PureProxy'  # name of the SMTPServer subclass to instantiate
def parseargs():
    """Parse command-line flags and positional host:port arguments.

    Returns an Options instance with localhost/localport and
    remotehost/remoteport filled in.  Calls usage() (which exits)
    on any malformed input.
    """
    global DEBUGSTREAM
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], 'nVhc:d',
            ['class=', 'nosetuid', 'version', 'help', 'debug'])
    except getopt.error, e:
        usage(1, e)
    options = Options()
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-V', '--version'):
            print >> sys.stderr, __version__
            sys.exit(0)
        elif opt in ('-n', '--nosetuid'):
            options.setuid = 0
        elif opt in ('-c', '--class'):
            options.classname = arg
        elif opt in ('-d', '--debug'):
            DEBUGSTREAM = sys.stderr
    # parse the rest of the arguments: zero, one, or two host:port specs;
    # missing specs fall back to localhost defaults.
    if len(args) < 1:
        localspec = 'localhost:8025'
        remotespec = 'localhost:25'
    elif len(args) < 2:
        localspec = args[0]
        remotespec = 'localhost:25'
    elif len(args) < 3:
        localspec = args[0]
        remotespec = args[1]
    else:
        usage(1, 'Invalid arguments: %s' % COMMASPACE.join(args))
    # split into host/port pairs; the port half must be an integer
    i = localspec.find(':')
    if i < 0:
        usage(1, 'Bad local spec: %s' % localspec)
    options.localhost = localspec[:i]
    try:
        options.localport = int(localspec[i+1:])
    except ValueError:
        usage(1, 'Bad local port: %s' % localspec)
    i = remotespec.find(':')
    if i < 0:
        usage(1, 'Bad remote spec: %s' % remotespec)
    options.remotehost = remotespec[:i]
    try:
        options.remoteport = int(remotespec[i+1:])
    except ValueError:
        usage(1, 'Bad remote port: %s' % remotespec)
    return options
if __name__ == '__main__':
    options = parseargs()
    # Become nobody: unless -n/--nosetuid was given, drop root privileges
    # to the 'nobody' account before serving.
    if options.setuid:
        try:
            import pwd
        except ImportError:
            print >> sys.stderr, \
                'Cannot import module "pwd"; try running with -n option.'
            sys.exit(1)
        nobody = pwd.getpwnam('nobody')[2]
        try:
            os.setuid(nobody)
        except OSError, e:
            # EPERM means we were not root to begin with; anything else is fatal.
            if e.errno != errno.EPERM: raise
            print >> sys.stderr, \
                'Cannot setuid "nobody"; try running with -n option.'
            sys.exit(1)
    # Resolve the server class: a dotted name is imported from its module,
    # a bare name is looked up in this script's own namespace.
    classname = options.classname
    if "." in classname:
        lastdot = classname.rfind(".")
        mod = __import__(classname[:lastdot], globals(), locals(), [""])
        classname = classname[lastdot+1:]
    else:
        import __main__ as mod
    class_ = getattr(mod, classname)
    proxy = class_((options.localhost, options.localport),
                   (options.remotehost, options.remoteport))
    # Run the asyncore event loop until interrupted with Ctrl-C.
    try:
        asyncore.loop()
    except KeyboardInterrupt:
        pass
alisidd/tensorflow | tensorflow/python/client/device_lib.py | 149 | 1308 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Python interface for creating TensorFlow servers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import device_attributes_pb2
from tensorflow.python import pywrap_tensorflow
def list_local_devices():
  """Return the devices available in the local process.

  Returns:
    A list of `DeviceAttributes` protocol buffers, one per local device.
  """
  devices = []
  for serialized in pywrap_tensorflow.list_devices():
    attributes = device_attributes_pb2.DeviceAttributes()
    attributes.ParseFromString(serialized)
    devices.append(attributes)
  return devices
| apache-2.0 |
lpsinger/astropy | docs/conftest.py | 8 | 1593 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file needs to be included here to make sure commands such
# as ``pytest docs/...`` works, since this
# will ignore the conftest.py file at the root of the repository
# and the one in astropy/conftest.py
import os
import tempfile
import pytest
# Make sure we use temporary directories for the config and cache
# so that the tests are insensitive to local configuration.
# Point both XDG roots at throwaway directories for this process.
os.environ['XDG_CONFIG_HOME'] = tempfile.mkdtemp('astropy_config')
os.environ['XDG_CACHE_HOME'] = tempfile.mkdtemp('astropy_cache')
# Create the 'astropy' subdirectory under each root — presumably astropy
# expects these to already exist (TODO confirm against astropy's config code).
os.mkdir(os.path.join(os.environ['XDG_CONFIG_HOME'], 'astropy'))
os.mkdir(os.path.join(os.environ['XDG_CACHE_HOME'], 'astropy'))
# Note that we don't need to change the environment variables back or remove
# them after testing, because they are only changed for the duration of the
# Python process, and this configuration only matters if running pytest
# directly, not from e.g. an IPython session.
@pytest.fixture(autouse=True)
def _docdir(request):
    """Execute doctests inside a temporary directory so files they create
    never land in the repository.

    Only doctestplus text-file items are redirected; io.rst is exempt
    because it reads files and doesn't write.
    """
    plugin = request.config.pluginmanager.getplugin("doctestplus")
    is_text_doctest = isinstance(
        request.node.parent, plugin._doctest_textfile_item_cls
    )
    if is_text_doctest and "io.rst" not in request.node.name:
        with request.getfixturevalue('tmpdir').as_cwd():
            yield
    else:
        yield
| bsd-3-clause |
thaim/ansible | lib/ansible/modules/network/f5/bigip_pool_member.py | 21 | 54988 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# Copyright: (c) 2013, Matt Hite <mhite@hotmail.com>
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata block (maturity status and support owner).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_pool_member
short_description: Manages F5 BIG-IP LTM pool members
description:
- Manages F5 BIG-IP LTM pool members via iControl SOAP API.
version_added: 1.4
options:
name:
description:
- Name of the node to create, or re-use, when creating a new pool member.
- This parameter is optional and, if not specified, a node name will be
created automatically from either the specified C(address) or C(fqdn).
- The C(enabled) state is an alias of C(present).
type: str
version_added: 2.6
state:
description:
- Pool member state.
type: str
required: True
choices:
- present
- absent
- enabled
- disabled
- forced_offline
default: present
pool:
description:
- Pool name. This pool must exist.
type: str
required: True
partition:
description:
- Partition to manage resources on.
type: str
default: Common
address:
description:
- IP address of the pool member. This can be either IPv4 or IPv6. When creating a
new pool member, one of either C(address) or C(fqdn) must be provided. This
parameter cannot be updated after it is set.
type: str
aliases:
- ip
- host
version_added: 2.2
fqdn:
description:
- FQDN name of the pool member. This can be any name that is a valid RFC 1123 DNS
name. Therefore, the only characters that can be used are "A" to "Z",
"a" to "z", "0" to "9", the hyphen ("-") and the period (".").
    - FQDN names must include at least one period, delineating the host from
      the domain. ex. C(host.domain).
- FQDN names must end with a letter or a number.
- When creating a new pool member, one of either C(address) or C(fqdn) must be
provided. This parameter cannot be updated after it is set.
type: str
aliases:
- hostname
version_added: 2.6
port:
description:
- Pool member port.
- This value cannot be changed after it has been set.
type: int
required: True
connection_limit:
description:
- Pool member connection limit. Setting this to 0 disables the limit.
type: int
description:
description:
- Pool member description.
type: str
rate_limit:
description:
- Pool member rate limit (connections-per-second). Setting this to 0
disables the limit.
type: int
ratio:
description:
- Pool member ratio weight. Valid values range from 1 through 100.
New pool members -- unless overridden with this value -- default
to 1.
type: int
preserve_node:
description:
- When state is C(absent) attempts to remove the node that the pool
member references.
- The node will not be removed if it is still referenced by other pool
members. If this happens, the module will not raise an error.
- Setting this to C(yes) disables this behavior.
type: bool
version_added: 2.1
priority_group:
description:
- Specifies a number representing the priority group for the pool member.
- When adding a new member, the default is 0, meaning that the member has no priority.
- To specify a priority, you must activate priority group usage when you
create a new pool or when adding or removing pool members. When activated,
the system load balances traffic according to the priority group number
assigned to the pool member.
- The higher the number, the higher the priority, so a member with a priority
of 3 has higher priority than a member with a priority of 1.
type: int
version_added: 2.5
fqdn_auto_populate:
description:
- Specifies whether the system automatically creates ephemeral nodes using
the IP addresses returned by the resolution of a DNS query for a node
defined by an FQDN.
- When C(yes), the system generates an ephemeral node for each IP address
returned in response to a DNS query for the FQDN of the node. Additionally,
when a DNS response indicates the IP address of an ephemeral node no longer
exists, the system deletes the ephemeral node.
- When C(no), the system resolves a DNS query for the FQDN of the node
with the single IP address associated with the FQDN.
- When creating a new pool member, the default for this parameter is C(yes).
- Once set this parameter cannot be changed afterwards.
- This parameter is ignored when C(reuse_nodes) is C(yes).
type: bool
version_added: 2.6
reuse_nodes:
description:
- Reuses node definitions if requested.
type: bool
default: yes
version_added: 2.6
monitors:
description:
- Specifies the health monitors that the system currently uses to monitor
this resource.
type: list
version_added: 2.8
availability_requirements:
description:
- Specifies, if you activate more than one health monitor, the number of health
monitors that must receive successful responses in order for the link to be
considered available.
- Specifying an empty string will remove the monitors and revert to inheriting from pool (default).
- Specifying C(none) value will remove any health monitoring from the member completely.
suboptions:
type:
description:
- Monitor rule type when C(monitors) is specified.
- When creating a new pool, if this value is not specified, the default of
'all' will be used.
type: str
choices:
- all
- at_least
at_least:
description:
- Specifies the minimum number of active health monitors that must be successful
before the link is considered up.
- This parameter is only relevant when a C(type) of C(at_least) is used.
- This parameter will be ignored if a type of C(all) is used.
type: int
type: dict
version_added: 2.8
ip_encapsulation:
description:
- Specifies the IP encapsulation using either IPIP (IP encapsulation within IP,
RFC 2003) or GRE (Generic Router Encapsulation, RFC 2784) on outbound packets
(from BIG-IP system to server-pool member).
- When C(none), disables IP encapsulation.
- When C(inherit), inherits IP encapsulation setting from the member's pool.
      - When any other value, the member uses the specified member-specific encapsulation profile.
type: str
version_added: 2.8
aggregate:
description:
- List of pool member definitions to be created, modified or removed.
- When using C(aggregates) if one of the aggregate definitions is invalid, the aggregate run will fail,
indicating the error it last encountered.
- The module will C(NOT) rollback any changes it has made prior to encountering the error.
- The module also will not indicate what changes were made prior to failure, therefore it is strongly advised
to run the module in check mode to make basic validation, prior to module execution.
type: list
aliases:
- members
version_added: 2.8
replace_all_with:
description:
- Remove members not defined in the C(aggregate) parameter.
- This operation is all or none, meaning that it will stop if there are some pool members
that cannot be removed.
type: bool
default: no
aliases:
- purge
version_added: 2.8
notes:
- In previous versions of this module, which used the SDK, the C(name) parameter would act as C(fqdn) if C(address) or
C(fqdn) were not provided.
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = '''
- name: Add pool member
bigip_pool_member:
pool: my-pool
partition: Common
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
description: web server
connection_limit: 100
rate_limit: 50
ratio: 2
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Modify pool member ratio and description
bigip_pool_member:
pool: my-pool
partition: Common
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
ratio: 1
description: nginx server
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Remove pool member from pool
bigip_pool_member:
state: absent
pool: my-pool
partition: Common
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Force pool member offline
bigip_pool_member:
state: forced_offline
pool: my-pool
partition: Common
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Create members with priority groups
bigip_pool_member:
pool: my-pool
partition: Common
host: "{{ item.address }}"
name: "{{ item.name }}"
priority_group: "{{ item.priority_group }}"
port: 80
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
loop:
- address: 1.1.1.1
name: web1
priority_group: 4
- address: 2.2.2.2
name: web2
priority_group: 3
- address: 3.3.3.3
name: web3
priority_group: 2
- address: 4.4.4.4
name: web4
priority_group: 1
- name: Add pool members aggregate
bigip_pool_member:
pool: my-pool
aggregate:
- host: 192.168.1.1
partition: Common
port: 80
description: web server
connection_limit: 100
rate_limit: 50
ratio: 2
- host: 192.168.1.2
partition: Common
port: 80
description: web server
connection_limit: 100
rate_limit: 50
ratio: 2
- host: 192.168.1.3
partition: Common
port: 80
description: web server
connection_limit: 100
rate_limit: 50
ratio: 2
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add pool members aggregate, remove non aggregates
bigip_pool_member:
pool: my-pool
aggregate:
- host: 192.168.1.1
partition: Common
port: 80
description: web server
connection_limit: 100
rate_limit: 50
ratio: 2
- host: 192.168.1.2
partition: Common
port: 80
description: web server
connection_limit: 100
rate_limit: 50
ratio: 2
- host: 192.168.1.3
partition: Common
port: 80
description: web server
connection_limit: 100
rate_limit: 50
ratio: 2
replace_all_with: yes
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
'''
RETURN = '''
rate_limit:
description: The new rate limit, in connections per second, of the pool member.
returned: changed
type: int
sample: 100
connection_limit:
description: The new connection limit of the pool member
returned: changed
type: int
sample: 1000
description:
description: The new description of pool member.
returned: changed
type: str
sample: My pool member
ratio:
description: The new pool member ratio weight.
returned: changed
type: int
sample: 50
priority_group:
description: The new priority group.
returned: changed
type: int
sample: 3
fqdn_auto_populate:
description: Whether FQDN auto population was set on the member or not.
returned: changed
type: bool
sample: True
fqdn:
description: The FQDN of the pool member.
returned: changed
type: str
sample: foo.bar.com
address:
description: The address of the pool member.
returned: changed
type: str
sample: 1.2.3.4
monitors:
description: The new list of monitors for the resource.
returned: changed
type: list
sample: ['/Common/monitor1', '/Common/monitor2']
replace_all_with:
description: Purges all non-aggregate pool members from device
returned: changed
type: bool
sample: yes
'''
import os
import re
from copy import deepcopy
from ansible.module_utils.urls import urlparse
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.common.utils import remove_default_spec
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import is_valid_hostname
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.compare import cmp_str_with_none
from library.module_utils.network.f5.ipaddress import is_valid_ip
from library.module_utils.network.f5.ipaddress import validate_ip_v6_address
from library.module_utils.network.f5.icontrol import TransactionContextManager
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import is_valid_hostname
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.compare import cmp_str_with_none
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
from ansible.module_utils.network.f5.ipaddress import validate_ip_v6_address
from ansible.module_utils.network.f5.icontrol import TransactionContextManager
class Parameters(AnsibleF5Parameters):
    """Shared parameter bookkeeping for the pool member module."""

    # REST API attribute name -> module option name translations.
    api_map = {
        'rateLimit': 'rate_limit',
        'connectionLimit': 'connection_limit',
        'priorityGroup': 'priority_group',
        'monitor': 'monitors',
        'inheritProfile': 'inherit_profile',
        'profiles': 'ip_encapsulation',
    }
    # Attributes (API-side names) sent to the device on create/update.
    api_attributes = [
        'rateLimit',
        'connectionLimit',
        'description',
        'ratio',
        'priorityGroup',
        'address',
        'fqdn',
        'session',
        'state',
        'monitor',
        # These two settings are for IP Encapsulation
        'inheritProfile',
        'profiles',
    ]
    # Values reported back to the user in the task result.
    returnables = [
        'rate_limit',
        'connection_limit',
        'description',
        'ratio',
        'priority_group',
        'fqdn_auto_populate',
        'session',
        'state',
        'fqdn',
        'address',
        'monitors',
        # IP Encapsulation related
        'inherit_profile',
        'ip_encapsulation',
    ]
    # Values compared by the Difference class to decide whether to update.
    updatables = [
        'rate_limit',
        'connection_limit',
        'description',
        'ratio',
        'priority_group',
        'fqdn_auto_populate',
        'state',
        'monitors',
        'inherit_profile',
        'ip_encapsulation',
    ]
class ModuleParameters(Parameters):
    """Parameters as supplied by the user in the Ansible task."""

    @property
    def full_name(self):
        """Return the member name in BIG-IP ``<name>:<port>`` form.

        IPv6 names use ``.`` as the delimiter because the address itself
        contains colons.
        """
        delimiter = ':'
        try:
            if validate_ip_v6_address(self.full_name_dict['name']):
                delimiter = '.'
        except TypeError:
            # Name was not a parseable address string; keep ':' delimiter.
            pass
        return '{0}{1}{2}'.format(self.full_name_dict['name'], delimiter, self.port)

    @property
    def full_name_dict(self):
        """Return the effective name/port pair; name falls back to address, then fqdn."""
        if self._values['name'] is None:
            name = self._values['address'] if self._values['address'] else self._values['fqdn']
        else:
            name = self._values['name']
        return dict(
            name=name,
            port=self.port
        )

    @property
    def node_name(self):
        """Name of the node resource backing this pool member."""
        return self.full_name_dict['name']

    @property
    def fqdn_name(self):
        return self._values['fqdn']

    @property
    def fqdn(self):
        """Build the API's fqdn sub-dict (autopopulate flag plus optional tmName).

        Raises F5ModuleError when the supplied FQDN is not a valid hostname.
        """
        result = {}
        if self.fqdn_auto_populate:
            result['autopopulate'] = 'enabled'
        else:
            result['autopopulate'] = 'disabled'
        if self._values['fqdn'] is None:
            return result
        if not is_valid_hostname(self._values['fqdn']):
            raise F5ModuleError(
                "The specified 'fqdn' value of: {0} is not a valid hostname.".format(self._values['fqdn'])
            )
        result['tmName'] = self._values['fqdn']
        return result

    @property
    def pool(self):
        """Fully-qualified pool name.

        Bug fix: this previously referenced ``self.want.partition``, but a
        ModuleParameters object has no ``want`` attribute; the partition
        lives on this object itself (as used by ``monitors`` and
        ``ip_encapsulation`` below).
        """
        return fq_name(self.partition, self._values['pool'])

    @property
    def port(self):
        """Validated integer port; raises F5ModuleError when missing or out of range."""
        if self._values['port'] is None:
            raise F5ModuleError(
                "Port value must be specified."
            )
        if 0 > int(self._values['port']) or int(self._values['port']) > 65535:
            raise F5ModuleError(
                "Valid ports must be in range 0 - 65535"
            )
        return int(self._values['port'])

    @property
    def address(self):
        """Validated member address; 'any6' is passed through verbatim.

        A '%' route-domain suffix is stripped for validation only; the
        original value (with suffix) is returned.
        """
        if self._values['address'] is None:
            return None
        elif self._values['address'] == 'any6':
            return 'any6'
        address = self._values['address'].split('%')[0]
        if is_valid_ip(address):
            return self._values['address']
        raise F5ModuleError(
            "The specified 'address' value of: {0} is not a valid IP address.".format(address)
        )

    @property
    def state(self):
        # 'enabled' is documented as an alias of 'present'.
        if self._values['state'] == 'enabled':
            return 'present'
        return self._values['state']

    @property
    def monitors_list(self):
        """Sorted list of monitor paths extracted from the monitors value.

        The regex path handles a pre-joined monitor string; if the value is
        already a list (TypeError from re.findall), it is returned as-is.
        """
        if self._values['monitors'] is None:
            return []
        try:
            result = re.findall(r'/\w+/[^\s}]+', self._values['monitors'])
            result.sort()
            return result
        except Exception:
            return self._values['monitors']

    @property
    def monitors(self):
        """Render the monitor expression the API expects.

        [''] means inherit from pool ('default'); ['none'] disables
        monitoring; otherwise monitors are joined with 'and', or wrapped
        in a 'min N of { ... }' expression for at_least semantics.
        """
        if self._values['monitors'] is None:
            return None
        if len(self._values['monitors']) == 1 and self._values['monitors'][0] == '':
            return 'default'
        if len(self._values['monitors']) == 1 and self._values['monitors'][0] == 'none':
            return '/Common/none'
        monitors = [fq_name(self.partition, x) for x in self.monitors_list]
        if self.availability_requirement_type == 'at_least':
            if self.at_least > len(self.monitors_list):
                raise F5ModuleError(
                    "The 'at_least' value must not exceed the number of 'monitors'."
                )
            monitors = ' '.join(monitors)
            result = 'min {0} of {{ {1} }}'.format(self.at_least, monitors)
        else:
            result = ' and '.join(monitors).strip()
        return result

    @property
    def availability_requirement_type(self):
        if self._values['availability_requirements'] is None:
            return None
        return self._values['availability_requirements']['type']

    @property
    def at_least(self):
        return self._get_availability_value('at_least')

    @property
    def ip_encapsulation(self):
        """Normalize ip_encapsulation: 'inherit', '' (disabled), or a full profile path."""
        if self._values['ip_encapsulation'] is None:
            return None
        if self._values['ip_encapsulation'] == 'inherit':
            return 'inherit'
        if self._values['ip_encapsulation'] in ['', 'none']:
            return ''
        return fq_name(self.partition, self._values['ip_encapsulation'])

    def _get_availability_value(self, key):
        """Return the given availability_requirements entry as an int, or None."""
        if self._values['availability_requirements'] is None:
            return None
        if self._values['availability_requirements'][key] is None:
            return None
        return int(self._values['availability_requirements'][key])
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IP REST API."""

    @property
    def ip_encapsulation(self):
        """Returns a simple name for the tunnel.
        The API stores the data like so
        "profiles": [
            {
                "name": "gre",
                "partition": "Common",
                "nameReference": {
                    "link": "https://localhost/mgmt/tm/net/tunnels/gre/~Common~gre?ver=13.1.0.7"
                }
            }
        ]
        This method returns that data as a simple profile name. For instance,
        /Common/gre
        This allows us to do comparisons of it in the Difference class and then,
        as needed, translate it back to the more complex form in the UsableChanges
        class.
        Returns:
            string: The simple form representation of the tunnel
        """
        if self._values['ip_encapsulation'] is None and self.inherit_profile == 'yes':
            return 'inherit'
        if self._values['ip_encapsulation'] is None and self.inherit_profile == 'no':
            return ''
        if self._values['ip_encapsulation'] is None:
            return None
        # There can be only one
        tunnel = self._values['ip_encapsulation'][0]
        return fq_name(tunnel['partition'], tunnel['name'])

    @property
    def inherit_profile(self):
        # Normalized to 'yes'/'no' by flatten_boolean.
        return flatten_boolean(self._values['inherit_profile'])

    @property
    def allow(self):
        # NOTE(review): 'allow' is not among this module's options or
        # returnables; this looks like a leftover from another F5 module —
        # confirm before relying on it.
        if self._values['allow'] is None:
            return ''
        if self._values['allow'][0] == 'All':
            return 'all'
        allow = self._values['allow']
        result = list(set([str(x) for x in allow]))
        result = sorted(result)
        return result

    @property
    def rate_limit(self):
        # The API reports 'disabled' instead of 0; normalize to an int.
        if self._values['rate_limit'] is None:
            return None
        if self._values['rate_limit'] == 'disabled':
            return 0
        return int(self._values['rate_limit'])

    @property
    def state(self):
        """Collapse the API's state/session pair into the module's state choices."""
        if self._values['state'] in ['user-up', 'unchecked', 'fqdn-up-no-addr', 'fqdn-up'] and self._values['session'] in ['user-enabled']:
            return 'present'
        elif self._values['state'] in ['down', 'up', 'checking'] and self._values['session'] == 'monitor-enabled':
            # monitor-enabled + checking:
            # Monitor is checking to see state of pool member. For instance,
            # whether it is up or down
            #
            # monitor-enabled + down:
            # Monitor returned and determined that pool member is down.
            #
            # monitor-enabled + up
            # Monitor returned and determined that pool member is up.
            return 'present'
        elif self._values['state'] in ['user-down'] and self._values['session'] in ['user-disabled']:
            return 'forced_offline'
        else:
            return 'disabled'

    @property
    def availability_requirement_type(self):
        # A 'min N of { ... }' expression implies at_least semantics.
        if self._values['monitors'] is None:
            return None
        if 'min ' in self._values['monitors']:
            return 'at_least'
        else:
            return 'all'

    @property
    def monitors_list(self):
        # Extract individual monitor paths from the monitor expression string.
        if self._values['monitors'] is None:
            return []
        try:
            result = re.findall(r'/\w+/[^\s}]+', self._values['monitors'])
            result.sort()
            return result
        except Exception:
            return self._values['monitors']

    @property
    def monitors(self):
        """Re-render the device's monitor value in canonical comparison form."""
        if self._values['monitors'] is None:
            return None
        if self._values['monitors'] == 'default':
            return 'default'
        monitors = [fq_name(self.partition, x) for x in self.monitors_list]
        if self.availability_requirement_type == 'at_least':
            monitors = ' '.join(monitors)
            result = 'min {0} of {{ {1} }}'.format(self.at_least, monitors)
        else:
            result = ' and '.join(monitors).strip()
        return result

    @property
    def at_least(self):
        """Returns the 'at least' value from the monitor string.
        The monitor string for a Require monitor looks like this.
            min 1 of { /Common/gateway_icmp }
        This method parses out the first of the numeric values. This values represents
        the "at_least" value that can be updated in the module.
        Returns:
            int: The at_least value if found. None otherwise.
        """
        # NOTE(review): despite the docstring, this returns the matched str,
        # not an int (ReportableChanges.at_least does convert). Harmless here
        # because the value is only interpolated into a format string.
        if self._values['monitors'] is None:
            return None
        pattern = r'min\s+(?P<least>\d+)\s+of\s+'
        matches = re.search(pattern, self._values['monitors'])
        if matches is None:
            return None
        return matches.group('least')

    @property
    def fqdn_auto_populate(self):
        # Translate the API's 'enabled'/'disabled' into True/False.
        if self._values['fqdn'] is None:
            return None
        if 'autopopulate' in self._values['fqdn']:
            if self._values['fqdn']['autopopulate'] == 'enabled':
                return True
            return False

    @property
    def fqdn(self):
        # The plain FQDN string held in the API's fqdn sub-dict.
        if self._values['fqdn'] is None:
            return None
        if 'tmName' in self._values['fqdn']:
            return self._values['fqdn']['tmName']
class NodeApiParameters(Parameters):
    """Node resources read from the API; no extra translation is needed yet."""
    pass
class Changes(Parameters):
    """Base class for computed change sets."""

    def to_return(self):
        """Gather all returnable values into a filtered dict.

        Best-effort: if gathering fails partway through, whatever was
        collected so far (unfiltered) is returned.
        """
        facts = {}
        try:
            for key in self.returnables:
                facts[key] = getattr(self, key)
            facts = self._filter_params(facts)
        except Exception:
            pass
        return facts
class UsableChanges(Changes):
    """Changes massaged into the form the BIG-IP REST API expects."""

    @property
    def monitors(self):
        """Strip the braces from a 'min N of { ... }' monitor expression.

        The API wants 'min N of /C/m1 /C/m2' rather than the braced form
        used internally for comparison.
        """
        monitor_string = self._values['monitors']
        if monitor_string is None:
            return None
        # Bug fix: the original condition was `'{' in monitor_string and '}'`,
        # i.e. the literal string '}' (always truthy) instead of a membership
        # test for the closing brace.
        if '{' in monitor_string and '}' in monitor_string:
            tmp = monitor_string.strip('}').split('{')
            monitor = ''.join(tmp).rstrip()
            return monitor
        return monitor_string
class ReportableChanges(Changes):
    """Changes translated back into module-facing values for the task result."""

    # NOTE: the original class carried an ``ssl_cipher_suite`` property that
    # referenced ``Parameters._ciphers`` — an attribute that does not exist in
    # this module (copy/paste leftover from an SSL-profile module). It is not
    # listed in ``returnables`` and would raise AttributeError if ever
    # accessed, so it has been removed.

    @property
    def fqdn_auto_populate(self):
        # Translate the API's 'enabled'/'disabled' into True/False.
        if self._values['fqdn'] is None:
            return None
        if 'autopopulate' in self._values['fqdn']:
            if self._values['fqdn']['autopopulate'] == 'enabled':
                return True
            return False

    @property
    def fqdn(self):
        # The plain FQDN string held in the API's fqdn sub-dict.
        if self._values['fqdn'] is None:
            return None
        if 'tmName' in self._values['fqdn']:
            return self._values['fqdn']['tmName']

    @property
    def state(self):
        """Collapse the API's state/session pair into the module's state choices."""
        if self._values['state'] in ['user-up', 'unchecked', 'fqdn-up-no-addr', 'fqdn-up'] and self._values['session'] in ['user-enabled']:
            return 'present'
        elif self._values['state'] in ['down', 'up', 'checking'] and self._values['session'] == 'monitor-enabled':
            return 'present'
        elif self._values['state'] in ['user-down'] and self._values['session'] in ['user-disabled']:
            return 'forced_offline'
        else:
            return 'disabled'

    @property
    def monitors(self):
        """Report monitors as a sorted list of full monitor paths."""
        if self._values['monitors'] is None:
            return []
        try:
            result = re.findall(r'/\w+/[^\s}]+', self._values['monitors'])
            result.sort()
            return result
        except Exception:
            # Value was already a list (or otherwise not a string).
            return self._values['monitors']

    @property
    def availability_requirement_type(self):
        # A 'min N of { ... }' expression implies at_least semantics.
        if self._values['monitors'] is None:
            return None
        if 'min ' in self._values['monitors']:
            return 'at_least'
        else:
            return 'all'

    @property
    def at_least(self):
        """Returns the 'at least' value from the monitor string.
        The monitor string for a Require monitor looks like this.
            min 1 of { /Common/gateway_icmp }
        This method parses out the first of the numeric values. This values represents
        the "at_least" value that can be updated in the module.
        Returns:
            int: The at_least value if found. None otherwise.
        """
        if self._values['monitors'] is None:
            return None
        pattern = r'min\s+(?P<least>\d+)\s+of\s+'
        matches = re.search(pattern, self._values['monitors'])
        if matches is None:
            return None
        return int(matches.group('least'))

    @property
    def availability_requirements(self):
        # Re-assemble the availability_requirements dict for reporting.
        if self._values['monitors'] is None:
            return None
        result = dict()
        result['type'] = self.availability_requirement_type
        result['at_least'] = self.at_least
        return result
class Difference(object):
    """Computes per-parameter differences between desired and current state."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for ``param``, or None when unchanged.

        Parameters with dedicated diff logic are handled by the matching
        property below; everything else falls back to simple inequality.
        """
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Generic diff: report the desired value when it differs from (or
        # is missing on) the current state.
        want_value = getattr(self.want, param)
        try:
            if want_value != getattr(self.have, param):
                return want_value
        except AttributeError:
            return want_value

    @property
    def state(self):
        """Translate a desired state change into the API's state/session pair."""
        if self.want.state == self.have.state:
            return None
        desired = self.want.state
        if desired == 'forced_offline':
            return {
                'state': 'user-down',
                'session': 'user-disabled'
            }
        if desired == 'disabled':
            return {
                'state': 'user-up',
                'session': 'user-disabled'
            }
        if desired in ['present', 'enabled']:
            return {
                'state': 'user-up',
                'session': 'user-enabled'
            }

    @property
    def fqdn_auto_populate(self):
        # This flag is immutable once set; attempting to change it is an error.
        if self.want.fqdn_auto_populate is None:
            return None
        if self.want.fqdn_auto_populate != self.have.fqdn_auto_populate:
            raise F5ModuleError(
                "The fqdn_auto_populate cannot be changed once it has been set."
            )

    @property
    def monitors(self):
        wanted = self.want.monitors
        if wanted is None:
            return None
        current = self.have.monitors
        if wanted == 'default' and current == 'default':
            return None
        if wanted == 'default' and current is None:
            return None
        if wanted == 'default' and len(current) > 0:
            return 'default'
        # this is necessary as in v12 there is a bug where returned value has a space at the end
        if wanted == '/Common/none' and current in ['/Common/none', '/Common/none ']:
            return None
        if current is None:
            return wanted
        if current != wanted:
            return wanted

    @property
    def ip_encapsulation(self):
        """Expand a changed tunnel name into the inherit/profiles API fields."""
        result = cmp_str_with_none(self.want.ip_encapsulation, self.have.ip_encapsulation)
        if result is None:
            return None
        if result == 'inherit':
            return dict(
                inherit_profile='enabled',
                ip_encapsulation=[]
            )
        if result in ['', 'none']:
            return dict(
                inherit_profile='disabled',
                ip_encapsulation=[]
            )
        return dict(
            inherit_profile='disabled',
            ip_encapsulation=[
                dict(
                    name=os.path.basename(result).strip('/'),
                    partition=os.path.dirname(result)
                )
            ]
        )
class ModuleManager(object):
    def __init__(self, *args, **kwargs):
        # AnsibleModule wrapper providing params and result helpers.
        self.module = kwargs.get('module', None)
        # REST client constructed from the module's connection parameters.
        self.client = F5RestClient(**self.module.params)
        self.want = None       # desired state (ModuleParameters), set per execution
        self.have = None       # current device state (ApiParameters)
        self.changes = None    # computed change set
        self.replace_all_with = False  # purge members not in the aggregate
        self.purge_links = list()      # members queued for purge
        self.on_device = None          # cached non-ephemeral members on device
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
wants = None
if self.module.params['replace_all_with']:
self.replace_all_with = True
if self.module.params['aggregate']:
wants = self.merge_defaults_for_aggregate(self.module.params)
result = dict()
changed = False
if self.replace_all_with and self.purge_links:
self.purge()
changed = True
if self.module.params['aggregate']:
result['aggregate'] = list()
for want in wants:
output = self.execute(want)
if output['changed']:
changed = output['changed']
result['aggregate'].append(output)
else:
output = self.execute(self.module.params)
if output['changed']:
changed = output['changed']
result.update(output)
if changed:
result['changed'] = True
return result
def merge_defaults_for_aggregate(self, params):
defaults = deepcopy(params)
aggregate = defaults.pop('aggregate')
for i, j in enumerate(aggregate):
for k, v in iteritems(defaults):
if k != 'replace_all_with':
if j.get(k, None) is None and v is not None:
aggregate[i][k] = v
if self.replace_all_with:
self.compare_aggregate_names(aggregate)
return aggregate
def _filter_ephemerals(self):
on_device = self._read_purge_collection()
if not on_device:
self.on_device = []
return
self.on_device = [member for member in on_device if member['ephemeral'] != "true"]
def compare_fqdns(self, items):
if any('fqdn' in item for item in items):
aggregates = [item['fqdn'] for item in items if 'fqdn' in item and item['fqdn']]
collection = [member['fqdn']['tmName'] for member in self.on_device if 'tmName' in member['fqdn']]
diff = set(collection) - set(aggregates)
if diff:
fqdns = [
member['selfLink'] for member in self.on_device if 'tmName' in member['fqdn'] and member['fqdn']['tmName'] in diff]
self.purge_links.extend(fqdns)
return True
return False
return False
def compare_addresses(self, items):
if any('address' in item for item in items):
aggregates = [item['address'] for item in items if 'address' in item and item['address']]
collection = [member['address'] for member in self.on_device]
diff = set(collection) - set(aggregates)
if diff:
addresses = [item['selfLink'] for item in self.on_device if item['address'] in diff]
self.purge_links.extend(addresses)
return True
return False
return False
def compare_aggregate_names(self, items):
self._filter_ephemerals()
if not self.on_device:
return False
fqdns = self.compare_fqdns(items)
addresses = self.compare_addresses(items)
if self.purge_links:
if fqdns:
if not addresses:
self.purge_links.extend([item['selfLink'] for item in self.on_device if 'tmName' not in item['fqdn']])
def execute(self, params=None):
self.want = ModuleParameters(params=params)
self.have = ApiParameters()
self.changes = UsableChanges()
changed = False
result = dict()
state = params['state']
if state in ['present', 'enabled', 'disabled', 'forced_offline']:
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
elif not self.want.preserve_node and self.node_exists():
return self.remove_node_from_device()
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if not self.want.preserve_node:
self.remove_node_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def purge(self):
if self.module.check_mode:
return True
if not self.pool_exist():
raise F5ModuleError('The specified pool does not exist')
self.purge_from_device()
return True
def create(self):
if self.want.reuse_nodes:
self._update_address_with_existing_nodes()
if self.want.name and not any(x for x in [self.want.address, self.want.fqdn_name]):
self._set_host_by_name()
if self.want.ip_encapsulation == '':
self.changes.update({'inherit_profile': 'enabled'})
self.changes.update({'profiles': []})
elif self.want.ip_encapsulation:
# Read the current list of tunnels so that IP encapsulation
# checking can take place.
tunnels_gre = self.read_current_tunnels_from_device('gre')
tunnels_ipip = self.read_current_tunnels_from_device('ipip')
tunnels = tunnels_gre + tunnels_ipip
if self.want.ip_encapsulation not in tunnels:
raise F5ModuleError(
"The specified 'ip_encapsulation' tunnel was not found on the system."
)
self.changes.update({'inherit_profile': 'disabled'})
self._update_api_state_attributes()
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def exists(self):
if not self.pool_exist():
raise F5ModuleError('The specified pool does not exist')
uri = "https://{0}:{1}/mgmt/tm/ltm/pool/{2}/members/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(name=fq_name(self.want.partition, self.want.pool)),
transform_name(self.want.partition, self.want.full_name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def pool_exist(self):
if self.replace_all_with:
pool_name = transform_name(name=fq_name(self.module.params['partition'], self.module.params['pool']))
else:
pool_name = transform_name(name=fq_name(self.want.partition, self.want.pool))
uri = "https://{0}:{1}/mgmt/tm/ltm/pool/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
pool_name
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def node_exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/node/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.node_name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def _set_host_by_name(self):
if is_valid_ip(self.want.name):
self.want.update({
'fqdn': None,
'address': self.want.name
})
else:
if not is_valid_hostname(self.want.name):
raise F5ModuleError(
"'name' is neither a valid IP address or FQDN name."
)
self.want.update({
'fqdn': self.want.name,
'address': None
})
def _update_api_state_attributes(self):
if self.want.state == 'forced_offline':
self.want.update({
'state': 'user-down',
'session': 'user-disabled',
})
elif self.want.state == 'disabled':
self.want.update({
'state': 'user-up',
'session': 'user-disabled',
})
elif self.want.state in ['present', 'enabled']:
self.want.update({
'state': 'user-up',
'session': 'user-enabled',
})
def _update_address_with_existing_nodes(self):
try:
have = self.read_current_node_from_device(self.want.node_name)
if self.want.fqdn_auto_populate and self.want.reuse_nodes:
self.module.warn("'fqdn_auto_populate' is discarded in favor of the re-used node's auto-populate setting.")
self.want.update({
'fqdn_auto_populate': True if have.fqdn['autopopulate'] == 'enabled' else False
})
if 'tmName' in have.fqdn:
self.want.update({
'fqdn': have.fqdn['tmName'],
'address': 'any6'
})
else:
self.want.update({
'address': have.address
})
except Exception:
return None
def _read_purge_collection(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/pool/{2}/members".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(name=fq_name(self.module.params['partition'], self.module.params['pool']))
)
query = '?$select=name,selfLink,fqdn,address,ephemeral'
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if 'items' in response:
return response['items']
return []
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.full_name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/ltm/pool/{2}/members".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(name=fq_name(self.want.partition, self.want.pool)),
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/ltm/pool/{2}/members/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(name=fq_name(self.want.partition, self.want.pool)),
transform_name(self.want.partition, self.want.full_name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/pool/{2}/members/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(name=fq_name(self.want.partition, self.want.pool)),
transform_name(self.want.partition, self.want.full_name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def remove_node_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/node/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.node_name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/pool/{2}/members/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(name=fq_name(self.want.partition, self.want.pool)),
transform_name(self.want.partition, self.want.full_name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
# Read the current list of tunnels so that IP encapsulation
# checking can take place.
tunnels_gre = self.read_current_tunnels_from_device('gre')
tunnels_ipip = self.read_current_tunnels_from_device('ipip')
response['tunnels'] = tunnels_gre + tunnels_ipip
return ApiParameters(params=response)
def read_current_node_from_device(self, node):
uri = "https://{0}:{1}/mgmt/tm/ltm/node/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, node)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return NodeApiParameters(params=response)
def read_current_tunnels_from_device(self, tunnel_type):
uri = "https://{0}:{1}/mgmt/tm/net/tunnels/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
tunnel_type
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if 'items' not in response:
return []
return [x['fullPath'] for x in response['items']]
def _prepare_links(self, collection):
# this is to ensure no duplicates are in the provided collection
no_dupes = list(set(collection))
links = list()
purge_paths = [urlparse(link).path for link in no_dupes]
for path in purge_paths:
link = "https://{0}:{1}{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
path
)
links.append(link)
return links
def purge_from_device(self):
links = self._prepare_links(self.purge_links)
with TransactionContextManager(self.client) as transact:
for link in links:
resp = transact.api.delete(link)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
class ArgumentSpec(object):
    """Declares the module's argument spec.
    ``element_spec`` holds the per-member options; it is reused (with
    defaults stripped) as the option spec for the ``aggregate`` list.
    """
    def __init__(self):
        self.supports_check_mode = True
        element_spec = dict(
            address=dict(aliases=['host', 'ip']),
            fqdn=dict(
                aliases=['hostname']
            ),
            name=dict(),
            port=dict(type='int'),
            connection_limit=dict(type='int'),
            description=dict(),
            rate_limit=dict(type='int'),
            ratio=dict(type='int'),
            preserve_node=dict(type='bool'),
            priority_group=dict(type='int'),
            state=dict(
                default='present',
                choices=['absent', 'present', 'enabled', 'disabled', 'forced_offline']
            ),
            fqdn_auto_populate=dict(type='bool'),
            reuse_nodes=dict(type='bool', default=True),
            availability_requirements=dict(
                type='dict',
                options=dict(
                    type=dict(
                        choices=['all', 'at_least'],
                        required=True
                    ),
                    at_least=dict(type='int'),
                ),
                required_if=[
                    ['type', 'at_least', ['at_least']],
                ]
            ),
            monitors=dict(type='list'),
            ip_encapsulation=dict(),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            ),
        )
        aggregate_spec = deepcopy(element_spec)
        # remove default in aggregate spec, to handle common arguments
        remove_default_spec(aggregate_spec)
        self.argument_spec = dict(
            aggregate=dict(
                type='list',
                elements='dict',
                options=aggregate_spec,
                aliases=['members'],
                mutually_exclusive=[
                    ['address', 'fqdn']
                ],
                required_one_of=[
                    ['address', 'fqdn']
                ],
            ),
            replace_all_with=dict(
                type='bool',
                aliases=['purge'],
                # 'no' is coerced to False by Ansible's bool type.
                default='no'
            ),
            pool=dict(required=True),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            ),
        )
        # Per-member options are also accepted at the top level for the
        # single-member (non-aggregate) form of the module.
        self.argument_spec.update(element_spec)
        self.argument_spec.update(f5_argument_spec)
        self.mutually_exclusive = [
            ['address', 'aggregate'],
            ['fqdn', 'aggregate']
        ]
        self.required_one_of = [
            ['address', 'fqdn', 'aggregate'],
        ]
def main():
    """Module entry point: build the spec, run the manager, report."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        mutually_exclusive=spec.mutually_exclusive,
        required_one_of=spec.required_one_of,
    )
    try:
        manager = ModuleManager(module=module)
        module.exit_json(**manager.exec_module())
    except F5ModuleError as error:
        module.fail_json(msg=str(error))
if __name__ == '__main__':
main()
| mit |
andrewleech/script.module.raven | lib/raven/transport/gevent.py | 20 | 1660 | """
raven.transport.gevent
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from raven.transport.base import AsyncTransport
from raven.transport.http import HTTPTransport
try:
    import gevent
    # gevent 1.0bN renamed coros to lock
    try:
        from gevent.lock import Semaphore
    except ImportError:
        from gevent.coros import Semaphore  # NOQA
    has_gevent = True
except Exception:
    # 'except Exception' rather than a bare 'except' so that
    # SystemExit/KeyboardInterrupt are not swallowed while probing for a
    # (possibly broken) gevent installation.
    has_gevent = None
class GeventedHTTPTransport(AsyncTransport, HTTPTransport):
    """HTTP transport that sends events asynchronously via gevent
    greenlets.  A semaphore bounds the number of in-flight requests so a
    burst of events cannot spawn an unbounded number of greenlets.
    """
    scheme = ['gevent+http', 'gevent+https']
    def __init__(self, parsed_url, maximum_outstanding_requests=100, *args, **kwargs):
        # Fail fast if gevent could not be imported at module load time.
        if not has_gevent:
            raise ImportError('GeventedHTTPTransport requires gevent.')
        # Caps the number of concurrent in-flight sends.
        self._lock = Semaphore(maximum_outstanding_requests)
        super(GeventedHTTPTransport, self).__init__(parsed_url, *args, **kwargs)
    def async_send(self, data, headers, success_cb, failure_cb):
        """
        Spawn an async request to a remote webserver.
        """
        # this can be optimized by making a custom self.send that does not
        # read the response since we don't use it.
        self._lock.acquire()
        return gevent.spawn(
            super(GeventedHTTPTransport, self).send, data, headers
        ).link(lambda x: self._done(x, success_cb, failure_cb))
    def _done(self, greenlet, success_cb, failure_cb, *args):
        # Runs as the greenlet's link callback: release the concurrency
        # slot, then report the outcome through the caller's callbacks.
        self._lock.release()
        if greenlet.successful():
            success_cb()
        else:
            failure_cb(greenlet.exception)
| bsd-3-clause |
francescobaldi86/Ecos2015PaperExtension | Python files/input.py | 1 | 6118 | import os
from helpers import d2df
import numpy as np
def filenames(project_path):
    """Build the dictionary of input/output file paths for the project.

    Parameters
    ----------
    project_path : str
        Root directory of the project; all paths are built relative to it.

    Returns
    -------
    dict
        Mapping from a short identifier to the corresponding file path.

    Side effects
    ------------
    Truncates the consistency-check report file and writes its header, so
    every run starts from a clean report.
    """
    output = {}
    # Input files (os.path.join instead of manual os.sep concatenation)
    output["dataset_raw"] = os.path.join(project_path, 'Database', 'selected_df.h5')
    output["headers_translate"] = os.path.join(project_path, 'General', 'headers_dict.xlsx')
    output["consistency_check_input"] = os.path.join(project_path, 'Data_Process', 'check_input.csv')
    # Output files
    output["dataset_output_empty"] = os.path.join(project_path, 'Data_Process', 'database_out_empty.h5')
    output["dataset_output"] = os.path.join(project_path, 'Data_Process', 'database_out.h5')
    output["consistency_check_report"] = os.path.join(project_path, 'Data_Process', 'check_report.txt')
    output["aggregated_efficiencies"] = os.path.join(project_path, 'Results', 'aggregated_efficiencies.csv')
    output["aggregated_flows_energy"] = os.path.join(project_path, 'Results', 'aggregated_flows_energy.csv')
    output["aggregated_flows_exergy"] = os.path.join(project_path, 'Results', 'aggregated_flows_exergy.csv')
    output["flows_clustering"] = os.path.join(project_path, 'Results', 'flows_clustering.csv')
    output["latex_table"] = os.path.join(project_path, 'Manuscript', 'Figures', 'table.tex')
    # Opening the report file and initiating it: "w" truncates any previous
    # report, and the context manager guarantees the handle is closed even
    # if the write fails.
    with open(output["consistency_check_report"], "w") as text_file:
        text_file.write("=== STARTING THE REPORT FILE === \n \n")
    return output
def assumptions(raw, processed, CONSTANTS, hd):
    """Fill the ``processed`` DataFrame with assumed boundary values.

    Parameters: ``raw`` is the measured dataset, ``processed`` the output
    DataFrame being populated, ``CONSTANTS`` the project constant tree,
    and ``hd`` the measured-header translation dict.  Returns the
    mutated ``processed`` DataFrame.
    """
    # This function includes generic assumed values in the main structure
    # ALL ENGINES
    for system in {"ME1", "ME2", "ME3", "ME4", "AE1", "AE2", "AE3", "AE4"}:
        # The pressure at the turbocharger air inlet and exhaust outlet is equal to the atmospheric pressure
        processed.loc[:,d2df(system,"Comp","Air_in","p")] = CONSTANTS["General"]["P_ATM"]
        processed.loc[:,d2df(system, "Turbine", "Mix_out", "p")] = CONSTANTS["General"]["P_ATM"]
        # Temperature in the engine room, i.e. inlet to the compressor of the TC
        processed[d2df(system, "Comp", "Air_in", "T")] = raw[hd["ER_AIR_T_"]] + 273.15
        # Assuming the exhaust-gas pressure is 98% of the inlet-manifold
        # pressure (code uses 0.98; an earlier comment said 90%).
        processed[d2df(system, "Cyl", "EG_out", "p")] = (0.98 * raw[hd[system + "-CAC_AIR_P_OUT"]] + 1.01325) * 100000
        # Assuming the pressure of the fuel to be around 9 barg, based on measurements from ME4
        # NOTE(review): 10e5 equals 1e6; a bar->Pa conversion is normally
        # *1e5, so this yields ~100 bar rather than ~10 bar -- confirm.
        processed.loc[:,d2df(system, "Cyl", "FuelPh_in", "p")] = (9 + 1.01325) * 10e5
        # Assuming the temperature of the cylinder wall to be 150 degC
        processed.loc[:,d2df(system, "Cyl", "QdotJW_out", "T")] = 150 + 273.15
        processed.loc[:,d2df(system, "JWC", "QdotJW_in", "T")] = 150 + 273.15
        # Assuming a temperature of 100 degC for heat losses from the TC shaft
        processed.loc[:,d2df(system, "TCshaft", "Losses_out", "T")] = 100 + 273.15
        # We assume the mixing temperature at the LT inlet of the HT mixer to be constant
        processed.loc[:,d2df(system, "HTmerge", "LTWater_in", "T")] = CONSTANTS["MainEngines"]["T_COOLING_MIX"]
        if system in {"ME1", "ME2", "ME3", "ME4"}:
            # Radiative losses assumed at 1% of MCR for main engines.
            processed.loc[:,d2df(system, "Cyl", "QdotRad_out", "Edot")] = 0.01 * CONSTANTS["MainEngines"]["MCR"]
            # Assuming the steam pressure and temperature in the HRSG to be constant...
            # NOTE(review): this local is currently unused.
            hrsg_pressure_assumption = (6 + 1.01325) * 100000
            # Adding radiative losses
            processed.loc[:, d2df(system, "Cyl", "QdotRad_out", "T")] = 100 + 273.15
        if system in {"AE1", "AE2", "AE3", "AE4"}:
            processed.loc[:,d2df(system, "AG", "Losses_out", "T")] = 100 + 273.15
            # Auxiliary engines run at fixed 750 rpm (genset speed).
            processed.loc[:,d2df(system, "Cyl", "Power_out", "omega")] = 750
            processed.loc[:, d2df(system, "Cyl", "QdotRad_out", "Edot")] = 0.01 * CONSTANTS["AuxEngines"]["MCR"]
    # Others: ambient references taken from SMHI open-data measurements.
    processed.loc[:,"T_0"] = raw[hd["water_T_forsmark_smhi-opendata"]] + 273.15
    processed.loc[:,"T_air"] = raw[hd["air_T_sv_hogarna_smhi-opendata"]] + 273.15
    processed.loc[:,"ShipSpeed"] = raw[hd["SHIP_SPEED_KNOT_"]]
    processed[d2df("CoolingSystems","SWC13","SeaWater_out","T")] = raw[hd["SWC13_SW_T_OUT"]] + 273.15 # CHECK IF IT IS IN OR OUT
    processed[d2df("CoolingSystems","SWC24","SeaWater_out","T")] = raw[hd["SWC24_SW_T_OUT"]] + 273.15 # CHECK IF IT IS IN OR OUT
    # Boilers
    processed.loc[:, "Steam:Boiler1:EG_out:p"] = 101325 + 10000
    processed.loc[:, "Steam:Boiler1:Air_in:T"] = raw[hd["ER_AIR_T_"]] + 273.15
    processed.loc[:, "Steam:Boiler1:Air_in:p"] = 101325
    # HTHR system
    processed.loc[:,"HTHR:SteamHeater:HRWater_out:T"] = 90 + 273.15 # From the heat balance, the temperature needs to rise at 90 degrees
    # processed.loc[:,"HTHR:SteamHeater:HRWater_out:mdot"] = 298 / 3600 * CONSTANTS["General"]["RHO_W"] # the original value is in m3/h
    processed.loc[:,"HTHR:HTHR24:HRWater_in:T"] = CONSTANTS["OtherUnits"]["HEAT_DEMAND"]["HTHR_INLET_TEMPERATURE"] # Assumption on the Temperature at the HTHR-24 inlet (HT side)
    # Log-mean-style average temperatures for the heat outputs below --
    # presumably LMTD between the two stream temperatures; confirm.
    processed.loc[:, "HTHR:HVACpreheater:Qdot_out:T"] = (50 - 23) / np.log((50+273.15)/(23+273.15))
    processed.loc[:, "HTHR:HVACreheater:Qdot_out:T"] = (80 - 60) / np.log((80 + 273.15) / (60 + 273.15))
    processed.loc[:, "HTHR:HotWaterHeater:Qdot_out:T"] = 70 + 273.15
    processed.loc[:, "Steam:TankHeating:Qdot_out:T"] = 60 + 273.15
    processed.loc[:, "Steam:MachinerySpaceHeaters:Qdot_out:T"] = processed["HTHR:HVACpreheater:Qdot_out:T"]
    processed.loc[:, "Steam:Galley:Qdot_out:T"] = 90 + 273.15
    processed.loc[:, "Steam:OtherTanks:Qdot_out:T"] = 60 + 273.15
    processed.loc[:, "Steam:HFOtankHeating:Qdot_out:T"] = 75 + 273.15 # some sort of average value...
    processed.loc[:, "Steam:HFOheater:Qdot_out:T"] = (110 - 75) / np.log((110 + 273.15) / (75 + 273.15))
    return processed
| mit |
jorge2703/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seem to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information theoretic based evaluation scores: as they are only based
on cluster assignments rather than distances, hence not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
              dest="n_components", type="int",
              help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
              action="store_false", dest="minibatch", default=True,
              help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
              action="store_false", dest="use_idf", default=True,
              help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
              action="store_true", default=False,
              help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
              help="Maximum number of features (dimensions)"
              " to extract from text.")
op.add_option("--verbose",
              action="store_true", dest="verbose", default=False,
              help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
# This example is options-only; any positional argument is an error.
if len(args) > 0:
    op.error("this script takes no arguments.")
    # NOTE: unreachable -- OptionParser.error() already calls sys.exit().
    sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
    'alt.atheism',
    'talk.religion.misc',
    'comp.graphics',
    'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
# subset='all' pools train+test posts; fixed random_state makes the
# shuffle (and therefore the run) reproducible.
dataset = fetch_20newsgroups(subset='all', categories=categories,
                             shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
# Number of distinct ground-truth classes; used as k for the clustering.
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
    if opts.use_idf:
        # Perform an IDF normalization on the output of HashingVectorizer
        hasher = HashingVectorizer(n_features=opts.n_features,
                                   stop_words='english', non_negative=True,
                                   norm=None, binary=False)
        vectorizer = make_pipeline(hasher, TfidfTransformer())
    else:
        vectorizer = HashingVectorizer(n_features=opts.n_features,
                                       stop_words='english',
                                       non_negative=False, norm='l2',
                                       binary=False)
else:
    # Stateful TF-IDF path: max_df/min_df prune overly common and rare terms.
    vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
                                 min_df=2, stop_words='english',
                                 use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
    print("Performing dimensionality reduction using LSA")
    t0 = time()
    # Vectorizer results are normalized, which makes KMeans behave as
    # spherical k-means for better results. Since LSA/SVD results are
    # not normalized, we have to redo the normalization.
    svd = TruncatedSVD(opts.n_components)
    normalizer = Normalizer(copy=False)
    lsa = make_pipeline(svd, normalizer)
    X = lsa.fit_transform(X)
    print("done in %fs" % (time() - t0))
    explained_variance = svd.explained_variance_ratio_.sum()
    print("Explained variance of the SVD step: {}%".format(
        int(explained_variance * 100)))
    print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
    km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
                         init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
    km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
                verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
# Supervised evaluation against the newsgroup labels ("ground truth").
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
      % metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
      % metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
# Hashed features have no inverse vocabulary, so term lists are only
# available for the TF-IDF path.
if not opts.use_hashing:
    print("Top terms per cluster:")
    if opts.n_components:
        # Map cluster centers back from LSA space to term space.
        original_space_centroids = svd.inverse_transform(km.cluster_centers_)
        order_centroids = original_space_centroids.argsort()[:, ::-1]
    else:
        order_centroids = km.cluster_centers_.argsort()[:, ::-1]
    terms = vectorizer.get_feature_names()
    for i in range(true_k):
        print("Cluster %d:" % i, end='')
        for ind in order_centroids[i, :10]:
            print(' %s' % terms[ind], end='')
        print()
| bsd-3-clause |
jotes/ansible-modules-core | cloud/google/gce_net.py | 104 | 9311 | #!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_net
version_added: "1.5"
short_description: create/destroy GCE networks and firewall rules
description:
- This module can create and destroy Google Compute Engine networks and
firewall rules U(https://developers.google.com/compute/docs/networking).
The I(name) parameter is reserved for referencing a network while the
I(fwname) parameter is used to reference firewall rules.
IPv4 Address ranges must be specified using the CIDR
U(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) format.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
allowed:
description:
- the protocol:ports to allow ('tcp:80' or 'tcp:80,443' or 'tcp:80-800;udp:1-25')
required: false
default: null
aliases: []
ipv4_range:
description:
- the IPv4 address range in CIDR notation for the network
required: false
aliases: ['cidr']
fwname:
description:
- name of the firewall rule
required: false
default: null
aliases: ['fwrule']
name:
description:
- name of the network
required: false
default: null
aliases: []
src_range:
description:
- the source IPv4 address range in CIDR notation
required: false
default: null
aliases: ['src_cidr']
src_tags:
description:
- the source instance tags for creating a firewall rule
required: false
default: null
aliases: []
target_tags:
version_added: "1.9"
description:
- the target instance tags for creating a firewall rule
required: false
default: null
aliases: []
state:
description:
- desired state of the network or firewall
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3"
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
'''
EXAMPLES = '''
# Simple example of creating a new network
- local_action:
module: gce_net
name: privatenet
ipv4_range: '10.240.16.0/24'
# Simple example of creating a new firewall rule
- local_action:
module: gce_net
name: privatenet
fwname: all-web-webproxy
allowed: tcp:80,8080
src_tags: ["web", "proxy"]
'''
# Optional dependency: the module degrades gracefully when apache-libcloud
# with GCE support is not installed.
try:
    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver
    from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
        ResourceExistsError, ResourceNotFoundError
    # Probe that this libcloud build actually knows the GCE provider.
    # NOTE(review): an AttributeError here would NOT be caught by the
    # ImportError handler below -- confirm that is intended.
    _ = Provider.GCE
    HAS_LIBCLOUD = True
except ImportError:
    HAS_LIBCLOUD = False
def format_allowed_section(allowed):
    """Format one 'protocol[:ports]' section of the allowed list.

    Accepts specs such as 'icmp', 'tcp:80' or 'tcp:80,443' and returns the
    GCE-compatible dict, e.g. {'IPProtocol': 'tcp', 'ports': ['80', '443']}.
    The 'ports' key is omitted for protocol-only specs.  A malformed section
    (more than one ':') yields an empty list so the caller produces no rule.
    """
    if allowed.count(":") == 0:
        # Protocol-only spec (e.g. 'icmp'): no port restriction.
        protocol = allowed
        ports = []
    elif allowed.count(":") == 1:
        protocol, ports = allowed.split(":")
    else:
        return []
    if ports.count(","):
        ports = ports.split(",")
    elif ports:
        # BUGFIX: previously an unconditional `else` wrapped the empty
        # ports value too, producing a bogus {'ports': [[]]} entry for
        # protocol-only specs.  Only wrap a non-empty single port.
        ports = [ports]
    return_val = {"IPProtocol": protocol}
    if ports:
        return_val["ports"] = ports
    return return_val
def format_allowed(allowed):
    """Convert the user-supplied 'allowed' string into GCE format.

    Sections are separated by ';'; each section is formatted independently
    by format_allowed_section().  Always returns a list of section results.
    """
    if ";" not in allowed:
        return [format_allowed_section(allowed)]
    return [format_allowed_section(section) for section in allowed.split(";")]
def main():
    """Drive a GCE network and/or firewall rule to the requested 'state'.

    Reads module parameters, connects to GCE through libcloud, then either
    ensures the network/firewall exists (state: active/present) or removes
    it (state: absent/deleted).  Results, including a 'changed' flag, are
    reported back to Ansible via module.exit_json()/fail_json().
    """
    module = AnsibleModule(
        argument_spec = dict(
            allowed = dict(),
            ipv4_range = dict(),
            fwname = dict(),
            name = dict(),
            src_range = dict(type='list'),
            src_tags = dict(type='list'),
            target_tags = dict(type='list'),
            state = dict(default='present'),
            service_account_email = dict(),
            pem_file = dict(),
            project_id = dict(),
        )
    )
    if not HAS_LIBCLOUD:
        # NOTE(review): exit_json reports *success* even though the required
        # dependency is absent; fail_json looks more appropriate — confirm.
        module.exit_json(msg='libcloud with GCE support (0.13.3+) required for this module')
    gce = gce_connect(module)
    allowed = module.params.get('allowed')
    ipv4_range = module.params.get('ipv4_range')
    fwname = module.params.get('fwname')
    name = module.params.get('name')
    src_range = module.params.get('src_range')
    src_tags = module.params.get('src_tags')
    target_tags = module.params.get('target_tags')
    state = module.params.get('state')
    changed = False
    json_output = {'state': state}
    if state in ['active', 'present']:
        # Look up the network first; ResourceNotFoundError just means we
        # may need to create it below.
        network = None
        try:
            network = gce.ex_get_network(name)
            json_output['name'] = name
            json_output['ipv4_range'] = network.cidr
        except ResourceNotFoundError:
            pass
        except Exception, e:
            module.fail_json(msg=unexpected_error_msg(e), changed=False)
        # user wants to create a new network that doesn't yet exist
        if name and not network:
            if not ipv4_range:
                module.fail_json(msg="Missing required 'ipv4_range' parameter",
                    changed=False)
            try:
                network = gce.ex_create_network(name, ipv4_range)
                json_output['name'] = name
                json_output['ipv4_range'] = ipv4_range
                changed = True
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
        if fwname:
            # user creating a firewall rule
            if not allowed and not src_range and not src_tags:
                # Distinguish the partial-success case: the network was just
                # created above, so report changed=True even though we fail.
                if changed and network:
                    module.fail_json(
                        msg="Network created, but missing required " + \
                            "firewall rule parameter(s)", changed=True)
                module.fail_json(
                    msg="Missing required firewall rule parameter(s)",
                    changed=False)
            allowed_list = format_allowed(allowed)
            try:
                gce.ex_create_firewall(fwname, allowed_list, network=name,
                    source_ranges=src_range, source_tags=src_tags, target_tags=target_tags)
                changed = True
            except ResourceExistsError:
                # Rule already present: idempotent no-op (changed stays False
                # unless the network above was created).
                pass
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            json_output['fwname'] = fwname
            json_output['allowed'] = allowed
            json_output['src_range'] = src_range
            json_output['src_tags'] = src_tags
            json_output['target_tags'] = target_tags
    if state in ['absent', 'deleted']:
        # A firewall-rule name takes precedence; otherwise delete the network.
        if fwname:
            json_output['fwname'] = fwname
            fw = None
            try:
                fw = gce.ex_get_firewall(fwname)
            except ResourceNotFoundError:
                # Already gone: idempotent no-op.
                pass
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            if fw:
                gce.ex_destroy_firewall(fw)
                changed = True
        elif name:
            json_output['name'] = name
            network = None
            try:
                network = gce.ex_get_network(name)
                # json_output['d1'] = 'found network name %s' % name
            except ResourceNotFoundError:
                # json_output['d2'] = 'not found network name %s' % name
                pass
            except Exception, e:
                # json_output['d3'] = 'error with %s' % name
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            if network:
                # json_output['d4'] = 'deleting %s' % name
                gce.ex_destroy_network(network)
                # json_output['d5'] = 'deleted %s' % name
                changed = True
    json_output['changed'] = changed
    module.exit_json(**json_output)
# import module snippets
# Pre-Ansible-2.0 convention: shared module_utils code is textually appended
# near the bottom of the module, hence these wildcard imports live here
# (AnsibleModule, gce_connect and unexpected_error_msg come from them).
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
    main()
| gpl-3.0 |
Zhongqilong/mykbengineer | kbe/src/lib/python/Lib/plat-freebsd5/IN.py | 374 | 7777 | # Generated by h2py from /usr/include/netinet/in.h
# Machine-generated constants (h2py from /usr/include/netinet/in.h, per the
# file header); regenerate from the C header rather than editing by hand.
# IP protocol numbers (IPPROTO_*).
IPPROTO_IP = 0
IPPROTO_HOPOPTS = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_IPV4 = 4
IPPROTO_IPIP = IPPROTO_IPV4
IPPROTO_TCP = 6
IPPROTO_ST = 7
IPPROTO_EGP = 8
IPPROTO_PIGP = 9
IPPROTO_RCCMON = 10
IPPROTO_NVPII = 11
IPPROTO_PUP = 12
IPPROTO_ARGUS = 13
IPPROTO_EMCON = 14
IPPROTO_XNET = 15
IPPROTO_CHAOS = 16
IPPROTO_UDP = 17
IPPROTO_MUX = 18
IPPROTO_MEAS = 19
IPPROTO_HMP = 20
IPPROTO_PRM = 21
IPPROTO_IDP = 22
IPPROTO_TRUNK1 = 23
IPPROTO_TRUNK2 = 24
IPPROTO_LEAF1 = 25
IPPROTO_LEAF2 = 26
IPPROTO_RDP = 27
IPPROTO_IRTP = 28
IPPROTO_TP = 29
IPPROTO_BLT = 30
IPPROTO_NSP = 31
IPPROTO_INP = 32
IPPROTO_SEP = 33
IPPROTO_3PC = 34
IPPROTO_IDPR = 35
IPPROTO_XTP = 36
IPPROTO_DDP = 37
IPPROTO_CMTP = 38
IPPROTO_TPXX = 39
IPPROTO_IL = 40
IPPROTO_IPV6 = 41
IPPROTO_SDRP = 42
IPPROTO_ROUTING = 43
IPPROTO_FRAGMENT = 44
IPPROTO_IDRP = 45
IPPROTO_RSVP = 46
IPPROTO_GRE = 47
IPPROTO_MHRP = 48
IPPROTO_BHA = 49
IPPROTO_ESP = 50
IPPROTO_AH = 51
IPPROTO_INLSP = 52
IPPROTO_SWIPE = 53
IPPROTO_NHRP = 54
IPPROTO_ICMPV6 = 58
IPPROTO_NONE = 59
IPPROTO_DSTOPTS = 60
IPPROTO_AHIP = 61
IPPROTO_CFTP = 62
IPPROTO_HELLO = 63
IPPROTO_SATEXPAK = 64
IPPROTO_KRYPTOLAN = 65
IPPROTO_RVD = 66
IPPROTO_IPPC = 67
IPPROTO_ADFS = 68
IPPROTO_SATMON = 69
IPPROTO_VISA = 70
IPPROTO_IPCV = 71
IPPROTO_CPNX = 72
IPPROTO_CPHB = 73
IPPROTO_WSN = 74
IPPROTO_PVP = 75
IPPROTO_BRSATMON = 76
IPPROTO_ND = 77
IPPROTO_WBMON = 78
IPPROTO_WBEXPAK = 79
IPPROTO_EON = 80
IPPROTO_VMTP = 81
IPPROTO_SVMTP = 82
IPPROTO_VINES = 83
IPPROTO_TTP = 84
IPPROTO_IGP = 85
IPPROTO_DGP = 86
IPPROTO_TCF = 87
IPPROTO_IGRP = 88
IPPROTO_OSPFIGP = 89
IPPROTO_SRPC = 90
IPPROTO_LARP = 91
IPPROTO_MTP = 92
IPPROTO_AX25 = 93
IPPROTO_IPEIP = 94
IPPROTO_MICP = 95
IPPROTO_SCCSP = 96
IPPROTO_ETHERIP = 97
IPPROTO_ENCAP = 98
IPPROTO_APES = 99
IPPROTO_GMTP = 100
IPPROTO_IPCOMP = 108
IPPROTO_PIM = 103
IPPROTO_PGM = 113
IPPROTO_DIVERT = 254
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPROTO_DONE = 257
# Port-number range boundaries (IPPORT_*).
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPORT_HIFIRSTAUTO = 49152
IPPORT_HILASTAUTO = 65535
IPPORT_RESERVEDSTART = 600
# IPv4 address-class test macros translated by h2py.
# NOTE(review): these reference `u_int32_t`, which is never defined in this
# module, so calling any of them raises NameError; they are kept only for
# fidelity with the generated output of the C header.
def IN_CLASSA(i): return (((u_int32_t)(i) & 0x80000000) == 0)
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return (((u_int32_t)(i) & 0xc0000000) == 0x80000000)
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((u_int32_t)(i) & 0xe0000000) == 0xc0000000)
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((u_int32_t)(i) & 0xf0000000) == 0xe0000000)
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
def IN_MULTICAST(i): return IN_CLASSD(i)
def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
def IN_BADCLASS(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
INADDR_NONE = 0xffffffff
IN_LOOPBACKNET = 127
INET_ADDRSTRLEN = 16
# IPv4 socket-option names (IP_*) for setsockopt/getsockopt.
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 9
IP_MULTICAST_TTL = 10
IP_MULTICAST_LOOP = 11
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_MULTICAST_VIF = 14
IP_RSVP_ON = 15
IP_RSVP_OFF = 16
IP_RSVP_VIF_ON = 17
IP_RSVP_VIF_OFF = 18
IP_PORTRANGE = 19
IP_RECVIF = 20
IP_IPSEC_POLICY = 21
IP_FAITH = 22
IP_FW_ADD = 50
IP_FW_DEL = 51
IP_FW_FLUSH = 52
IP_FW_ZERO = 53
IP_FW_GET = 54
IP_FW_RESETLOG = 55
IP_DUMMYNET_CONFIGURE = 60
IP_DUMMYNET_DEL = 61
IP_DUMMYNET_FLUSH = 62
IP_DUMMYNET_GET = 64
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
IP_PORTRANGE_DEFAULT = 0
IP_PORTRANGE_HIGH = 1
IP_PORTRANGE_LOW = 2
IPPROTO_MAXID = (IPPROTO_AH + 1)
# sysctl identifiers for the IPv4 stack (IPCTL_*).
IPCTL_FORWARDING = 1
IPCTL_SENDREDIRECTS = 2
IPCTL_DEFTTL = 3
IPCTL_DEFMTU = 4
IPCTL_RTEXPIRE = 5
IPCTL_RTMINEXPIRE = 6
IPCTL_RTMAXCACHE = 7
IPCTL_SOURCEROUTE = 8
IPCTL_DIRECTEDBROADCAST = 9
IPCTL_INTRQMAXLEN = 10
IPCTL_INTRQDROPS = 11
IPCTL_STATS = 12
IPCTL_ACCEPTSOURCEROUTE = 13
IPCTL_FASTFORWARDING = 14
IPCTL_KEEPFAITH = 15
IPCTL_GIF_TTL = 16
IPCTL_MAXID = 17
# Included from netinet6/in6.h
# Included from sys/queue.h
# NOTE(review): the stub defs below end in a bare line-continuation
# backslash — h2py emits these when it cannot translate a C macro body.
# As written here each backslash joins the def with the following line,
# so this region does not parse as the individual definitions it appears
# to be; confirm against the shipped file whether it is importable.
def SLIST_HEAD_INITIALIZER(head): return \
def SLIST_ENTRY(type): return \
def STAILQ_HEAD_INITIALIZER(head): return \
def STAILQ_ENTRY(type): return \
def LIST_HEAD_INITIALIZER(head): return \
def LIST_ENTRY(type): return \
def TAILQ_HEAD_INITIALIZER(head): return \
def TAILQ_ENTRY(type): return \
def CIRCLEQ_ENTRY(type): return \
__KAME_VERSION = "20000701/FreeBSD-current"
# IPv6 port-range boundaries.
IPV6PORT_RESERVED = 1024
IPV6PORT_ANONMIN = 49152
IPV6PORT_ANONMAX = 65535
IPV6PORT_RESERVEDMIN = 600
IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
INET6_ADDRSTRLEN = 46
IPV6_ADDR_INT32_ONE = 1
IPV6_ADDR_INT32_TWO = 2
IPV6_ADDR_INT32_MNL = 0xff010000
IPV6_ADDR_INT32_MLL = 0xff020000
IPV6_ADDR_INT32_SMP = 0x0000ffff
IPV6_ADDR_INT16_ULL = 0xfe80
IPV6_ADDR_INT16_USL = 0xfec0
IPV6_ADDR_INT16_MLL = 0xff02
# NOTE(review): the next eight assignments rebind the IPV6_ADDR_INT32_*/
# INT16_* names just above — presumably h2py flattened the header's
# byte-order #if/#else, so only this second set of values takes effect.
IPV6_ADDR_INT32_ONE = 0x01000000
IPV6_ADDR_INT32_TWO = 0x02000000
IPV6_ADDR_INT32_MNL = 0x000001ff
IPV6_ADDR_INT32_MLL = 0x000002ff
IPV6_ADDR_INT32_SMP = 0xffff0000
IPV6_ADDR_INT16_ULL = 0x80fe
IPV6_ADDR_INT16_USL = 0xc0fe
IPV6_ADDR_INT16_MLL = 0x02ff
# Truncated IN6_IS_ADDR_* macro stubs (same h2py artifact as noted above).
def IN6_IS_ADDR_UNSPECIFIED(a): return \
def IN6_IS_ADDR_LOOPBACK(a): return \
def IN6_IS_ADDR_V4COMPAT(a): return \
def IN6_IS_ADDR_V4MAPPED(a): return \
IPV6_ADDR_SCOPE_NODELOCAL = 0x01
IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
IPV6_ADDR_SCOPE_SITELOCAL = 0x05
IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
IPV6_ADDR_SCOPE_GLOBAL = 0x0e
__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
# More truncated address-classification macro stubs.
def IN6_IS_ADDR_LINKLOCAL(a): return \
def IN6_IS_ADDR_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
def IN6_IS_SCOPE_LINKLOCAL(a): return \
IPV6_OPTIONS = 1
IPV6_RECVOPTS = 5
IPV6_RECVRETOPTS = 6
IPV6_RECVDSTADDR = 7
IPV6_RETOPTS = 8
IPV6_SOCKOPT_RESERVED1 = 3
IPV6_UNICAST_HOPS = 4
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_LOOP = 11
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_PORTRANGE = 14
ICMP6_FILTER = 18
IPV6_PKTINFO = 19
IPV6_HOPLIMIT = 20
IPV6_NEXTHOP = 21
IPV6_HOPOPTS = 22
IPV6_DSTOPTS = 23
IPV6_RTHDR = 24
IPV6_PKTOPTIONS = 25
IPV6_CHECKSUM = 26
IPV6_BINDV6ONLY = 27
IPV6_IPSEC_POLICY = 28
IPV6_FAITH = 29
IPV6_FW_ADD = 30
IPV6_FW_DEL = 31
IPV6_FW_FLUSH = 32
IPV6_FW_ZERO = 33
IPV6_FW_GET = 34
IPV6_RTHDR_LOOSE = 0
IPV6_RTHDR_STRICT = 1
IPV6_RTHDR_TYPE_0 = 0
IPV6_DEFAULT_MULTICAST_HOPS = 1
IPV6_DEFAULT_MULTICAST_LOOP = 1
IPV6_PORTRANGE_DEFAULT = 0
IPV6_PORTRANGE_HIGH = 1
IPV6_PORTRANGE_LOW = 2
IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
# sysctl identifiers for the IPv6 stack (IPV6CTL_*).
IPV6CTL_FORWARDING = 1
IPV6CTL_SENDREDIRECTS = 2
IPV6CTL_DEFHLIM = 3
IPV6CTL_DEFMTU = 4
IPV6CTL_FORWSRCRT = 5
IPV6CTL_STATS = 6
IPV6CTL_MRTSTATS = 7
IPV6CTL_MRTPROTO = 8
IPV6CTL_MAXFRAGPACKETS = 9
IPV6CTL_SOURCECHECK = 10
IPV6CTL_SOURCECHECK_LOGINT = 11
IPV6CTL_ACCEPT_RTADV = 12
IPV6CTL_KEEPFAITH = 13
IPV6CTL_LOG_INTERVAL = 14
IPV6CTL_HDRNESTLIMIT = 15
IPV6CTL_DAD_COUNT = 16
IPV6CTL_AUTO_FLOWLABEL = 17
IPV6CTL_DEFMCASTHLIM = 18
IPV6CTL_GIF_HLIM = 19
IPV6CTL_KAME_VERSION = 20
IPV6CTL_USE_DEPRECATED = 21
IPV6CTL_RR_PRUNE = 22
IPV6CTL_MAPPED_ADDR = 23
IPV6CTL_BINDV6ONLY = 24
IPV6CTL_RTEXPIRE = 25
IPV6CTL_RTMINEXPIRE = 26
IPV6CTL_RTMAXCACHE = 27
IPV6CTL_MAXID = 28
livni/old-OK | src/knesset/user/migrations/0002_follow_with_activity_stream.py | 1 | 11633 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from actstream import follow
from django.contrib.auth.models import User
from knesset.mks.models import Party, Member
class Migration(DataMigration):
    """South data migration: mirror UserProfile's follow relations into
    django-activity-stream.

    For every UserProfile, each entry of the legacy followed_members /
    followed_parties many-to-many fields is re-registered through
    actstream's follow(), so the activity stream becomes the single
    source of "who follows what".
    """

    def forwards(self, orm):
        """Create an actstream Follow for every followed member/party."""
        for profile in orm.UserProfile.objects.all():
            # Fetch the real auth.User via the live model (not the frozen
            # orm) — presumably because actstream's follow() needs an
            # actual User instance; verify against actstream's API.
            user = User.objects.get(pk=profile.user.id)
            for mk in profile.followed_members.all():
                follow(user, mk)
            for party in profile.followed_parties.all():
                follow(user, party)

    def backwards(self, orm):
        ''' This back migration fails, sorry
        for i in orm['actstream.follow'].objects.filter(
            content_type__in = (ContentType.objects.get_for_model(Party),
                                ContentType.objects.get_for_model(Member))):
            i.delete()
        '''
        # Intentionally a no-op; the disabled sketch in the docstring shows
        # what a reverse migration would have looked like.
        pass

    # Frozen ORM snapshot generated by South; it builds the `orm` object
    # handed to forwards()/backwards().  Do not edit by hand.
    models = {
        'actstream.action': {
            'Meta': {'object_name': 'Action'},
            'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': "orm['contenttypes.ContentType']"}),
            'actor_object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'target_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'actstream.follow': {
            'Meta': {'object_name': 'Follow'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'mks.member': {
            'Meta': {'object_name': 'Member'},
            'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'mks.membership': {
            'Meta': {'object_name': 'Membership'},
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
            'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
        },
        'mks.party': {
            'Meta': {'object_name': 'Party'},
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
        },
        'planet.blog': {
            'Meta': {'object_name': 'Blog'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
        },
        'user.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'followed_members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followers'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
            'followed_parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followers'", 'symmetrical': 'False', 'to': "orm['mks.Party']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        }
    }

    complete_apps = ['user', 'actstream']
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.