repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
levigross/pyscanner | mytests/django/conf/locale/de/formats.py | 329 | 1288 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Output formats (Django date-format syntax), German conventions.
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
    '%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
    # '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
)
# Number formatting: ',' as decimal mark, '.' as thousands separator,
# grouped in blocks of three digits.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| mit |
beckastar/django | django/contrib/gis/db/backends/spatialite/base.py | 17 | 3210 | import sys
from ctypes.util import find_library
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.base import (Database,
DatabaseWrapper as SQLiteDatabaseWrapper, SQLiteCursorWrapper)
from django.contrib.gis.db.backends.spatialite.client import SpatiaLiteClient
from django.contrib.gis.db.backends.spatialite.creation import SpatiaLiteCreation
from django.contrib.gis.db.backends.spatialite.introspection import SpatiaLiteIntrospection
from django.contrib.gis.db.backends.spatialite.operations import SpatiaLiteOperations
from django.utils import six
class DatabaseWrapper(SQLiteDatabaseWrapper):
    """SQLite database wrapper with SpatiaLite support.

    Locates the SpatiaLite C extension at construction time and loads it
    into every new SQLite connection so spatial SQL functions are available.
    """

    def __init__(self, *args, **kwargs):
        # SpatiaLite + GeoDjango need pysqlite 2.5 or newer.
        if Database.version_info < (2, 5, 0):
            raise ImproperlyConfigured('Only versions of pysqlite 2.5+ are '
                                       'compatible with SpatiaLite and GeoDjango.')

        # Resolve the path to the SpatiaLite library (`libspatialite`):
        # either an explicit SPATIALITE_LIBRARY_PATH setting, or the system
        # library path via ctypes.util.find_library.
        self.spatialite_lib = getattr(settings, 'SPATIALITE_LIBRARY_PATH',
                                      find_library('spatialite'))
        if not self.spatialite_lib:
            raise ImproperlyConfigured('Unable to locate the SpatiaLite library. '
                                       'Make sure it is in your library path, or set '
                                       'SPATIALITE_LIBRARY_PATH in your settings.'
                                       )
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        self.ops = SpatiaLiteOperations(self)
        self.client = SpatiaLiteClient(self)
        self.creation = SpatiaLiteCreation(self)
        self.introspection = SpatiaLiteIntrospection(self)

    def get_new_connection(self, conn_params):
        """Create a SQLite connection with the SpatiaLite extension loaded."""
        connection = super(DatabaseWrapper, self).get_new_connection(conn_params)
        # Extension loading must be enabled before SELECT load_extension().
        try:
            connection.enable_load_extension(True)
        except AttributeError:
            raise ImproperlyConfigured(
                'The pysqlite library does not support C extension loading. '
                'Both SQLite and pysqlite must be configured to allow '
                'the loading of extensions to use SpatiaLite.')
        cursor = connection.cursor(factory=SQLiteCursorWrapper)
        try:
            cursor.execute("SELECT load_extension(%s)", (self.spatialite_lib,))
        except Exception as exc:
            new_msg = (
                'Unable to load the SpatiaLite library extension '
                '"%s" because: %s') % (self.spatialite_lib, exc)
            six.reraise(ImproperlyConfigured, ImproperlyConfigured(new_msg),
                        sys.exc_info()[2])
        cursor.close()
        return connection
| bsd-3-clause |
jucacrispim/toxicbuild | tests/integrations_functional/environment.py | 1 | 2777 | # -*- coding: utf-8 -*-
# Copyright 2018 Juca Crispim <juca@poraodojuca.net>
# This file is part of toxicbuild.
# toxicbuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# toxicbuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with toxicbuild. If not, see <http://www.gnu.org/licenses/>.
import asyncio
from toxicbuild.master.users import User
from tests.webui import SeleniumBrowser
from tests.webui.environment import create_root_user
from tests.integrations_functional import start_all, stop_all
def create_browser(context):
    """Create a selenium browser (Chrome driver) and store it on the
    behave context.

    :param context: Behave's context."""
    context.browser = SeleniumBrowser()
def quit_browser(context):
    """Close the selenium browser stored on the behave context.

    :param context: Behave's context."""
    context.browser.quit()
async def del_repo(context):
    """Deletes the repositories created in tests.

    Drains the scheduler exchange before dropping the repository
    collection so no pending scheduler messages reference deleted repos.
    """
    # Imported here rather than at module level; NOTE(review): presumably to
    # avoid touching broker/master settings at import time -- confirm.
    from toxicbuild.common.exchanges import scheduler_action, conn
    from toxicbuild.master import settings as master_settings
    await conn.connect(**master_settings.RABBITMQ_CONNECTION)
    # Declare then delete the queue: deleting discards any queued messages.
    await scheduler_action.declare()
    await scheduler_action.queue_delete()
    await scheduler_action.connection.disconnect()
    from toxicbuild.master.repository import Repository as RepoModel
    await RepoModel.drop_collection()
async def create_user(context):
    """Create and save a superuser for the tests and store it on the
    behave context (with a string id)."""
    user = User(email='someguy@bla.com', is_superuser=True)
    user.set_password('123')
    await user.save()
    user.id = str(user.id)
    context.user = user
async def del_user(context):
    """Delete the user stored on the behave context."""
    await context.user.delete()
def before_all(context):
    """Start all services, open the browser and create the test users."""
    start_all()
    create_browser(context)
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(create_user(context))
    event_loop.run_until_complete(create_root_user(context))
def after_feature(context, feature):
    """Remove repositories and github integrations after each feature."""
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(del_repo(context))
    from toxicbuild.integrations.github import GithubIntegration
    event_loop.run_until_complete(GithubIntegration.drop_collection())
def after_all(context):
    """Stop services, remove the test users and close the browser."""
    stop_all()
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(del_user(context))
    event_loop.run_until_complete(User.drop_collection())
    quit_browser(context)
| agpl-3.0 |
kongbaguni/qmk_fimware | lib/lufa/Demos/Device/ClassDriver/GenericHID/HostTestApp/test_generic_hid_libusb.py | 46 | 3169 | #!/usr/bin/env python
"""
LUFA Library
Copyright (C) Dean Camera, 2017.
dean [at] fourwalledcubicle [dot] com
www.lufa-lib.org
"""
"""
LUFA Generic HID device demo host test script. This script will send a
continuous stream of generic reports to the device, to show a variable LED
pattern on the target board. Send and received report data is printed to
the terminal.
Requires the PyUSB library (http://sourceforge.net/apps/trac/pyusb/).
"""
import sys
from time import sleep
import usb.core
import usb.util
# Generic HID device VID, PID and report payload length (length is increased
# by one to account for the Report ID byte that must be pre-pended)
# NOTE(review): the comment above mentions a payload length, but no length
# constant is defined here -- it may live elsewhere; confirm.
device_vid = 0x03EB
device_pid = 0x204F
def get_and_init_hid_device():
    """Find the generic HID device by VID/PID, detach any kernel driver
    claiming interface 0, set the device configuration and return it.

    Exits the program with an error message on any failure.
    """
    device = usb.core.find(idVendor=device_vid, idProduct=device_pid)

    if device is None:
        sys.exit("Could not find USB device.")

    if device.is_kernel_driver_active(0):
        try:
            device.detach_kernel_driver(0)
        except usb.core.USBError as exception:
            # Fixed typo in user-facing message ("detatch" -> "detach").
            sys.exit("Could not detach kernel driver: %s" % str(exception))

    try:
        device.set_configuration()
    except usb.core.USBError as exception:
        sys.exit("Could not set configuration: %s" % str(exception))

    return device
def send_led_pattern(device, led1, led2, led3, led4):
    """Send a Set Report control request carrying the four LED on/off
    values to the device, then print what was sent.

    Each ledN argument is 0 or 1.
    """
    # Report data for the demo is LED on/off data
    report_data = [led1, led2, led3, led4]

    # Send the generated report to the device
    number_of_bytes_written = device.ctrl_transfer(  # Set Report control request
        0b00100001,  # bmRequestType (constant for this control request)
        0x09,        # bmRequest (constant for this control request)
        0,           # wValue (MSB is report type, LSB is report number)
        0,           # wIndex (interface number)
        report_data  # report data to be sent
    )  # removed stray ';' statement terminator (non-idiomatic Python)
    assert number_of_bytes_written == len(report_data)

    print("Sent LED Pattern: {0}".format(report_data))
def receive_led_pattern(hid_device):
    """Read one IN report from the device's first endpoint and return the
    raw report bytes as a list."""
    in_endpoint = hid_device[0][(0, 0)][0]
    raw_report = hid_device.read(in_endpoint.bEndpointAddress,
                                 in_endpoint.wMaxPacketSize)
    return list(raw_report)
def main():
    """Drive the demo: connect, then endlessly cycle LED patterns."""
    hid_device = get_and_init_hid_device()

    print("Connected to device 0x%04X/0x%04X - %s [%s]" %
          (hid_device.idVendor, hid_device.idProduct,
           usb.util.get_string(hid_device, 256, hid_device.iProduct),
           usb.util.get_string(hid_device, 256, hid_device.iManufacturer)))

    pattern_index = 0
    while True:
        # Send the current pattern index as a 4-bit LED on/off mask.
        send_led_pattern(hid_device,
                         (pattern_index >> 3) & 1,
                         (pattern_index >> 2) & 1,
                         (pattern_index >> 1) & 1,
                         pattern_index & 1)

        # Receive and print the current LED pattern.
        led_pattern = receive_led_pattern(hid_device)[0:4]
        print("Received LED Pattern: {0}".format(led_pattern))

        # Advance through the 16 possible patterns, pausing for effect.
        pattern_index = (pattern_index + 1) % 16
        sleep(.2)
# Run the demo when executed as a script.
if __name__ == '__main__':
    main()
| gpl-2.0 |
ecino/compassion-modules | partner_communication/wizards/change_text_wizard.py | 4 | 3061 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import models, api, fields, _
from odoo.exceptions import UserError
class ChangeTextWizard(models.TransientModel):
    """Wizard letting a user mass-edit the e-mail template text used by
    partner communications, preview the rendering, and apply the change.
    """
    # NOTE(review): the 'update' method below shadows Odoo's BaseModel
    # update(); confirm this is intentional before any rename.
    _name = 'partner.communication.change.text.wizard'

    # Editable template body; defaults to the current template text of the
    # selected communications.
    template_text = fields.Text(default=lambda s: s._default_text())
    # Wizard UI state: 'edit' or 'preview'.
    state = fields.Char(default='edit')
    preview = fields.Html(readonly=True)

    @api.model
    def _default_text(self):
        """Return the e-mail template body of the selected communications.

        :raises UserError: if the selection spans several communication
            configs or several partner languages.
        """
        context = self.env.context
        communications = self.env[context['active_model']].browse(
            context['active_ids'])
        config = communications.mapped('config_id')
        lang = list(set(communications.mapped('partner_id.lang')))
        if len(config) != 1:
            raise UserError(_("You can only update text "
                              "on one communication type at time."))
        if len(lang) != 1:
            raise UserError(
                _("Please update only one language at a time."))
        return config.email_template_id.with_context(lang=lang[0]).body_html

    @api.multi
    def update(self):
        """ Refresh the texts of communications given the new template. """
        self.ensure_one()
        context = self.env.context
        communications = self.env[context['active_model']].browse(
            context['active_ids'])
        config = communications.mapped('config_id')
        # Fix: guard BEFORE reading config.email_template_id. Reading a
        # field on a multi-record recordset raises a singleton error, so
        # the friendly UserError below was unreachable in that case.
        if len(config) != 1:
            raise UserError(
                _("You can only update text on one communication "
                  "type at time."))
        lang = communications[0].partner_id.lang
        template = config.email_template_id.with_context(lang=lang)
        new_texts = template.render_template(
            self.template_text,
            template.model, communications.ids)
        for comm in communications:
            comm.body_html = new_texts[comm.id]
        return True

    @api.multi
    def get_preview(self):
        """Render the edited text for the active communication and switch
        the wizard to the preview state."""
        context = self.env.context
        communication = self.env[context['active_model']].browse(
            context['active_id'])
        template = communication.email_template_id
        self.preview = template.render_template(
            self.template_text, template.model, communication.ids)[
            communication.id]
        self.state = 'preview'
        return self._reload()

    @api.multi
    def edit(self):
        """Switch the wizard back to the edit state."""
        self.state = 'edit'
        return self._reload()

    def _reload(self):
        """Return the window action that re-opens this wizard in a dialog."""
        return {
            'type': 'ir.actions.act_window',
            'view_mode': 'form',
            'view_type': 'form',
            'res_id': self.id,
            'res_model': self._name,
            'target': 'new',
            'context': self.env.context,
        }
| agpl-3.0 |
avocado-framework/avocado-vt | virttest/utils_logfile.py | 2 | 3735 | """
Control log file utility functions.
An easy way to log lines to files when the logging system can't be used
:copyright: 2020 Red Hat Inc.
"""
import os
import time
import threading
from avocado.utils import aurl
from avocado.utils import path as utils_path
from aexpect.utils.genio import _open_log_files
from avocado.utils.astring import string_safe_encode
from virttest import data_dir
# Default base directory for log files; changeable via set_log_file_dir().
_log_file_dir = data_dir.get_tmp_dir()
# Reentrant lock guarding _open_log_files (see log_line/close_log_file).
_log_lock = threading.RLock()
def _acquire_lock(lock, timeout=10):
    """
    Try to acquire the lock without blocking, polling until a deadline.

    :param lock: threading.RLock object
    :param timeout: time to wait for the lock, in seconds
    :return: boolean. True if the lock was acquired,
                      False if it is still unavailable at the deadline
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        if lock.acquire(False):
            return True
        time.sleep(0.05)
    return False
class LogLockError(Exception):
    """Raised when the module log lock cannot be acquired in time."""
    pass
def log_line(filename, line):
    """
    Write a line to a file.

    Opens the file on first use (closing any same-named file previously
    opened in an old directory) and prefixes each line with a timestamp.

    :param filename: Path of file to write to, either absolute or relative to
                     the dir set by set_log_file_dir().
    :param line: Line to write.
    :raise LogLockError: If the lock is unavailable
    """
    global _open_log_files, _log_file_dir, _log_lock
    if not _acquire_lock(_log_lock):
        raise LogLockError("Could not acquire exclusive lock to access"
                           " _open_log_files")
    log_file = get_log_filename(filename)
    base_file = os.path.basename(log_file)
    try:
        if base_file not in _open_log_files:
            # First, let's close the log files opened in old directories
            # (_log_lock is reentrant, so close_log_file may re-acquire it).
            close_log_file(base_file)
            # Then, let's open the new file
            try:
                os.makedirs(os.path.dirname(log_file))
            except OSError:
                # Directory likely exists already; a real failure will
                # surface in the open() below.
                pass
            _open_log_files[base_file] = open(log_file, "w")
        timestr = time.strftime("%Y-%m-%d %H:%M:%S")
        try:
            line = string_safe_encode(line)
        except UnicodeDecodeError:
            # Lossy re-encode for byte strings that are not valid UTF-8
            # (legacy Python 2 path -- TODO confirm behavior under Python 3).
            line = line.decode("utf-8", "ignore").encode("utf-8")
        _open_log_files[base_file].write("%s: %s\n" % (timestr, line))
        _open_log_files[base_file].flush()
    finally:
        _log_lock.release()
def set_log_file_dir(directory):
    """
    Set the base directory for log files created by log_line().

    :param directory: Directory for log files
    """
    global _log_file_dir
    _log_file_dir = directory
def get_log_file_dir():
    """
    Return the base directory currently used for log_line() log files.
    """
    global _log_file_dir
    return _log_file_dir
def get_log_filename(filename):
    """
    Return the full path of a log file name.

    URLs are returned untouched; other names are resolved against the
    current log file directory.

    :param filename: Log file name
    :return: str. The full path of the log file
    """
    if aurl.is_url(filename):
        return filename
    resolved = utils_path.get_path(_log_file_dir, filename)
    return os.path.realpath(os.path.abspath(resolved))
def close_log_file(filename):
    """
    Close (and forget) every open log file whose basename equals filename.

    :param filename: Log file name
    :raise: LogLockError if the lock is unavailable
    """
    global _open_log_files, _log_file_dir, _log_lock
    if not _acquire_lock(_log_lock):
        raise LogLockError("Could not acquire exclusive lock to access"
                           " _open_log_files")
    try:
        # Collect keys first: the dict must not change while iterating.
        closed_keys = []
        for key in _open_log_files:
            if os.path.basename(key) == filename:
                _open_log_files[key].close()
                closed_keys.append(key)
        for key in closed_keys:
            _open_log_files.pop(key)
    finally:
        _log_lock.release()
| gpl-2.0 |
dmacd/FB-try1 | pyramid/frameworkbenchmarks/models.py | 4 | 1820 | """
Benchmark models.
"""
import json
import psycopg2
from collections import Iterable
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import QueuePool
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
# Hostname of the PostgreSQL benchmark database server.
DBHOSTNAME = 'localhost'
def get_conn():
    """Open and return a new raw psycopg2 connection to the benchmark
    database."""
    return psycopg2.connect(
        user='benchmarkdbuser',
        password='benchmarkdbpass',
        host=DBHOSTNAME,
        port='5432',
        database='hello_world',
    )
# Pool of raw psycopg2 connections shared by the SQLAlchemy engine.
conn_pool = QueuePool(get_conn, pool_size=100, max_overflow=25, echo=False)
pg = create_engine('postgresql://', pool=conn_pool)
# Single module-level session bound to the engine.
DBSession = sessionmaker(bind=pg)()
metadata = MetaData()
# Base class for the declarative models below.
DatabaseBase = declarative_base()
def sqlalchemy_encoder_factory(system_values):
    # Renderer factory hook (presumably registered with Pyramid -- confirm);
    # returns the custom JSON encoder below.
    return SQLAlchemyEncoder()
class SQLAlchemyEncoder(json.JSONEncoder):
    """JSON encoder that serializes SQLAlchemy declarative objects via
    their __json__() method; __call__ also handles iterables of them."""

    def __call__(self, obj, system_values):
        if isinstance(obj, Iterable):
            serialized = [self.default(item) for item in obj]
            return json.dumps(serialized)
        return json.dumps(self.default(obj))

    def default(self, obj):
        if isinstance(obj.__class__, DeclarativeMeta):
            return obj.__json__()
        return super(SQLAlchemyEncoder, self).default(obj)
class World(DatabaseBase):
    """Declarative mapping for the benchmark 'World' table."""
    __tablename__ = 'World'

    id = Column('id', Integer, primary_key=True)
    randomNumber = Column('randomnumber', Integer, nullable=False, server_default='0')

    def __json__(self):
        """Return a JSON-serializable dict for this row."""
        return {'id': self.id, 'randomNumber': self.randomNumber}
class Fortune(DatabaseBase):
    """Declarative mapping for the benchmark 'Fortune' table."""
    __tablename__ = 'Fortune'

    id = Column('id', Integer, primary_key=True)
    message = Column('message', String, nullable=False)

    def __json__(self):
        """Return a JSON-serializable dict for this row."""
        return {'id': self.id, 'message': self.message}
| bsd-3-clause |
ChakriCherukuri/bqplot | bqplot/colorschemes.py | 5 | 2808 |
# These color schemes come from d3: http://d3js.org/
#
# They are licensed under the following license:
#
# Copyright (c) 2010-2015, Michael Bostock
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * The name Michael Bostock may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Palettes below are hex RGB strings taken from d3's categorical scales
# (see the license header above).
#: 10 colors that work well together as data category colors
CATEGORY10 = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
              '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
#: 20 colors that work well together as data category colors
CATEGORY20 = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a',
              '#d62728', '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94',
              '#e377c2', '#f7b6d2', '#7f7f7f', '#c7c7c7', '#bcbd22', '#dbdb8d',
              '#17becf', '#9edae5']
#: 20 colors that work well together as data category colors
CATEGORY20b = ['#393b79', '#5254a3', '#6b6ecf', '#9c9ede', '#637939',
               '#8ca252', '#b5cf6b', '#cedb9c', '#8c6d31', '#bd9e39',
               '#e7ba52', '#e7cb94', '#843c39', '#ad494a', '#d6616b',
               '#e7969c', '#7b4173', '#a55194', '#ce6dbd', '#de9ed6']
#: 20 colors that work well together as data category colors
CATEGORY20c = ['#3182bd', '#6baed6', '#9ecae1', '#c6dbef', '#e6550d',
               '#fd8d3c', '#fdae6b', '#fdd0a2', '#31a354', '#74c476',
               '#a1d99b', '#c7e9c0', '#756bb1', '#9e9ac8', '#bcbddc',
               '#dadaeb', '#636363', '#969696', '#bdbdbd', '#d9d9d9']
| apache-2.0 |
sinjar666/fbthrift | thrift/lib/py/transport/TSSLSocket.py | 15 | 7627 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .TSocket import *
from .TTransport import *
import socket
import ssl
import traceback
import sys
# workaround for a python bug. see http://bugs.python.org/issue8484
import hashlib
class TSSLSocket(TSocket):
    """Socket implementation that communicates over an SSL/TLS encrypted
    channel."""

    # NOTE(review): ssl.SSLSocket is constructed directly in open() instead of
    # using ssl.wrap_socket()/SSLContext; direct construction is deprecated on
    # modern Python -- confirm the Python versions this must support.

    def __init__(self, host='localhost', port=9090, unix_socket=None,
                 ssl_version=ssl.PROTOCOL_TLSv1,
                 cert_reqs=ssl.CERT_NONE,
                 ca_certs=None,
                 verify_name=False,
                 keyfile=None,
                 certfile=None):
        """Initialize a TSSLSocket.

        @param ssl_version(int)  protocol version. see ssl module.
        @param cert_reqs(int)    whether to verify peer certificate. see ssl
                                 module.
        @param ca_certs(str)     filename containing trusted root certs.
        @param verify_name       if False, no peer name validation is performed
                                 if True, verify subject name of peer vs 'host'
                                 if a str, verify subject name of peer vs given
                                 str
        @param keyfile           filename containing the client's private key
        @param certfile          filename containing the client's cert and
                                 optionally the private key
        """
        TSocket.__init__(self, host, port, unix_socket)
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
        self.ssl_version = ssl_version
        self.verify_name = verify_name
        self.client_keyfile = keyfile
        self.client_certfile = certfile

    def open(self):
        # Establish the plain TCP/unix connection first, then wrap it in SSL.
        TSocket.open(self)
        try:
            sslh = ssl.SSLSocket(self.handle,
                                 ssl_version=self.ssl_version,
                                 cert_reqs=self.cert_reqs,
                                 ca_certs=self.ca_certs,
                                 keyfile=self.client_keyfile,
                                 certfile=self.client_certfile)
            if self.verify_name:
                # validate the peer certificate commonName against the
                # hostname (or given name) that we were expecting.
                cert = sslh.getpeercert()
                # Python 2 compatibility: 'unicode' exists only there.
                str_type = (str, unicode) if sys.version_info[0] < 3 else str
                if isinstance(self.verify_name, str_type):
                    # Explicit expected name: match subject CN entries only.
                    valid_names = self._getCertNames(cert)
                    name = self.verify_name
                else:
                    # verify_name is truthy but not a string: match the
                    # connect host against subject CN plus DNS altNames.
                    valid_names = self._getCertNames(cert, "DNS")
                    name = self.host
                match = False
                for valid_name in valid_names:
                    if self._matchName(name, valid_name):
                        match = True
                        break
                if not match:
                    sslh.close()
                    raise TTransportException(TTransportException.NOT_OPEN,
                                              "failed to verify certificate name")
            # Publish the wrapped socket only after verification succeeded.
            self.handle = sslh
        except ssl.SSLError as e:
            raise TTransportException(TTransportException.NOT_OPEN,
                                      "SSL error during handshake: " + str(e))
        except socket.error as e:
            raise TTransportException(TTransportException.NOT_OPEN,
                                      "socket error during SSL handshake: " + str(e))

    @staticmethod
    def _getCertNames(cert, includeAlt=None):
        """Returns a set containing the common name(s) for the given cert. If
        includeAlt is not None, then X509v3 alternative names of type includeAlt
        (e.g. 'DNS', 'IPADD') will be included as potential matches."""
        # The "subject" field is a tuple containing the sequence of relative
        # distinguished names (RDNs) given in the certificate's data structure
        # for the principal, and each RDN is a sequence of name-value pairs.
        names = set()
        for rdn in cert.get('subject', ()):
            for k, v in rdn:
                if k == 'commonName':
                    names.add(v)
        if includeAlt:
            for k, v in cert.get('subjectAltName', ()):
                if k == includeAlt:
                    names.add(v)
        return names

    @staticmethod
    def _matchName(name, pattern):
        """match a DNS name against a pattern. match is not case sensitive.
        a '*' in the pattern will match any single component of name."""
        name_parts = name.split('.')
        pattern_parts = pattern.split('.')
        # A wildcard matches exactly one label, so label counts must agree.
        if len(name_parts) != len(pattern_parts):
            return False
        for n, p in zip(name_parts, pattern_parts):
            if p != '*' and (n.lower() != p.lower()):
                return False
        return True
class TSSLServerSocket(TServerSocket):
    """
    SSL implementation of TServerSocket

    Note that this does not support TNonblockingServer

    This uses the ssl module's wrap_socket() method to provide SSL
    negotiated encryption.
    """
    # Protocol version used when wrapping accepted connections.
    SSL_VERSION = ssl.PROTOCOL_TLSv1

    def __init__(self, port=9090, certfile='cert.pem', unix_socket=None):
        """Initialize a TSSLServerSocket

        @param certfile: The filename of the server certificate file, defaults
                         to cert.pem
        @type certfile: str
        @param port: The port to listen on for inbound connections.
        @type port: int
        """
        self.setCertfile(certfile)
        # Default: do not request or verify client certificates.
        self.setCertReqs(ssl.CERT_NONE, None)
        TServerSocket.__init__(self, port, unix_socket)

    def setCertfile(self, certfile):
        """Set or change the server certificate file used to wrap new
        connections.

        @param certfile: The filename of the server certificate, i.e.
                         '/etc/certs/server.pem'
        @type certfile: str

        Raises an IOError exception if the certfile is not present or
        unreadable.
        """
        # NOTE(review): 'os' is not imported in this module directly;
        # presumably re-exported by 'from .TSocket import *' -- verify.
        if not os.access(certfile, os.R_OK):
            raise IOError('No such certfile found: %s' % (certfile))
        self.certfile = certfile

    def setCertReqs(self, cert_reqs, ca_certs):
        """Set or change the parameters used to validate the client's
        certificate. The parameters behave the same as the arguments to
        python's ssl.wrap_socket() method with the same name.
        """
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs

    def accept(self):
        # Accept a plain TCP connection, then negotiate SSL on it.
        plain_client, addr = self._sock_accept()
        try:
            client = ssl.wrap_socket(plain_client,
                                     certfile=self.certfile,
                                     server_side=True,
                                     ssl_version=self.SSL_VERSION,
                                     cert_reqs=self.cert_reqs,
                                     ca_certs=self.ca_certs)
        except ssl.SSLError as ssl_exc:
            # failed handshake/ssl wrap, close socket to client
            plain_client.close()
            # raise ssl_exc
            # We can't raise the exception, because it kills most TServer
            # derived serve() methods.
            # Instead, return None, and let the TServer instance deal with it
            # in other exception handling. (but TSimpleServer dies anyway)
            print(traceback.print_exc())
            return None
        return self._makeTSocketFromAccepted((client, addr))
| apache-2.0 |
dstockwell/blink | Source/bindings/scripts/utilities.py | 7 | 13027 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions (file reading, simple IDL parsing by regexes) for IDL build.
Design doc: http://www.chromium.org/developers/design-documents/idl-build
"""
import os
import cPickle as pickle
import re
import string
import subprocess
# Component names recognized by the IDL build; the second set is a superset
# that additionally allows 'testing' (see is_testing_target below).
KNOWN_COMPONENTS = frozenset(['core', 'modules'])
KNOWN_COMPONENTS_WITH_TESTING = frozenset(['core', 'modules', 'testing'])
def idl_filename_to_interface_name(idl_filename):
    """Return the interface name for an IDL file: InterfaceName.idl -> InterfaceName."""
    basename = os.path.basename(idl_filename)
    interface_name, _extension = os.path.splitext(basename)
    return interface_name
def idl_filename_to_component_with_known_components(idl_filename, known_components):
    """Walk up the (real) directory path of an IDL file and return the first
    path component that names a known component (case-insensitive).

    Raises Exception if no directory level matches a known component.
    """
    path = os.path.dirname(os.path.realpath(idl_filename))
    while path:
        parent, leaf = os.path.split(path)
        if not leaf:
            break
        if leaf.lower() in known_components:
            return leaf.lower()
        path = parent
    raise Exception('Unknown component type for %s' % idl_filename)
def idl_filename_to_component(idl_filename):
    """Return the component ('core' or 'modules') containing the IDL file."""
    return idl_filename_to_component_with_known_components(
        idl_filename, KNOWN_COMPONENTS)
def is_testing_target(idl_filename):
    """Return True if the IDL file lives under the 'testing' component."""
    component = idl_filename_to_component_with_known_components(
        idl_filename, KNOWN_COMPONENTS_WITH_TESTING)
    return component == 'testing'
# See whether "component" can depend on "dependency" or not:
# Suppose that we have interface X and Y:
# - if X is a partial interface and Y is the original interface,
# use is_valid_component_dependency(X, Y).
# - if X implements Y, use is_valid_component_dependency(X, Y)
# Suppose that X is a cpp file and Y is a header file:
# - if X includes Y, use is_valid_component_dependency(X, Y)
def is_valid_component_dependency(component, dependency):
    """Return whether |component| is allowed to depend on |dependency|."""
    assert component in KNOWN_COMPONENTS
    assert dependency in KNOWN_COMPONENTS
    # 'core' must never depend on 'modules'; every other pairing is allowed.
    return not (component == 'core' and dependency == 'modules')
class ComponentInfoProvider(object):
    """Interface for component-specific build information.

    This base class provides empty defaults for unknown components.
    """
    def __init__(self):
        pass

    @property
    def interfaces_info(self):
        return {}

    @property
    def component_info(self):
        return {}

    @property
    def enumerations(self):
        return {}

    @property
    def typedefs(self):
        return {}

    @property
    def union_types(self):
        return set()

    @property
    def include_path_for_union_types(self):
        return None
class ComponentInfoProviderCore(ComponentInfoProvider):
    """Component information for the 'core' component."""

    def __init__(self, interfaces_info, component_info):
        super(ComponentInfoProviderCore, self).__init__()
        self._interfaces_info = interfaces_info
        self._component_info = component_info

    @property
    def interfaces_info(self):
        return self._interfaces_info

    @property
    def component_info(self):
        return self._component_info

    @property
    def enumerations(self):
        return self.component_info['enumerations']

    @property
    def typedefs(self):
        return self.component_info['typedefs']

    @property
    def union_types(self):
        return self.component_info['union_types']

    @property
    def include_path_for_union_types(self):
        return 'bindings/core/v8/UnionTypesCore.h'

    @property
    def specifier_for_export(self):
        return 'CORE_EXPORT '

    @property
    def include_path_for_export(self):
        return 'core/CoreExport.h'
class ComponentInfoProviderModules(ComponentInfoProvider):
    """Component information for the 'modules' component; merges in data
    from 'core' where lookups must see both components."""

    def __init__(self, interfaces_info, component_info_core,
                 component_info_modules):
        super(ComponentInfoProviderModules, self).__init__()
        self._interfaces_info = interfaces_info
        self._component_info_core = component_info_core
        self._component_info_modules = component_info_modules

    @property
    def interfaces_info(self):
        return self._interfaces_info

    @property
    def component_info(self):
        return self._component_info_modules

    @property
    def enumerations(self):
        merged = dict(self._component_info_core['enumerations'])
        merged.update(self._component_info_modules['enumerations'])
        return merged

    @property
    def typedefs(self):
        merged = dict(self._component_info_core['typedefs'])
        merged.update(self._component_info_modules['typedefs'])
        return merged

    @property
    def union_types(self):
        # Union types already generated for 'core' must not be generated
        # again for 'modules'.
        return (self._component_info_modules['union_types'] -
                self._component_info_core['union_types'])

    @property
    def include_path_for_union_types(self):
        return 'bindings/modules/v8/UnionTypesModules.h'

    @property
    def specifier_for_export(self):
        return 'MODULES_EXPORT '

    @property
    def include_path_for_export(self):
        return 'modules/ModulesExport.h'
def load_interfaces_info_overall_pickle(info_dir):
    """Load and return the overall interfaces-info dict from its pickle.

    :param info_dir: directory containing modules/InterfacesInfoOverall.pickle
    """
    path = os.path.join(info_dir, 'modules', 'InterfacesInfoOverall.pickle')
    # Open in binary mode: pickle data is bytes; text mode breaks on
    # Python 3 and on Windows (newline translation).
    with open(path, 'rb') as interface_info_file:
        return pickle.load(interface_info_file)
def merge_dict_recursively(target, diff):
    """Merge |diff| into |target| in place.

    dict values are merged recursively, list values extended, set values
    union-ed; any other value (or a key absent from |target|) is
    inserted/overwritten. Parts of |diff| may end up shared with |target|.
    """
    # .items() instead of .iteritems(): works on both Python 2 and 3.
    for key, value in diff.items():
        if key not in target:
            target[key] = value
        elif type(value) == dict:
            merge_dict_recursively(target[key], value)
        elif type(value) == list:
            target[key].extend(value)
        elif type(value) == set:
            target[key].update(value)
        else:
            # Testing IDLs want to overwrite the values. Production code
            # doesn't need any overwriting.
            target[key] = value
def create_component_info_provider_core(info_dir):
    """Build a ComponentInfoProviderCore from the pickled build info."""
    interfaces_info = load_interfaces_info_overall_pickle(info_dir)
    # Binary mode: pickle files must not be read as text (breaks on
    # Python 3 / Windows newline translation).
    with open(os.path.join(info_dir, 'core',
                           'ComponentInfoCore.pickle'), 'rb') as component_info_file:
        component_info = pickle.load(component_info_file)
    return ComponentInfoProviderCore(interfaces_info, component_info)
def create_component_info_provider_modules(info_dir):
    """Build a ComponentInfoProviderModules from the pickled build info."""
    interfaces_info = load_interfaces_info_overall_pickle(info_dir)
    # Binary mode: pickle files must not be read as text (breaks on
    # Python 3 / Windows newline translation).
    with open(os.path.join(info_dir, 'core',
                           'ComponentInfoCore.pickle'), 'rb') as component_info_file:
        component_info_core = pickle.load(component_info_file)
    with open(os.path.join(info_dir, 'modules',
                           'ComponentInfoModules.pickle'), 'rb') as component_info_file:
        component_info_modules = pickle.load(component_info_file)
    return ComponentInfoProviderModules(
        interfaces_info, component_info_core, component_info_modules)
def create_component_info_provider(info_dir, component):
    """Return the info provider for |component|; an empty provider for
    unknown components."""
    if component == 'core':
        return create_component_info_provider_core(info_dir)
    if component == 'modules':
        return create_component_info_provider_modules(info_dir)
    return ComponentInfoProvider()
################################################################################
# Basic file reading/writing
################################################################################
def get_file_contents(filename):
    """Return the entire contents of |filename| as one string."""
    with open(filename) as input_file:
        contents = input_file.read()
    return contents
def read_file_to_list(filename):
    """Returns a list of (stripped) lines for a given filename.

    Only the trailing newline is removed; other whitespace is preserved.
    """
    stripped_lines = []
    with open(filename) as input_file:
        for line in input_file:
            stripped_lines.append(line.rstrip('\n'))
    return stripped_lines
def resolve_cygpath(cygdrive_names):
    """Convert Cygwin-style /cygdrive/... paths to absolute Windows paths.

    Spawns a single long-lived ``cygpath -f - -wa`` process, feeds it one
    path per line over stdin, and reads one converted path back per line.
    Returns the converted paths in input order; returns [] for empty input
    without spawning a process.

    NOTE(review): stdin.write with a str assumes Python 2 (str == bytes);
    under Python 3 this would need to encode — confirm before porting.
    """
    if not cygdrive_names:
        return []
    # -f -  : read file names from stdin
    # -wa   : emit the absolute Windows form
    cmd = ['cygpath', '-f', '-', '-wa']
    process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    idl_file_names = []
    for file_name in cygdrive_names:
        process.stdin.write('%s\n' % file_name)
        # Flush so cygpath sees the line now; replies are read in lock-step,
        # exactly one output line per input line.
        process.stdin.flush()
        idl_file_names.append(process.stdout.readline().rstrip())
    process.stdin.close()
    process.wait()
    return idl_file_names
def read_idl_files_list_from_file(filename):
    """Similar to read_file_to_list, but also resolves cygpath."""
    with open(filename) as input_file:
        file_names = sorted(os.path.realpath(line.rstrip('\n'))
                            for line in input_file)
    # Partition into native paths and Cygwin /cygdrive paths; the latter
    # are translated by cygpath and appended afterwards.
    idl_file_names = [name for name in file_names
                      if not name.startswith('/cygdrive')]
    cygdrive_names = [name for name in file_names
                      if name.startswith('/cygdrive')]
    idl_file_names.extend(resolve_cygpath(cygdrive_names))
    return idl_file_names
def read_pickle_files(pickle_filenames):
    """Lazily yield the unpickled contents of each file in |pickle_filenames|.

    Files are opened in binary mode: pickle data is binary, and a text-mode
    read corrupts it on Windows and fails on Python 3.
    """
    for pickle_filename in pickle_filenames:
        with open(pickle_filename, 'rb') as pickle_file:
            yield pickle.load(pickle_file)
def write_file(new_text, destination_filename, only_if_changed):
    """Write |new_text| to |destination_filename|, creating parent dirs.

    When |only_if_changed| is true, an existing file with identical contents
    is left untouched so its timestamp does not change.
    """
    if only_if_changed and os.path.isfile(destination_filename):
        with open(destination_filename) as destination_file:
            if destination_file.read() == new_text:
                return
    destination_dirname = os.path.dirname(destination_filename)
    # Guard against a bare filename: os.makedirs('') raises, and the
    # original code would crash when the destination had no directory part.
    if destination_dirname and not os.path.exists(destination_dirname):
        os.makedirs(destination_dirname)
    with open(destination_filename, 'w') as destination_file:
        destination_file.write(new_text)
def write_pickle_file(pickle_filename, data, only_if_changed):
    """Pickle |data| to |pickle_filename|.

    When |only_if_changed| is true and the existing file already unpickles
    to an equal value, it is left untouched.  Pickle streams are binary, so
    both the read-back check and the write use binary modes ('rb'/'wb');
    the old text modes corrupt data on Windows and fail on Python 3.
    """
    if only_if_changed and os.path.isfile(pickle_filename):
        with open(pickle_filename, 'rb') as pickle_file:
            try:
                if pickle.load(pickle_file) == data:
                    return
            except Exception:
                # If trouble unpickling, overwrite
                pass
    with open(pickle_filename, 'wb') as pickle_file:
        pickle.dump(data, pickle_file)
################################################################################
# IDL parsing
#
# We use regular expressions for parsing; this is incorrect (Web IDL is not a
# regular language), but simple and sufficient in practice.
# Leading and trailing context (e.g. following '{') used to avoid false matches.
################################################################################
def is_callback_interface_from_idl(file_contents):
    """Return True if |file_contents| declares a callback interface."""
    return re.search(r'callback\s+interface\s+\w+\s*{', file_contents) is not None
def should_generate_impl_file_from_idl(file_contents):
    """True when a given IDL file contents could generate .h/.cpp files."""
    # FIXME: This would be error-prone and we should use AST rather than
    # improving the regexp pattern.
    declaration_pattern = r'(interface|dictionary|exception)\s+\w+'
    return re.search(declaration_pattern, file_contents) is not None
def match_interface_extended_attributes_from_idl(file_contents):
    """Return a regexp match whose group(1) is the extended-attribute list
    ([...]) preceding the first interface/exception declaration, or None."""
    # Strip comments
    # re.compile needed b/c Python 2.6 doesn't support flags in re.sub
    single_line_comment_re = re.compile(r'//.*$', flags=re.MULTILINE)
    block_comment_re = re.compile(r'/\*.*?\*/', flags=re.MULTILINE | re.DOTALL)
    file_contents = re.sub(single_line_comment_re, '', file_contents)
    file_contents = re.sub(block_comment_re, '', file_contents)
    match = re.search(r'\[(.*)\]\s*'
                      r'((callback|partial)\s+)?'
                      r'(interface|exception)\s+'
                      r'\w+\s*'
                      r'(:\s*\w+\s*)?'
                      r'{',
                      file_contents, flags=re.DOTALL)
    return match


def get_interface_extended_attributes_from_idl(file_contents):
    """Return a {name: value} dict of the interface's extended attributes.

    Valueless attributes (e.g. [NoInterfaceObject]) map to ''.
    """
    match = match_interface_extended_attributes_from_idl(file_contents)
    if not match:
        return {}
    extended_attributes_string = match.group(1)
    extended_attributes = {}
    # FIXME: this splitting is WRONG: it fails on extended attributes where lists of
    # multiple values are used, which are seperated by a comma and a space.
    parts = [extended_attribute.strip()
             for extended_attribute in re.split(',\s+', extended_attributes_string)
             # Discard empty parts, which may exist due to trailing comma
             if extended_attribute.strip()]
    for part in parts:
        # Use the str.strip method rather than the Python-2-only
        # string.strip function so this also runs on Python 3.
        name, _, value = part.partition('=')
        extended_attributes[name.strip()] = value.strip()
    return extended_attributes
def get_interface_exposed_arguments(file_contents):
    """Return the arguments of an argument-style [Exposed(...)] attribute.

    Returns a list of {'exposed': ..., 'runtime_enabled': ...} dicts, or
    None when the interface has no extended attributes or no Exposed(...)
    with arguments.
    """
    match = match_interface_extended_attributes_from_idl(file_contents)
    if not match:
        return None
    # (The unused group(1) binding of the original was dropped; the first
    # match only gates on the presence of an extended-attribute block.)
    match = re.search(r'[^=]\bExposed\(([^)]*)\)', file_contents)
    if not match:
        return None
    arguments = []
    # str.strip via a generator expression: the Python-2-only string.strip
    # function was removed in Python 3.
    for argument in (arg.strip() for arg in match.group(1).split(',')):
        exposed, runtime_enabled = argument.split()
        arguments.append({'exposed': exposed, 'runtime_enabled': runtime_enabled})
    return arguments
| bsd-3-clause |
robhudson/kuma | vendor/packages/mock.py | 424 | 75527 | # mock.py
# Test tools for mocking and patching.
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# mock 1.0
# http://www.voidspace.org.uk/python/mock/
# Released subject to the BSD License
# Please see http://www.voidspace.org.uk/python/license.shtml
# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
# Comments, suggestions and bug reports welcome.
# Public API of the mock library: `from mock import *` exposes exactly
# these names.
__all__ = (
    'Mock',
    'MagicMock',
    'patch',
    'sentinel',
    'DEFAULT',
    'ANY',
    'call',
    'create_autospec',
    'FILTER_DIR',
    'NonCallableMock',
    'NonCallableMagicMock',
    'mock_open',
    'PropertyMock',
)
# Library release this vendored copy corresponds to.
__version__ = '1.0.1'
import pprint
import sys
# --- Python version / platform compatibility shims -------------------------
# This library supports Python 2.4 through 3.x and Jython, so several names
# are defined conditionally here and used throughout the module.
try:
    import inspect
except ImportError:
    # for alternative platforms that
    # may not have inspect
    inspect = None
try:
    from functools import wraps as original_wraps
except ImportError:
    # Python 2.4 compatibility
    def wraps(original):
        def inner(f):
            f.__name__ = original.__name__
            f.__doc__ = original.__doc__
            f.__module__ = original.__module__
            f.__wrapped__ = original
            return f
        return inner
else:
    if sys.version_info[:2] >= (3, 3):
        wraps = original_wraps
    else:
        # Pre-3.3 functools.wraps does not set __wrapped__; add it here.
        def wraps(func):
            def inner(f):
                f = original_wraps(func)(f)
                f.__wrapped__ = func
                return f
            return inner
try:
    unicode
except NameError:
    # Python 3
    basestring = unicode = str
try:
    long
except NameError:
    # Python 3
    long = int
try:
    BaseException
except NameError:
    # Python 2.4 compatibility
    BaseException = Exception
try:
    next
except NameError:
    # Python 2.4/2.5: no next() builtin; call the iterator method directly.
    def next(obj):
        return obj.next()
BaseExceptions = (BaseException,)
if 'java' in sys.platform:
    # jython
    import java
    BaseExceptions = (BaseException, java.lang.Throwable)
try:
    _isidentifier = str.isidentifier
except AttributeError:
    # Python 2.X
    import keyword
    import re
    regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
    # Emulate str.isidentifier: a valid identifier that is not a keyword.
    def _isidentifier(string):
        if string in keyword.kwlist:
            return False
        return regex.match(string)
inPy3k = sys.version_info[0] == 3
# Needed to work around Python 3 bug where use of "super" interferes with
# defining __class__ as a descriptor
_super = super
# Attribute names that differ between Python 2 and Python 3.
self = 'im_self'
builtin = '__builtin__'
if inPy3k:
    self = '__self__'
    builtin = 'builtins'
# When True, dir(mock) is filtered down to useful members (see __dir__).
FILTER_DIR = True
def _is_instance_mock(obj):
    """True if |obj| is any kind of mock instance."""
    # can't use isinstance on Mock objects because they override __class__
    # The base class for all mocks is NonCallableMock
    return issubclass(type(obj), NonCallableMock)
def _is_exception(obj):
    """True if |obj| is an exception instance or an exception class."""
    return (
        isinstance(obj, BaseExceptions) or
        isinstance(obj, ClassTypes) and issubclass(obj, BaseExceptions)
    )
class _slotted(object):
    # Throwaway class used only to obtain the slot-descriptor type below.
    __slots__ = ['a']
# Descriptor types (slot descriptors and properties) that need special
# handling when specs are inspected.
DescriptorTypes = (
    type(_slotted.a),
    property,
)
def _getsignature(func, skipfirst, instance=False):
    """Return (signature_string, func) for |func|, or None if unavailable.

    The signature string is the argument list without parentheses and with
    default values blanked out.  For classes, __init__ is inspected (unless
    |instance| is true); non-functions fall back to their __call__.  When
    |skipfirst| is true, or the function is a bound method/classmethod, the
    leading self/cls argument is dropped.  Returns None for C functions
    that cannot be introspected.
    """
    if inspect is None:
        raise ImportError('inspect module not available')
    if isinstance(func, ClassTypes) and not instance:
        try:
            func = func.__init__
        except AttributeError:
            return
        skipfirst = True
    elif not isinstance(func, FunctionTypes):
        # for classes where instance is True we end up here too
        try:
            func = func.__call__
        except AttributeError:
            return
    if inPy3k:
        try:
            argspec = inspect.getfullargspec(func)
        except TypeError:
            # C function / method, possibly inherited object().__init__
            return
        regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann = argspec
    else:
        try:
            regargs, varargs, varkwargs, defaults = inspect.getargspec(func)
        except TypeError:
            # C function / method, possibly inherited object().__init__
            return
    # instance methods and classmethods need to lose the self argument
    if getattr(func, self, None) is not None:
        regargs = regargs[1:]
    if skipfirst:
        # this condition and the above one are never both True - why?
        regargs = regargs[1:]
    if inPy3k:
        signature = inspect.formatargspec(
            regargs, varargs, varkw, defaults,
            kwonly, kwonlydef, ann, formatvalue=lambda value: "")
    else:
        signature = inspect.formatargspec(
            regargs, varargs, varkwargs, defaults,
            formatvalue=lambda value: "")
    return signature[1:-1], func
def _check_signature(func, mock, skipfirst, instance=False):
    """Install a signature-checking stub on |mock|'s class.

    Builds a lambda with the same signature as |func| and stores it as
    _mock_check_sig, so calling the mock with wrong arguments raises a
    TypeError just like the real function would.  Silently does nothing
    when |func| is not callable or cannot be introspected.
    """
    if not _callable(func):
        return
    result = _getsignature(func, skipfirst, instance)
    if result is None:
        return
    signature, func = result
    # can't use self because "self" is common as an argument name
    # unfortunately even not in the first place
    src = "lambda _mock_self, %s: None" % signature
    checksig = eval(src, {})
    _copy_func_details(func, checksig)
    type(mock)._mock_check_sig = checksig
def _copy_func_details(func, funcopy):
    """Copy name, docstring, module and defaults from |func| onto |funcopy|."""
    funcopy.__name__ = func.__name__
    funcopy.__doc__ = func.__doc__
    #funcopy.__dict__.update(func.__dict__)
    funcopy.__module__ = func.__module__
    if not inPy3k:
        funcopy.func_defaults = func.func_defaults
        return
    # Python 3 attribute names; __kwdefaults__ has no Python 2 equivalent.
    funcopy.__defaults__ = func.__defaults__
    funcopy.__kwdefaults__ = func.__kwdefaults__
def _callable(obj):
    """True if |obj| is a class or exposes a __call__ attribute."""
    if isinstance(obj, ClassTypes):
        return True
    return getattr(obj, '__call__', None) is not None
def _is_list(obj):
# checks for list or tuples
# XXXX badly named!
return type(obj) in (list, tuple)
def _instance_callable(obj):
    """Given an object, return True if the object is callable.
    For classes, return True if instances would be callable."""
    if not isinstance(obj, ClassTypes):
        # already an instance
        return getattr(obj, '__call__', None) is not None
    klass = obj
    # uses __bases__ instead of __mro__ so that we work with old style classes
    if klass.__dict__.get('__call__') is not None:
        return True
    # Recursive walk of the base classes, since old-style classes have no
    # linearised __mro__ to scan.
    for base in klass.__bases__:
        if _instance_callable(base):
            return True
    return False
def _set_signature(mock, original, instance=False):
    # creates a function with signature (*args, **kwargs) that delegates to a
    # mock. It still does signature checking by calling a lambda with the same
    # signature as the original.
    #
    # Returns the generated delegating function, or None when |original|
    # is not callable or cannot be introspected (C functions).
    if not _callable(original):
        return
    skipfirst = isinstance(original, ClassTypes)
    result = _getsignature(original, skipfirst, instance)
    if result is None:
        # was a C function (e.g. object().__init__ ) that can't be mocked
        return
    signature, func = result
    src = "lambda %s: None" % signature
    checksig = eval(src, {})
    _copy_func_details(func, checksig)
    name = original.__name__
    if not _isidentifier(name):
        name = 'funcopy'
    context = {'_checksig_': checksig, 'mock': mock}
    # exec a tiny delegating def so the copy carries the original's name.
    src = """def %s(*args, **kwargs):
    _checksig_(*args, **kwargs)
    return mock(*args, **kwargs)""" % name
    exec (src, context)
    funcopy = context[name]
    _setup_func(funcopy, mock)
    return funcopy
def _setup_func(funcopy, mock):
    """Wire the generated function copy up to |mock|.

    Mirrors the mock's assertion helpers, call-recording attributes and
    reset_mock onto |funcopy| so the copy can be used interchangeably with
    the mock it delegates to.
    """
    funcopy.mock = mock
    # can't use isinstance with mocks
    if not _is_instance_mock(mock):
        return
    def assert_called_with(*args, **kwargs):
        return mock.assert_called_with(*args, **kwargs)
    def assert_called_once_with(*args, **kwargs):
        return mock.assert_called_once_with(*args, **kwargs)
    def assert_has_calls(*args, **kwargs):
        return mock.assert_has_calls(*args, **kwargs)
    def assert_any_call(*args, **kwargs):
        return mock.assert_any_call(*args, **kwargs)
    def reset_mock():
        # Reset both the copy's own records and the delegated mock's state.
        funcopy.method_calls = _CallList()
        funcopy.mock_calls = _CallList()
        mock.reset_mock()
        ret = funcopy.return_value
        if _is_instance_mock(ret) and not ret is mock:
            ret.reset_mock()
    funcopy.called = False
    funcopy.call_count = 0
    funcopy.call_args = None
    funcopy.call_args_list = _CallList()
    funcopy.method_calls = _CallList()
    funcopy.mock_calls = _CallList()
    funcopy.return_value = mock.return_value
    funcopy.side_effect = mock.side_effect
    funcopy._mock_children = mock._mock_children
    funcopy.assert_called_with = assert_called_with
    funcopy.assert_called_once_with = assert_called_once_with
    funcopy.assert_has_calls = assert_has_calls
    funcopy.assert_any_call = assert_any_call
    funcopy.reset_mock = reset_mock
    mock._mock_delegate = funcopy
def _is_magic(name):
return '__%s__' % name[2:-2] == name
class _SentinelObject(object):
"A unique, named, sentinel object."
def __init__(self, name):
self.name = name
def __repr__(self):
return 'sentinel.%s' % self.name
class _Sentinel(object):
"""Access attributes to return a named object, usable as a sentinel."""
def __init__(self):
self._sentinels = {}
def __getattr__(self, name):
if name == '__bases__':
# Without this help(mock) raises an exception
raise AttributeError
return self._sentinels.setdefault(name, _SentinelObject(name))
# The module-level sentinel factory plus the well-known sentinels used
# internally: DEFAULT marks "no explicit value", MISSING/DELETED mark
# absent and deleted child attributes.
sentinel = _Sentinel()
DEFAULT = sentinel.DEFAULT
_missing = sentinel.MISSING
_deleted = sentinel.DELETED
# An old-style (Python 2 classic) class, defined only to capture the
# classic-class type object below.
class OldStyleClass:
    pass
ClassType = type(OldStyleClass)
def _copy(value):
if type(value) in (dict, list, tuple, set):
return type(value)(value)
return value
# All "class" types: new-style only on Python 3, plus classic classes on
# Python 2.
ClassTypes = (type,)
if not inPy3k:
    ClassTypes = (type, ClassType)
# Attribute names that NonCallableMock.__setattr__ stores directly on the
# instance instead of treating as mock children; _delegating_property
# extends this set for each delegated attribute.
_allowed_names = set(
    [
        'return_value', '_mock_return_value', 'side_effect',
        '_mock_side_effect', '_mock_parent', '_mock_new_parent',
        '_mock_name', '_mock_new_name'
    ]
)
def _delegating_property(name):
    """Build a property that reads/writes '_mock_<name>' on the mock itself,
    or forwards to the mock's _mock_delegate when one is installed.

    Also registers |name| in _allowed_names so __setattr__ stores it
    directly.  The name/_the_name defaults bind the loop-free closure
    values at definition time.
    """
    _allowed_names.add(name)
    _the_name = '_mock_' + name
    def _get(self, name=name, _the_name=_the_name):
        sig = self._mock_delegate
        if sig is None:
            return getattr(self, _the_name)
        return getattr(sig, name)
    def _set(self, value, name=name, _the_name=_the_name):
        sig = self._mock_delegate
        if sig is None:
            self.__dict__[_the_name] = value
        else:
            setattr(sig, name, value)
    return property(_get, _set)
class _CallList(list):
def __contains__(self, value):
if not isinstance(value, list):
return list.__contains__(self, value)
len_value = len(value)
len_self = len(self)
if len_value > len_self:
return False
for i in range(0, len_self - len_value + 1):
sub_list = self[i:i+len_value]
if sub_list == value:
return True
return False
def __repr__(self):
return pprint.pformat(list(self))
def _check_and_set_parent(parent, value, name, new_name):
    """Adopt mock |value| as a child of |parent| when that is safe.

    Returns True when the parent/name links were installed; False when
    |value| is not a mock, already has a name or parent, or is |parent|
    itself (directly or via the _mock_new_parent chain).
    """
    if not _is_instance_mock(value):
        return False
    if ((value._mock_name or value._mock_new_name) or
            (value._mock_parent is not None) or
            (value._mock_new_parent is not None)):
        return False
    _parent = parent
    while _parent is not None:
        # setting a mock (value) as a child or return value of itself
        # should not modify the mock
        if _parent is value:
            return False
        _parent = _parent._mock_new_parent
    if new_name:
        value._mock_new_parent = parent
        value._mock_new_name = new_name
    if name:
        value._mock_parent = parent
        value._mock_name = name
    return True
class Base(object):
    # Root of the mock class hierarchy: holds the shared defaults and an
    # __init__ that swallows all arguments, terminating the cooperative
    # _super(...).__init__ chains used by NonCallableMock/CallableMixin.
    _mock_return_value = DEFAULT
    _mock_side_effect = None
    def __init__(self, *args, **kwargs):
        pass
class NonCallableMock(Base):
    """A non-callable version of `Mock`"""
    def __new__(cls, *args, **kw):
        # every instance has its own class
        # so we can create magic methods on the
        # class without stomping on other mocks
        new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
        instance = object.__new__(new)
        return instance
    def __init__(
        self, spec=None, wraps=None, name=None, spec_set=None,
        parent=None, _spec_state=None, _new_name='', _new_parent=None,
        **kwargs
    ):
        if _new_parent is None:
            _new_parent = parent
        # Write through __dict__ to avoid triggering our own __setattr__
        # while the mock is only partially constructed.
        __dict__ = self.__dict__
        __dict__['_mock_parent'] = parent
        __dict__['_mock_name'] = name
        __dict__['_mock_new_name'] = _new_name
        __dict__['_mock_new_parent'] = _new_parent
        if spec_set is not None:
            spec = spec_set
            spec_set = True
        self._mock_add_spec(spec, spec_set)
        __dict__['_mock_children'] = {}
        __dict__['_mock_wraps'] = wraps
        __dict__['_mock_delegate'] = None
        __dict__['_mock_called'] = False
        __dict__['_mock_call_args'] = None
        __dict__['_mock_call_count'] = 0
        __dict__['_mock_call_args_list'] = _CallList()
        __dict__['_mock_mock_calls'] = _CallList()
        __dict__['method_calls'] = _CallList()
        if kwargs:
            self.configure_mock(**kwargs)
        _super(NonCallableMock, self).__init__(
            spec, wraps, name, spec_set, parent,
            _spec_state
        )
    def attach_mock(self, mock, attribute):
        """
        Attach a mock as an attribute of this one, replacing its name and
        parent. Calls to the attached mock will be recorded in the
        `method_calls` and `mock_calls` attributes of this one."""
        mock._mock_parent = None
        mock._mock_new_parent = None
        mock._mock_name = ''
        mock._mock_new_name = None
        setattr(self, attribute, mock)
    def mock_add_spec(self, spec, spec_set=False):
        """Add a spec to a mock. `spec` can either be an object or a
        list of strings. Only attributes on the `spec` can be fetched as
        attributes from the mock.
        If `spec_set` is True then only attributes on the spec can be set."""
        self._mock_add_spec(spec, spec_set)
    def _mock_add_spec(self, spec, spec_set):
        # Record the spec as a list of permitted attribute names plus the
        # spec's class (reported via the __class__ property below).
        _spec_class = None
        if spec is not None and not _is_list(spec):
            if isinstance(spec, ClassTypes):
                _spec_class = spec
            else:
                _spec_class = _get_class(spec)
            spec = dir(spec)
        __dict__ = self.__dict__
        __dict__['_spec_class'] = _spec_class
        __dict__['_spec_set'] = spec_set
        __dict__['_mock_methods'] = spec
    def __get_return_value(self):
        # Lazily create the return-value mock on first access.
        ret = self._mock_return_value
        if self._mock_delegate is not None:
            ret = self._mock_delegate.return_value
        if ret is DEFAULT:
            ret = self._get_child_mock(
                _new_parent=self, _new_name='()'
            )
            self.return_value = ret
        return ret
    def __set_return_value(self, value):
        if self._mock_delegate is not None:
            self._mock_delegate.return_value = value
        else:
            self._mock_return_value = value
            _check_and_set_parent(self, value, None, '()')
    __return_value_doc = "The value to be returned when the mock is called."
    return_value = property(__get_return_value, __set_return_value,
                            __return_value_doc)
    @property
    def __class__(self):
        # Report the spec's class so isinstance() checks against the spec
        # succeed.
        if self._spec_class is None:
            return type(self)
        return self._spec_class
    called = _delegating_property('called')
    call_count = _delegating_property('call_count')
    call_args = _delegating_property('call_args')
    call_args_list = _delegating_property('call_args_list')
    mock_calls = _delegating_property('mock_calls')
    def __get_side_effect(self):
        sig = self._mock_delegate
        if sig is None:
            return self._mock_side_effect
        return sig.side_effect
    def __set_side_effect(self, value):
        # Non-callable, non-exception iterables are converted to iterators
        # so each call consumes the next value.
        value = _try_iter(value)
        sig = self._mock_delegate
        if sig is None:
            self._mock_side_effect = value
        else:
            sig.side_effect = value
    side_effect = property(__get_side_effect, __set_side_effect)
    def reset_mock(self):
        "Restore the mock object to its initial state."
        self.called = False
        self.call_args = None
        self.call_count = 0
        self.mock_calls = _CallList()
        self.call_args_list = _CallList()
        self.method_calls = _CallList()
        for child in self._mock_children.values():
            if isinstance(child, _SpecState):
                continue
            child.reset_mock()
        ret = self._mock_return_value
        if _is_instance_mock(ret) and ret is not self:
            ret.reset_mock()
    def configure_mock(self, **kwargs):
        """Set attributes on the mock through keyword arguments.
        Attributes plus return values and side effects can be set on child
        mocks using standard dot notation and unpacking a dictionary in the
        method call:
        >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
        >>> mock.configure_mock(**attrs)"""
        for arg, val in sorted(kwargs.items(),
                               # we sort on the number of dots so that
                               # attributes are set before we set attributes on
                               # attributes
                               key=lambda entry: entry[0].count('.')):
            args = arg.split('.')
            final = args.pop()
            obj = self
            for entry in args:
                obj = getattr(obj, entry)
            setattr(obj, final, val)
    def __getattr__(self, name):
        # Child mocks are created lazily here and cached in _mock_children;
        # spec'd mocks refuse names outside the spec.
        if name == '_mock_methods':
            raise AttributeError(name)
        elif self._mock_methods is not None:
            if name not in self._mock_methods or name in _all_magics:
                raise AttributeError("Mock object has no attribute %r" % name)
        elif _is_magic(name):
            raise AttributeError(name)
        result = self._mock_children.get(name)
        if result is _deleted:
            raise AttributeError(name)
        elif result is None:
            wraps = None
            if self._mock_wraps is not None:
                # XXXX should we get the attribute without triggering code
                # execution?
                wraps = getattr(self._mock_wraps, name)
            result = self._get_child_mock(
                parent=self, name=name, wraps=wraps, _new_name=name,
                _new_parent=self
            )
            self._mock_children[name] = result
        elif isinstance(result, _SpecState):
            result = create_autospec(
                result.spec, result.spec_set, result.instance,
                result.parent, result.name
            )
            self._mock_children[name] = result
        return result
    def __repr__(self):
        # Reconstruct the dotted access path by walking the parent chain.
        _name_list = [self._mock_new_name]
        _parent = self._mock_new_parent
        last = self
        dot = '.'
        if _name_list == ['()']:
            dot = ''
        seen = set()
        while _parent is not None:
            last = _parent
            _name_list.append(_parent._mock_new_name + dot)
            dot = '.'
            if _parent._mock_new_name == '()':
                dot = ''
            _parent = _parent._mock_new_parent
            # use ids here so as not to call __hash__ on the mocks
            if id(_parent) in seen:
                break
            seen.add(id(_parent))
        _name_list = list(reversed(_name_list))
        _first = last._mock_name or 'mock'
        if len(_name_list) > 1:
            if _name_list[1] not in ('()', '().'):
                _first += '.'
        _name_list[0] = _first
        name = ''.join(_name_list)
        name_string = ''
        if name not in ('mock', 'mock.'):
            name_string = ' name=%r' % name
        spec_string = ''
        if self._spec_class is not None:
            spec_string = ' spec=%r'
            if self._spec_set:
                spec_string = ' spec_set=%r'
            spec_string = spec_string % self._spec_class.__name__
        return "<%s%s%s id='%s'>" % (
            type(self).__name__,
            name_string,
            spec_string,
            id(self)
        )
    def __dir__(self):
        """Filter the output of `dir(mock)` to only useful members.
        XXXX
        """
        extras = self._mock_methods or []
        from_type = dir(type(self))
        from_dict = list(self.__dict__)
        if FILTER_DIR:
            from_type = [e for e in from_type if not e.startswith('_')]
            from_dict = [e for e in from_dict if not e.startswith('_') or
                         _is_magic(e)]
        return sorted(set(extras + from_type + from_dict +
                          list(self._mock_children)))
    def __setattr__(self, name, value):
        # Routes: direct storage for bookkeeping names, spec enforcement,
        # special handling for magic methods, and parent adoption for
        # mock-valued attributes.
        if name in _allowed_names:
            # property setters go through here
            return object.__setattr__(self, name, value)
        elif (self._spec_set and self._mock_methods is not None and
              name not in self._mock_methods and
              name not in self.__dict__):
            raise AttributeError("Mock object has no attribute '%s'" % name)
        elif name in _unsupported_magics:
            msg = 'Attempting to set unsupported magic method %r.' % name
            raise AttributeError(msg)
        elif name in _all_magics:
            if self._mock_methods is not None and name not in self._mock_methods:
                raise AttributeError("Mock object has no attribute '%s'" % name)
            if not _is_instance_mock(value):
                setattr(type(self), name, _get_method(name, value))
                original = value
                value = lambda *args, **kw: original(self, *args, **kw)
            else:
                # only set _new_name and not name so that mock_calls is tracked
                # but not method calls
                _check_and_set_parent(self, value, None, name)
                setattr(type(self), name, value)
                self._mock_children[name] = value
        elif name == '__class__':
            self._spec_class = value
            return
        else:
            if _check_and_set_parent(self, value, name, name):
                self._mock_children[name] = value
        return object.__setattr__(self, name, value)
    def __delattr__(self, name):
        # Deleted attributes are remembered via the _deleted sentinel so
        # later access raises AttributeError instead of recreating a child.
        if name in _all_magics and name in type(self).__dict__:
            delattr(type(self), name)
            if name not in self.__dict__:
                # for magic methods that are still MagicProxy objects and
                # not set on the instance itself
                return
        if name in self.__dict__:
            object.__delattr__(self, name)
        obj = self._mock_children.get(name, _missing)
        if obj is _deleted:
            raise AttributeError(name)
        if obj is not _missing:
            del self._mock_children[name]
        self._mock_children[name] = _deleted
    def _format_mock_call_signature(self, args, kwargs):
        # Render "name(arg1, kw=...)" for assertion failure messages.
        name = self._mock_name or 'mock'
        return _format_call_signature(name, args, kwargs)
    def _format_mock_failure_message(self, args, kwargs):
        message = 'Expected call: %s\nActual call: %s'
        expected_string = self._format_mock_call_signature(args, kwargs)
        call_args = self.call_args
        if len(call_args) == 3:
            call_args = call_args[1:]
        actual_string = self._format_mock_call_signature(*call_args)
        return message % (expected_string, actual_string)
    def assert_called_with(_mock_self, *args, **kwargs):
        """assert that the mock was called with the specified arguments.
        Raises an AssertionError if the args and keyword args passed in are
        different to the last call to the mock."""
        self = _mock_self
        if self.call_args is None:
            expected = self._format_mock_call_signature(args, kwargs)
            raise AssertionError('Expected call: %s\nNot called' % (expected,))
        if self.call_args != (args, kwargs):
            msg = self._format_mock_failure_message(args, kwargs)
            raise AssertionError(msg)
    def assert_called_once_with(_mock_self, *args, **kwargs):
        """assert that the mock was called exactly once and with the specified
        arguments."""
        self = _mock_self
        if not self.call_count == 1:
            msg = ("Expected to be called once. Called %s times." %
                   self.call_count)
            raise AssertionError(msg)
        return self.assert_called_with(*args, **kwargs)
    def assert_has_calls(self, calls, any_order=False):
        """assert the mock has been called with the specified calls.
        The `mock_calls` list is checked for the calls.
        If `any_order` is False (the default) then the calls must be
        sequential. There can be extra calls before or after the
        specified calls.
        If `any_order` is True then the calls can be in any order, but
        they must all appear in `mock_calls`."""
        if not any_order:
            if calls not in self.mock_calls:
                raise AssertionError(
                    'Calls not found.\nExpected: %r\n'
                    'Actual: %r' % (calls, self.mock_calls)
                )
            return
        all_calls = list(self.mock_calls)
        not_found = []
        for kall in calls:
            try:
                all_calls.remove(kall)
            except ValueError:
                not_found.append(kall)
        if not_found:
            raise AssertionError(
                '%r not all found in call list' % (tuple(not_found),)
            )
    def assert_any_call(self, *args, **kwargs):
        """assert the mock has been called with the specified arguments.
        The assert passes if the mock has *ever* been called, unlike
        `assert_called_with` and `assert_called_once_with` that only pass if
        the call is the most recent one."""
        kall = call(*args, **kwargs)
        if kall not in self.call_args_list:
            expected_string = self._format_mock_call_signature(args, kwargs)
            raise AssertionError(
                '%s call not found' % expected_string
            )
    def _get_child_mock(self, **kw):
        """Create the child mocks for attributes and return value.
        By default child mocks will be the same type as the parent.
        Subclasses of Mock may want to override this to customize the way
        child mocks are made.
        For non-callable mocks the callable variant will be used (rather than
        any custom subclass)."""
        _type = type(self)
        if not issubclass(_type, CallableMixin):
            if issubclass(_type, NonCallableMagicMock):
                klass = MagicMock
            elif issubclass(_type, NonCallableMock) :
                klass = Mock
            else:
                klass = _type.__mro__[1]
        return klass(**kw)
def _try_iter(obj):
    """Convert |obj| to an iterator for side_effect use, where sensible.

    None, exceptions and callables are passed through untouched (they get
    dedicated handling in _mock_call); plain iterables become iterators so
    each mock call consumes the next value.
    """
    if obj is None:
        return obj
    if _is_exception(obj):
        return obj
    if _callable(obj):
        return obj
    try:
        return iter(obj)
    except TypeError:
        # XXXX backwards compatibility
        # but this will blow up on first call - so maybe we should fail early?
        return obj
class CallableMixin(Base):
    # Mixed into NonCallableMock (producing Mock) to add __call__ with full
    # call recording, side_effect and return_value handling.
    def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
                 wraps=None, name=None, spec_set=None, parent=None,
                 _spec_state=None, _new_name='', _new_parent=None, **kwargs):
        self.__dict__['_mock_return_value'] = return_value
        _super(CallableMixin, self).__init__(
            spec, wraps, name, spec_set, parent,
            _spec_state, _new_name, _new_parent, **kwargs
        )
        self.side_effect = side_effect
    def _mock_check_sig(self, *args, **kwargs):
        # stub method that can be replaced with one with a specific signature
        pass
    def __call__(_mock_self, *args, **kwargs):
        # can't use self in-case a function / method we are mocking uses self
        # in the signature
        _mock_self._mock_check_sig(*args, **kwargs)
        return _mock_self._mock_call(*args, **kwargs)
    def _mock_call(_mock_self, *args, **kwargs):
        # Record this call on self and on every ancestor (mock_calls and
        # method_calls), then produce the result via side_effect, wraps or
        # return_value.
        self = _mock_self
        self.called = True
        self.call_count += 1
        self.call_args = _Call((args, kwargs), two=True)
        self.call_args_list.append(_Call((args, kwargs), two=True))
        _new_name = self._mock_new_name
        _new_parent = self._mock_new_parent
        self.mock_calls.append(_Call(('', args, kwargs)))
        seen = set()
        skip_next_dot = _new_name == '()'
        do_method_calls = self._mock_parent is not None
        name = self._mock_name
        while _new_parent is not None:
            this_mock_call = _Call((_new_name, args, kwargs))
            if _new_parent._mock_new_name:
                dot = '.'
                if skip_next_dot:
                    dot = ''
                skip_next_dot = False
                if _new_parent._mock_new_name == '()':
                    skip_next_dot = True
                _new_name = _new_parent._mock_new_name + dot + _new_name
            if do_method_calls:
                if _new_name == name:
                    this_method_call = this_mock_call
                else:
                    this_method_call = _Call((name, args, kwargs))
                _new_parent.method_calls.append(this_method_call)
                do_method_calls = _new_parent._mock_parent is not None
                if do_method_calls:
                    name = _new_parent._mock_name + '.' + name
            _new_parent.mock_calls.append(this_mock_call)
            _new_parent = _new_parent._mock_new_parent
            # use ids here so as not to call __hash__ on the mocks
            _new_parent_id = id(_new_parent)
            if _new_parent_id in seen:
                break
            seen.add(_new_parent_id)
        ret_val = DEFAULT
        effect = self.side_effect
        if effect is not None:
            if _is_exception(effect):
                raise effect
            if not _callable(effect):
                # Iterator side_effect: each call yields (or raises) the
                # next value.
                result = next(effect)
                if _is_exception(result):
                    raise result
                return result
            ret_val = effect(*args, **kwargs)
            if ret_val is DEFAULT:
                ret_val = self.return_value
        if (self._mock_wraps is not None and
                self._mock_return_value is DEFAULT):
            return self._mock_wraps(*args, **kwargs)
        if ret_val is DEFAULT:
            ret_val = self.return_value
        return ret_val
class Mock(CallableMixin, NonCallableMock):
    """
    Create a new `Mock` object. `Mock` takes several optional arguments
    that specify the behaviour of the Mock object:
    * `spec`: This can be either a list of strings or an existing object (a
    class or instance) that acts as the specification for the mock object. If
    you pass in an object then a list of strings is formed by calling dir on
    the object (excluding unsupported magic attributes and methods). Accessing
    any attribute not in this list will raise an `AttributeError`.
    If `spec` is an object (rather than a list of strings) then
    `mock.__class__` returns the class of the spec object. This allows mocks
    to pass `isinstance` tests.
    * `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
    or get an attribute on the mock that isn't on the object passed as
    `spec_set` will raise an `AttributeError`.
    * `side_effect`: A function to be called whenever the Mock is called. See
    the `side_effect` attribute. Useful for raising exceptions or
    dynamically changing return values. The function is called with the same
    arguments as the mock, and unless it returns `DEFAULT`, the return
    value of this function is used as the return value.
    Alternatively `side_effect` can be an exception class or instance. In
    this case the exception will be raised when the mock is called.
    If `side_effect` is an iterable then each call to the mock will return
    the next value from the iterable. If any of the members of the iterable
    are exceptions they will be raised instead of returned.
    * `return_value`: The value returned when the mock is called. By default
    this is a new Mock (created on first access). See the
    `return_value` attribute.
    * `wraps`: Item for the mock object to wrap. If `wraps` is not None then
    calling the Mock will pass the call through to the wrapped object
    (returning the real result). Attribute access on the mock will return a
    Mock object that wraps the corresponding attribute of the wrapped object
    (so attempting to access an attribute that doesn't exist will raise an
    `AttributeError`).
    If the mock has an explicit `return_value` set then calls are not passed
    to the wrapped object and the `return_value` is returned instead.
    * `name`: If the mock has a name then it will be used in the repr of the
    mock. This can be useful for debugging. The name is propagated to child
    mocks.
    Mocks can also be called with arbitrary keyword arguments. These will be
    used to set attributes on the mock after it is created.
    """
    # The class body is only the docstring: calling behaviour comes from
    # CallableMixin, attribute/assertion behaviour from NonCallableMock.
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
def _importer(target):
    """Import and return the object named by the dotted string `target`.

    Each component after the first is resolved with `_dot_lookup`, importing
    deeper module paths as required.
    """
    parts = target.split('.')
    import_path = parts[0]
    thing = __import__(import_path)
    for part in parts[1:]:
        import_path = "%s.%s" % (import_path, part)
        thing = _dot_lookup(thing, part, import_path)
    return thing
def _is_started(patcher):
    # XXXX horrible
    # A patcher only gains an `is_local` attribute once __enter__ has run
    # (and __exit__ deletes it again), so its presence marks an active patch.
    return hasattr(patcher, 'is_local')
class _patch(object):
    # Shared implementation behind patch(), patch.object() and
    # patch.multiple().  Instances work as function decorators, class
    # decorators and context managers; start()/stop() drive the context
    # manager protocol imperatively.
    attribute_name = None
    _active_patches = set()
    def __init__(
            self, getter, attribute, new, spec, create,
            spec_set, autospec, new_callable, kwargs
        ):
        # `getter` is a zero-argument callable returning the patch target;
        # this keeps the import lazy until the patch is actually entered.
        if new_callable is not None:
            if new is not DEFAULT:
                raise ValueError(
                    "Cannot use 'new' and 'new_callable' together"
                )
            if autospec is not None:
                raise ValueError(
                    "Cannot use 'autospec' and 'new_callable' together"
                )
        self.getter = getter
        self.attribute = attribute
        self.new = new
        self.new_callable = new_callable
        self.spec = spec
        self.create = create
        self.has_local = False
        self.spec_set = spec_set
        self.autospec = autospec
        self.kwargs = kwargs
        self.additional_patchers = []
    def copy(self):
        # Fresh, independent patcher with the same configuration; used by
        # decorate_class so every test method gets its own instance.
        patcher = _patch(
            self.getter, self.attribute, self.new, self.spec,
            self.create, self.spec_set,
            self.autospec, self.new_callable, self.kwargs
        )
        patcher.attribute_name = self.attribute_name
        patcher.additional_patchers = [
            p.copy() for p in self.additional_patchers
        ]
        return patcher
    def __call__(self, func):
        # Dispatch on what is being decorated: a class or a callable.
        if isinstance(func, ClassTypes):
            return self.decorate_class(func)
        return self.decorate_callable(func)
    def decorate_class(self, klass):
        # Wrap each test method (per patch.TEST_PREFIX) with a copy of this
        # patcher; the class itself is returned unchanged otherwise.
        for attr in dir(klass):
            if not attr.startswith(patch.TEST_PREFIX):
                continue
            attr_value = getattr(klass, attr)
            if not hasattr(attr_value, "__call__"):
                continue
            patcher = self.copy()
            setattr(klass, attr, patcher(attr_value))
        return klass
    def decorate_callable(self, func):
        # If `func` is already a patched wrapper, stack this patching onto it
        # instead of wrapping a second time.
        if hasattr(func, 'patchings'):
            func.patchings.append(self)
            return func
        @wraps(func)
        def patched(*args, **keywargs):
            # don't use a with here (backwards compatability with Python 2.4)
            extra_args = []
            entered_patchers = []
            # can't use try...except...finally because of Python 2.4
            # compatibility
            exc_info = tuple()
            try:
                try:
                    for patching in patched.patchings:
                        arg = patching.__enter__()
                        entered_patchers.append(patching)
                        if patching.attribute_name is not None:
                            keywargs.update(arg)
                        elif patching.new is DEFAULT:
                            extra_args.append(arg)
                    args += tuple(extra_args)
                    return func(*args, **keywargs)
                except:
                    if (patching not in entered_patchers and
                        _is_started(patching)):
                        # the patcher may have been started, but an exception
                        # raised whilst entering one of its additional_patchers
                        entered_patchers.append(patching)
                    # Pass the exception to __exit__
                    exc_info = sys.exc_info()
                    # re-raise the exception
                    raise
            finally:
                for patching in reversed(entered_patchers):
                    patching.__exit__(*exc_info)
        patched.patchings = [self]
        if hasattr(func, 'func_code'):
            # not in Python 3
            patched.compat_co_firstlineno = getattr(
                func, "compat_co_firstlineno",
                func.func_code.co_firstlineno
            )
        return patched
    def get_original(self):
        # Return (original_value, local) where `local` is True when the
        # attribute lives directly in the target's __dict__ (as opposed to
        # being inherited); __exit__ uses this to decide between setattr
        # and delattr on restore.
        target = self.getter()
        name = self.attribute
        original = DEFAULT
        local = False
        try:
            original = target.__dict__[name]
        except (AttributeError, KeyError):
            original = getattr(target, name, DEFAULT)
        else:
            local = True
        if not self.create and original is DEFAULT:
            raise AttributeError(
                "%s does not have the attribute %r" % (target, name)
            )
        return original, local
    def __enter__(self):
        """Perform the patch."""
        new, spec, spec_set = self.new, self.spec, self.spec_set
        autospec, kwargs = self.autospec, self.kwargs
        new_callable = self.new_callable
        self.target = self.getter()
        # normalise False to None
        if spec is False:
            spec = None
        if spec_set is False:
            spec_set = None
        if autospec is False:
            autospec = None
        if spec is not None and autospec is not None:
            raise TypeError("Can't specify spec and autospec")
        if ((spec is not None or autospec is not None) and
            spec_set not in (True, None)):
            raise TypeError("Can't provide explicit spec_set *and* spec or autospec")
        original, local = self.get_original()
        if new is DEFAULT and autospec is None:
            # No replacement supplied: build a (NonCallable)MagicMock,
            # resolving spec=True / spec_set=True to the original object.
            inherit = False
            if spec is True:
                # set spec to the object we are replacing
                spec = original
                if spec_set is True:
                    spec_set = original
                    spec = None
            elif spec is not None:
                if spec_set is True:
                    spec_set = spec
                    spec = None
            elif spec_set is True:
                spec_set = original
            if spec is not None or spec_set is not None:
                if original is DEFAULT:
                    raise TypeError("Can't use 'spec' with create=True")
                if isinstance(original, ClassTypes):
                    # If we're patching out a class and there is a spec
                    inherit = True
            Klass = MagicMock
            _kwargs = {}
            if new_callable is not None:
                Klass = new_callable
            elif spec is not None or spec_set is not None:
                this_spec = spec
                if spec_set is not None:
                    this_spec = spec_set
                if _is_list(this_spec):
                    not_callable = '__call__' not in this_spec
                else:
                    not_callable = not _callable(this_spec)
                if not_callable:
                    Klass = NonCallableMagicMock
            if spec is not None:
                _kwargs['spec'] = spec
            if spec_set is not None:
                _kwargs['spec_set'] = spec_set
            # add a name to mocks
            if (isinstance(Klass, type) and
                issubclass(Klass, NonCallableMock) and self.attribute):
                _kwargs['name'] = self.attribute
            _kwargs.update(kwargs)
            new = Klass(**_kwargs)
            if inherit and _is_instance_mock(new):
                # we can only tell if the instance should be callable if the
                # spec is not a list
                this_spec = spec
                if spec_set is not None:
                    this_spec = spec_set
                if (not _is_list(this_spec) and not
                    _instance_callable(this_spec)):
                    Klass = NonCallableMagicMock
                _kwargs.pop('name')
                new.return_value = Klass(_new_parent=new, _new_name='()',
                                         **_kwargs)
        elif autospec is not None:
            # spec is ignored, new *must* be default, spec_set is treated
            # as a boolean. Should we check spec is not None and that spec_set
            # is a bool?
            if new is not DEFAULT:
                raise TypeError(
                    "autospec creates the mock for you. Can't specify "
                    "autospec and new."
                )
            if original is DEFAULT:
                raise TypeError("Can't use 'autospec' with create=True")
            spec_set = bool(spec_set)
            if autospec is True:
                autospec = original
            new = create_autospec(autospec, spec_set=spec_set,
                                  _name=self.attribute, **kwargs)
        elif kwargs:
            # can't set keyword args when we aren't creating the mock
            # XXXX If new is a Mock we could call new.configure_mock(**kwargs)
            raise TypeError("Can't pass kwargs to a mock we aren't creating")
        # Install the replacement and remember what to restore in __exit__.
        new_attr = new
        self.temp_original = original
        self.is_local = local
        setattr(self.target, self.attribute, new_attr)
        if self.attribute_name is not None:
            # patch.multiple: enter the additional patchers too and collect
            # the created mocks keyed by attribute name.
            extra_args = {}
            if self.new is DEFAULT:
                extra_args[self.attribute_name] = new
            for patching in self.additional_patchers:
                arg = patching.__enter__()
                if patching.new is DEFAULT:
                    extra_args.update(arg)
            return extra_args
        return new
    def __exit__(self, *exc_info):
        """Undo the patch."""
        if not _is_started(self):
            raise RuntimeError('stop called on unstarted patcher')
        if self.is_local and self.temp_original is not DEFAULT:
            setattr(self.target, self.attribute, self.temp_original)
        else:
            delattr(self.target, self.attribute)
            if not self.create and not hasattr(self.target, self.attribute):
                # needed for proxy objects like django settings
                setattr(self.target, self.attribute, self.temp_original)
        # Dropping these marks the patcher as no longer started
        # (see _is_started).
        del self.temp_original
        del self.is_local
        del self.target
        for patcher in reversed(self.additional_patchers):
            if _is_started(patcher):
                patcher.__exit__(*exc_info)
    def start(self):
        """Activate a patch, returning any created mock."""
        result = self.__enter__()
        self._active_patches.add(self)
        return result
    def stop(self):
        """Stop an active patch."""
        self._active_patches.discard(self)
        return self.__exit__()
def _get_target(target):
try:
target, attribute = target.rsplit('.', 1)
except (TypeError, ValueError):
raise TypeError("Need a valid target to patch. You supplied: %r" %
(target,))
getter = lambda: _importer(target)
return getter, attribute
def _patch_object(
        target, attribute, new=DEFAULT, spec=None,
        create=False, spec_set=None, autospec=None,
        new_callable=None, **kwargs
    ):
    """
    patch.object(target, attribute, new=DEFAULT, spec=None, create=False,
                 spec_set=None, autospec=None, new_callable=None, **kwargs)

    Patch the member named `attribute` on the (already imported) object
    `target` with a mock, usable as a decorator, class decorator or context
    manager.  All keyword arguments have the same meaning as for `patch`,
    and arbitrary extra keyword arguments configure the created mock.
    As a class decorator it honours `patch.TEST_PREFIX` when selecting the
    methods to wrap.
    """
    # `target` is already an object (not a dotted name), so the getter just
    # closes over it.
    return _patch(
        lambda: target, attribute, new, spec, create,
        spec_set, autospec, new_callable, kwargs
    )
def _patch_multiple(target, spec=None, create=False, spec_set=None,
                    autospec=None, new_callable=None, **kwargs):
    """Perform multiple patches in a single call.

    `target` is the object to patch, either directly or as a dotted-name
    string that will be imported lazily.  Each keyword argument names an
    attribute to patch and supplies its replacement; pass `DEFAULT` to have
    a mock created for you, in which case the created mocks are passed to a
    decorated function by keyword (and returned as a dict when used as a
    context manager)::

        with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
            ...

    Usable as a decorator, class decorator or context manager.  `spec`,
    `spec_set`, `create`, `autospec` and `new_callable` behave as for
    `patch` and apply to *all* patches made by this call.  As a class
    decorator it honours `patch.TEST_PREFIX`.
    """
    if type(target) in (unicode, str):
        getter = lambda: _importer(target)
    else:
        getter = lambda: target
    if not kwargs:
        raise ValueError(
            'Must supply at least one keyword argument with patch.multiple'
        )
    # need to wrap in a list for python 3, where items is a view
    items = list(kwargs.items())
    # Build one patcher per attribute; the first becomes the primary patcher
    # and carries the rest as additional_patchers.
    patchers = []
    for attribute, new in items:
        this_patcher = _patch(
            getter, attribute, new, spec, create, spec_set,
            autospec, new_callable, {}
        )
        this_patcher.attribute_name = attribute
        patchers.append(this_patcher)
    primary = patchers[0]
    primary.additional_patchers.extend(patchers[1:])
    return primary
def patch(
        target, new=DEFAULT, spec=None, create=False,
        spec_set=None, autospec=None, new_callable=None, **kwargs
    ):
    """
    `patch` acts as a function decorator, class decorator or context manager:
    inside the decorated function / `with` block, `target` is replaced with
    `new`, and the patch is undone on exit.

    `target` is a dotted-name string (`'package.module.ClassName'`); it is
    imported when the patch is entered, not at decoration time, and must be
    importable from the calling environment.

    If `new` is omitted the target is replaced with a `MagicMock`; that mock
    is passed as an extra argument to a decorated function, or returned by
    the context manager.

    `spec`/`spec_set` are forwarded to the created `MagicMock`; pass
    `spec=True` or `spec_set=True` to use the object being replaced as the
    spec.  `new_callable` substitutes a different class or factory for
    `MagicMock`.  `autospec=True` builds the replacement mock with a full
    recursive spec taken from the original (arguments of mocked functions
    are then signature-checked); `autospec=some_object` uses that object as
    the spec instead.

    By default patching a missing attribute fails; `create=True` allows the
    attribute to be created for the duration of the patch (use with care).

    As a class decorator, every method whose name starts with
    `patch.TEST_PREFIX` (default: `'test'`) is wrapped.  Arbitrary extra
    keyword arguments are passed to the created mock's constructor.

    `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` cover
    the alternate use-cases.
    """
    getter, attribute = _get_target(target)
    return _patch(getter, attribute, new, spec, create, spec_set,
                  autospec, new_callable, kwargs)
class _patch_dict(object):
    """
    Patch a dictionary, or dictionary like object, and restore the dictionary
    to its original state after the test.
    `in_dict` can be a dictionary or a mapping like container. If it is a
    mapping then it must at least support getting, setting and deleting items
    plus iterating over keys.
    `in_dict` can also be a string specifying the name of the dictionary, which
    will then be fetched by importing it.
    `values` can be a dictionary of values to set in the dictionary. `values`
    can also be an iterable of `(key, value)` pairs.
    If `clear` is True then the dictionary will be cleared before the new
    values are set.
    `patch.dict` can also be called with arbitrary keyword arguments to set
    values in the dictionary::
        with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
            ...
    `patch.dict` can be used as a context manager, decorator or class
    decorator. When used as a class decorator `patch.dict` honours
    `patch.TEST_PREFIX` for choosing which methods to wrap.
    """
    def __init__(self, in_dict, values=(), clear=False, **kwargs):
        if isinstance(in_dict, basestring):
            # a dotted name was given: import and use the named dictionary
            in_dict = _importer(in_dict)
        self.in_dict = in_dict
        # support any argument supported by dict(...) constructor
        self.values = dict(values)
        self.values.update(kwargs)
        self.clear = clear
        self._original = None
    def __call__(self, f):
        # Decorator use: class decorator or plain function wrapper.
        if isinstance(f, ClassTypes):
            return self.decorate_class(f)
        @wraps(f)
        def _inner(*args, **kw):
            # patch around the call (no `with` for Python 2.4 compatibility)
            self._patch_dict()
            try:
                return f(*args, **kw)
            finally:
                self._unpatch_dict()
        return _inner
    def decorate_class(self, klass):
        # Give each test method its own independent _patch_dict wrapper.
        for attr in dir(klass):
            attr_value = getattr(klass, attr)
            if (attr.startswith(patch.TEST_PREFIX) and
                hasattr(attr_value, "__call__")):
                decorator = _patch_dict(self.in_dict, self.values, self.clear)
                decorated = decorator(attr_value)
                setattr(klass, attr, decorated)
        return klass
    def __enter__(self):
        """Patch the dict."""
        self._patch_dict()
    def _patch_dict(self):
        # Snapshot the current contents, then (optionally) clear and apply
        # the replacement values.  Falls back to item-by-item operations for
        # mapping-likes without copy()/update().
        values = self.values
        in_dict = self.in_dict
        clear = self.clear
        try:
            original = in_dict.copy()
        except AttributeError:
            # dict like object with no copy method
            # must support iteration over keys
            original = {}
            for key in in_dict:
                original[key] = in_dict[key]
        self._original = original
        if clear:
            _clear_dict(in_dict)
        try:
            in_dict.update(values)
        except AttributeError:
            # dict like object with no update method
            for key in values:
                in_dict[key] = values[key]
    def _unpatch_dict(self):
        # Restore the snapshot taken by _patch_dict.
        in_dict = self.in_dict
        original = self._original
        _clear_dict(in_dict)
        try:
            in_dict.update(original)
        except AttributeError:
            for key in original:
                in_dict[key] = original[key]
    def __exit__(self, *args):
        """Unpatch the dict."""
        self._unpatch_dict()
        return False
    start = __enter__
    stop = __exit__
def _clear_dict(in_dict):
    """Remove every key from `in_dict`.

    Works for mapping-like objects without a clear() method, provided they
    support key iteration and item deletion.
    """
    try:
        in_dict.clear()
    except AttributeError:
        # snapshot the keys first since we mutate while "iterating"
        for key in list(in_dict):
            del in_dict[key]
def _patch_stopall():
    """Stop all active patches."""
    # Iterate a snapshot: stop() discards from _active_patches as we go.
    # (Local renamed from `patch` to avoid shadowing the module-level name.)
    for patcher in list(_patch._active_patches):
        patcher.stop()
# Attach the alternate forms and configuration knobs to the main entry point.
patch.object = _patch_object
patch.dict = _patch_dict
patch.multiple = _patch_multiple
patch.stopall = _patch_stopall
# Prefix used to find test methods when patchers decorate a class.
patch.TEST_PREFIX = 'test'
# Names (without the surrounding double underscores) of the magic methods
# MagicMock supports by default, grouped roughly by protocol.
magic_methods = (
    "lt le gt ge eq ne "
    "getitem setitem delitem "
    "len contains iter "
    "hash str sizeof "
    "enter exit "
    "divmod neg pos abs invert "
    "complex int float index "
    "trunc floor ceil "
)
# Binary numeric operators; expanded below into in-place ("i" prefix) and
# reflected ("r" prefix) variants.
numerics = "add sub mul div floordiv mod lshift rshift and xor or pow "
inplace = ' '.join('i%s' % n for n in numerics.split())
right = ' '.join('r%s' % n for n in numerics.split())
# Methods that exist on only one major Python version.
extra = ''
if inPy3k:
    extra = 'bool next '
else:
    extra = 'unicode long nonzero oct hex truediv rtruediv '
# not including __prepare__, __instancecheck__, __subclasscheck__
# (as they are metaclass methods)
# __del__ is not supported at all as it causes problems if it exists
_non_defaults = set('__%s__' % method for method in [
    'cmp', 'getslice', 'setslice', 'coerce', 'subclasses',
    'format', 'get', 'set', 'delete', 'reversed',
    'missing', 'reduce', 'reduce_ex', 'getinitargs',
    'getnewargs', 'getstate', 'setstate', 'getformat',
    'setformat', 'repr', 'dir'
])
def _get_method(name, func):
    """Turn a callable object (like a mock) into a real, named function.

    The wrapper forwards all arguments (including `self`) to `func`, so it
    can be attached to a class and behave like an ordinary method.
    """
    def method(self, *args, **kwargs):
        return func(self, *args, **kwargs)
    method.__name__ = name
    return method
# Full set of dunder names configured by default on MagicMock.
_magics = set(
    '__%s__' % method for method in
    ' '.join([magic_methods, numerics, inplace, right, extra]).split()
)
# Every magic method mock knows about: supported by default or on request.
_all_magics = _magics | _non_defaults
# Magic methods mocks must never implement: attribute access and
# construction hooks would break the mock machinery itself, the metaclass
# hooks cannot live on instances, and __del__ causes problems merely by
# existing.
# Bug fix: a missing comma after '__prepare__' used to concatenate it with
# '__instancecheck__' into the single bogus name
# '__prepare____instancecheck__', so neither method was actually blocked.
_unsupported_magics = set([
    '__getattr__', '__setattr__',
    '__init__', '__new__', '__prepare__',
    '__instancecheck__', '__subclasscheck__',
    '__del__'
])
# Default return values that must be computed from the mock instance itself
# (identity-based hash/str/...).  `unicode` only exists on Python 2.
_calculate_return_value = {
    '__hash__': lambda self: object.__hash__(self),
    '__str__': lambda self: object.__str__(self),
    '__sizeof__': lambda self: object.__sizeof__(self),
    '__unicode__': lambda self: unicode(object.__str__(self)),
}
# Fixed default return values for magic methods.  Comparisons default to
# NotImplemented so Python falls back to the other operand; `long` only
# exists on Python 2.
_return_values = {
    '__lt__': NotImplemented,
    '__gt__': NotImplemented,
    '__le__': NotImplemented,
    '__ge__': NotImplemented,
    '__int__': 1,
    '__contains__': False,
    '__len__': 0,
    '__exit__': False,
    '__complex__': 1j,
    '__float__': 1.0,
    '__bool__': True,
    '__nonzero__': True,
    '__oct__': '1',
    '__hex__': '0x1',
    '__long__': long(1),
    '__index__': 1,
}
def _get_eq(self):
    """Build the default __eq__ for mock `self`.

    A return value configured on the mock's __eq__ takes precedence;
    otherwise equality falls back to identity.
    """
    def __eq__(other):
        configured = self.__eq__._mock_return_value
        if configured is not DEFAULT:
            return configured
        return self is other
    return __eq__
def _get_ne(self):
    """Build the default __ne__ for mock `self`.

    Mirrors _get_eq: a return value configured on the mock's __ne__ takes
    precedence; otherwise inequality falls back to identity.
    """
    def __ne__(other):
        # Bug fix: this previously returned the DEFAULT sentinel itself when
        # a return value had been configured, instead of the configured
        # value (so `mock.__ne__.return_value = x` was silently ignored).
        ret_val = self.__ne__._mock_return_value
        if ret_val is not DEFAULT:
            return ret_val
        return self is not other
    return __ne__
def _get_iter(self):
    """Build the default __iter__ for mock `self`.

    With no configured return value the mock iterates as empty; otherwise
    the configured value is passed through iter().
    """
    def __iter__():
        configured = self.__iter__._mock_return_value
        if configured is DEFAULT:
            return iter([])
        # iter() on an existing iterator returns it unchanged, so a
        # configured iterator is handed back as-is.
        return iter(configured)
    return __iter__
# Magic methods whose defaults are built from the owning mock via a factory
# and installed as side effects (see _set_return_value).
_side_effect_methods = {
    '__eq__': _get_eq,
    '__ne__': _get_ne,
    '__iter__': _get_iter,
}
def _set_return_value(mock, method, name):
    """Give magic-method mock `method` its default behaviour for `name`.

    Resolution order: a fixed value from _return_values, then a value
    computed from the owning mock (_calculate_return_value), then a
    side-effect factory (_side_effect_methods).  Unknown names are left
    untouched.
    """
    fixed = _return_values.get(name, DEFAULT)
    if fixed is not DEFAULT:
        method.return_value = fixed
        return
    calculator = _calculate_return_value.get(name)
    if calculator is not None:
        try:
            return_value = calculator(mock)
        except AttributeError:
            # XXXX why do we return AttributeError here?
            # set it as a side_effect instead?
            return_value = AttributeError(name)
        method.return_value = return_value
        return
    side_effector = _side_effect_methods.get(name)
    if side_effector is not None:
        method.side_effect = side_effector(mock)
class MagicMixin(object):
    # Mixin adding default magic-method support to mock classes; mocks using
    # it are instances of a per-instance subclass, so setting attributes on
    # type(self) below does not leak between mocks.
    def __init__(self, *args, **kw):
        _super(MagicMixin, self).__init__(*args, **kw)
        self._mock_set_magics()
    def _mock_set_magics(self):
        # Install a MagicProxy on the instance's class for every supported
        # magic method (restricted by the spec, if any), and strip proxies
        # that a newly-applied spec no longer allows.
        these_magics = _magics
        if self._mock_methods is not None:
            these_magics = _magics.intersection(self._mock_methods)
            remove_magics = set()
            remove_magics = _magics - these_magics
            for entry in remove_magics:
                if entry in type(self).__dict__:
                    # remove unneeded magic methods
                    delattr(self, entry)
        # don't overwrite existing attributes if called a second time
        these_magics = these_magics - set(type(self).__dict__)
        _type = type(self)
        for entry in these_magics:
            setattr(_type, entry, MagicProxy(entry, self))
class NonCallableMagicMock(MagicMixin, NonCallableMock):
    """A version of `MagicMock` that isn't callable."""
    def mock_add_spec(self, spec, spec_set=False):
        """Add a spec to a mock. `spec` can either be an object or a
        list of strings. Only attributes on the `spec` can be fetched as
        attributes from the mock.
        If `spec_set` is True then only attributes on the spec can be set."""
        self._mock_add_spec(spec, spec_set)
        # re-sync the installed magic methods with the new spec
        self._mock_set_magics()
class MagicMock(MagicMixin, Mock):
    """
    MagicMock is a subclass of Mock with default implementations
    of most of the magic methods. You can use MagicMock without having to
    configure the magic methods yourself.
    If you use the `spec` or `spec_set` arguments then *only* magic
    methods that exist in the spec will be created.
    Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
    """
    def mock_add_spec(self, spec, spec_set=False):
        """Add a spec to a mock. `spec` can either be an object or a
        list of strings. Only attributes on the `spec` can be fetched as
        attributes from the mock.
        If `spec_set` is True then only attributes on the spec can be set."""
        self._mock_add_spec(spec, spec_set)
        # re-sync the installed magic methods with the new spec
        self._mock_set_magics()
class MagicProxy(object):
    """Lazily materialises one magic method on a mock.

    MagicMixin installs a proxy per supported dunder name on the mock's
    class; on first access (descriptor read) or call, the proxy replaces
    itself with a real child mock configured with the appropriate default
    return value or side effect.
    """
    def __init__(self, name, parent):
        self.name = name
        self.parent = parent
    def __call__(self, *args, **kwargs):
        return self.create_mock()(*args, **kwargs)
    def create_mock(self):
        parent = self.parent
        entry = self.name
        child = parent._get_child_mock(name=entry, _new_name=entry,
                                       _new_parent=parent)
        # shadow this proxy with the concrete child mock
        setattr(parent, entry, child)
        _set_return_value(parent, child, entry)
        return child
    def __get__(self, obj, _type=None):
        return self.create_mock()
class _ANY(object):
    """A helper object that compares equal to everything."""
    def __eq__(self, other):
        # always equal, so ANY matches any argument in call assertions
        return True
    def __ne__(self, other):
        return False
    def __repr__(self):
        return '<ANY>'
ANY = _ANY()
def _format_call_signature(name, args, kwargs):
    """Render a call as it would appear in source: `name(arg, kw=value)`.

    Positional arguments are repr()'d and listed first, followed by keyword
    arguments in dictionary order.
    """
    arg_parts = [repr(arg) for arg in args]
    kw_parts = ['%s=%r' % pair for pair in kwargs.items()]
    joined = ', '.join(arg_parts + kw_parts)
    return '%s(%s)' % (name, joined)
class _Call(tuple):
    """
    A tuple for holding the results of a call to a mock, either in the form
    `(args, kwargs)` or `(name, args, kwargs)`.
    If args or kwargs are empty then a call tuple will compare equal to
    a tuple without those values. This makes comparisons less verbose::
        _Call(('name', (), {})) == ('name',)
        _Call(('name', (1,), {})) == ('name', (1,))
        _Call(((), {'a': 'b'})) == ({'a': 'b'},)
    The `_Call` object provides a useful shortcut for comparing with call::
        _Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
        _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)
    If the _Call has no name then it will match any name.
    """
    def __new__(cls, value=(), name=None, parent=None, two=False,
                from_kall=True):
        # Only `value` and `two` matter here: __new__ normalises `value`
        # into the tuple contents.  The `name` parameter is deliberately
        # ignored (rebound below) -- the attribute is set by __init__.
        name = ''
        args = ()
        kwargs = {}
        _len = len(value)
        if _len == 3:
            name, args, kwargs = value
        elif _len == 2:
            # two elements can be (name, args), (name, kwargs) or
            # (args, kwargs) -- disambiguate by type
            first, second = value
            if isinstance(first, basestring):
                name = first
                if isinstance(second, tuple):
                    args = second
                else:
                    kwargs = second
            else:
                args, kwargs = first, second
        elif _len == 1:
            # a single element is a name, args tuple or kwargs dict
            value, = value
            if isinstance(value, basestring):
                name = value
            elif isinstance(value, tuple):
                args = value
            else:
                kwargs = value
        if two:
            return tuple.__new__(cls, (args, kwargs))
        return tuple.__new__(cls, (name, args, kwargs))
    def __init__(self, value=(), name=None, parent=None, two=False,
                 from_kall=True):
        self.name = name
        self.parent = parent
        self.from_kall = from_kall
    def __eq__(self, other):
        # Compare two call tuples, allowing the shorthand forms described in
        # the class docstring; an unnamed _Call matches any name.
        if other is ANY:
            return True
        try:
            len_other = len(other)
        except TypeError:
            return False
        self_name = ''
        if len(self) == 2:
            self_args, self_kwargs = self
        else:
            self_name, self_args, self_kwargs = self
        other_name = ''
        if len_other == 0:
            other_args, other_kwargs = (), {}
        elif len_other == 3:
            other_name, other_args, other_kwargs = other
        elif len_other == 1:
            value, = other
            if isinstance(value, tuple):
                other_args = value
                other_kwargs = {}
            elif isinstance(value, basestring):
                other_name = value
                other_args, other_kwargs = (), {}
            else:
                other_args = ()
                other_kwargs = value
        else:
            # len 2
            # could be (name, args) or (name, kwargs) or (args, kwargs)
            first, second = other
            if isinstance(first, basestring):
                other_name = first
                if isinstance(second, tuple):
                    other_args, other_kwargs = second, {}
                else:
                    other_args, other_kwargs = (), second
            else:
                other_args, other_kwargs = first, second
        if self_name and other_name != self_name:
            return False
        # this order is important for ANY to work!
        return (other_args, other_kwargs) == (self_args, self_kwargs)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __call__(self, *args, **kwargs):
        # calling a call object extends the chain: call.foo(1) etc.
        if self.name is None:
            return _Call(('', args, kwargs), name='()')
        name = self.name + '()'
        return _Call((self.name, args, kwargs), name=name, parent=self)
    def __getattr__(self, attr):
        # attribute access extends the dotted name: call.foo.bar
        if self.name is None:
            return _Call(name=attr, from_kall=False)
        name = '%s.%s' % (self.name, attr)
        return _Call(name=name, parent=self, from_kall=False)
    def __repr__(self):
        if not self.from_kall:
            name = self.name or 'call'
            if name.startswith('()'):
                name = 'call%s' % name
            return name
        if len(self) == 2:
            name = 'call'
            args, kwargs = self
        else:
            name, args, kwargs = self
            if not name:
                name = 'call'
            elif not name.startswith('()'):
                name = 'call.%s' % name
            else:
                name = 'call%s' % name
        return _format_call_signature(name, args, kwargs)
    def call_list(self):
        """For a call object that represents multiple calls, `call_list`
        returns a list of all the intermediate calls as well as the
        final call."""
        vals = []
        thing = self
        while thing is not None:
            if thing.from_kall:
                vals.append(thing)
            thing = thing.parent
        return _CallList(reversed(vals))
# The public `call` helper for building expected-call objects.
call = _Call(from_kall=False)
def create_autospec(spec, spec_set=False, instance=False, _parent=None,
                    _name=None, **kwargs):
    """Create a mock object using another object as a spec. Attributes on the
    mock will use the corresponding attribute on the `spec` object as their
    spec.
    Functions or methods being mocked will have their arguments checked
    to check that they are called with the correct signature.
    If `spec_set` is True then attempting to set attributes that don't exist
    on the spec object will raise an `AttributeError`.
    If a class is used as a spec then the return value of the mock (the
    instance of the class) will have the same spec. You can use a class as the
    spec for an instance object by passing `instance=True`. The returned mock
    will only be callable if instances of the mock are callable.
    `create_autospec` also takes arbitrary keyword arguments that are passed to
    the constructor of the created mock."""
    if _is_list(spec):
        # can't pass a list instance to the mock constructor as it will be
        # interpreted as a list of strings
        spec = type(spec)
    is_type = isinstance(spec, ClassTypes)
    _kwargs = {'spec': spec}
    if spec_set:
        _kwargs = {'spec_set': spec}
    elif spec is None:
        # None we mock with a normal mock without a spec
        _kwargs = {}
    _kwargs.update(kwargs)
    # Pick the mock class: non-callable spec objects get the non-callable
    # variant; so do classes whose instances aren't callable when
    # instance=True.
    Klass = MagicMock
    if type(spec) in DescriptorTypes:
        # descriptors don't have a spec
        # because we don't know what type they return
        _kwargs = {}
    elif not _callable(spec):
        Klass = NonCallableMagicMock
    elif is_type and instance and not _instance_callable(spec):
        Klass = NonCallableMagicMock
    _new_name = _name
    if _parent is None:
        # for a top level object no _new_name should be set
        _new_name = ''
    mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
                 name=_name, **_kwargs)
    if isinstance(spec, FunctionTypes):
        # should only happen at the top level because we don't
        # recurse for functions
        mock = _set_signature(mock, spec)
    else:
        _check_signature(spec, mock, is_type, instance)
    if _parent is not None and not instance:
        _parent._mock_children[_name] = mock
    if is_type and not instance and 'return_value' not in kwargs:
        # a class spec: give the return value (the "instance") the same spec
        mock.return_value = create_autospec(spec, spec_set, instance=True,
                                            _name='()', _parent=mock)
    for entry in dir(spec):
        if _is_magic(entry):
            # MagicMock already does the useful magic methods for us
            continue
        if isinstance(spec, FunctionTypes) and entry in FunctionAttributes:
            # allow a mock to actually be a function
            continue
        # XXXX do we need a better way of getting attributes without
        # triggering code execution (?) Probably not - we need the actual
        # object to mock it so we would rather trigger a property than mock
        # the property descriptor. Likewise we want to mock out dynamically
        # provided attributes.
        # XXXX what about attributes that raise exceptions other than
        # AttributeError on being fetched?
        # we could be resilient against it, or catch and propagate the
        # exception when the attribute is fetched from the mock
        try:
            original = getattr(spec, entry)
        except AttributeError:
            continue
        kwargs = {'spec': original}
        if spec_set:
            kwargs = {'spec_set': original}
        if not isinstance(original, FunctionTypes):
            # non-function attribute: record a lazy placeholder; the real
            # child mock is built on first access (see _SpecState).
            # NOTE(review): the fifth positional argument here binds to
            # _SpecState's `ids` parameter, not `instance` -- verify against
            # _SpecState's signature.
            new = _SpecState(original, spec_set, mock, entry, instance)
            mock._mock_children[entry] = new
        else:
            # function/method attribute: build the child mock eagerly and
            # signature-check it against the original
            parent = mock
            if isinstance(spec, FunctionTypes):
                parent = mock.mock
            new = MagicMock(parent=parent, name=entry, _new_name=entry,
                            _new_parent=parent, **kwargs)
            mock._mock_children[entry] = new
            skipfirst = _must_skip(spec, entry, is_type)
            _check_signature(original, new, skipfirst=skipfirst)
        # so functions created with _set_signature become instance attributes,
        # *plus* their underlying mock exists in _mock_children of the parent
        # mock. Adding to _mock_children may be unnecessary where we are also
        # setting as an instance attribute?
        if isinstance(new, FunctionTypes):
            setattr(mock, entry, new)
    return mock
def _must_skip(spec, entry, is_type):
    """Decide whether the implicit first argument (self/cls) of `entry`
    should be skipped when signature-checking calls against `spec`.

    Instance attributes and static/class methods carry no implicit first
    argument; for everything else the answer depends on whether the spec is
    a class (`is_type`).
    """
    if not isinstance(spec, ClassTypes):
        if entry in getattr(spec, '__dict__', {}):
            # instance attribute - shouldn't skip
            return False
        spec = spec.__class__
    if not hasattr(spec, '__mro__'):
        # old style class: can't have descriptors anyway
        return is_type
    for cls in spec.__mro__:
        candidate = cls.__dict__.get(entry, DEFAULT)
        if candidate is DEFAULT:
            continue
        if isinstance(candidate, (staticmethod, classmethod)):
            return False
        return is_type
    # shouldn't get here unless function is a dynamically provided attribute
    # XXXX untested behaviour
    return is_type
def _get_class(obj):
    """Return the class of `obj`, tolerating objects without __class__."""
    try:
        klass = obj.__class__
    except AttributeError:
        # in Python 2, _sre.SRE_Pattern objects have no __class__
        klass = type(obj)
    return klass
class _SpecState(object):
    """Placeholder recording how to build an autospecced child mock.

    create_autospec stores these in `_mock_children` for non-function
    attributes; the real child mock is only constructed when the attribute
    is first accessed on the parent mock.
    """
    def __init__(self, spec, spec_set=False, parent=None,
                 name=None, ids=None, instance=False):
        self.spec = spec
        self.spec_set = spec_set
        self.parent = parent
        self.name = name
        self.ids = ids
        self.instance = instance
# The callable types that get signature-checking treatment in autospec.
FunctionTypes = (
    # python function
    type(create_autospec),
    # instance method
    type(ANY.__eq__),
    # unbound method
    type(_ANY.__eq__),
)
# Python 2 function attributes (func_*) that must remain accessible when a
# mock stands in for a real function.
FunctionAttributes = set([
    'func_closure',
    'func_code',
    'func_defaults',
    'func_dict',
    'func_doc',
    'func_globals',
    'func_name',
])
# Spec used by mock_open for file handles; computed lazily on first use.
file_spec = None
def mock_open(mock=None, read_data=''):
    """
    A helper function to create a mock to replace the use of `open`. It works
    for `open` called directly or used as a context manager.
    The `mock` argument is the mock object to configure. If `None` (the
    default) then a `MagicMock` will be created for you, with the API limited
    to methods or attributes available on standard file handles.
    `read_data` is a string for the `read` method of the file handle to return.
    This is an empty string by default.
    """
    global file_spec
    if file_spec is None:
        # set on first use
        if inPy3k:
            # union of the text and binary file APIs
            import _io
            file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
        else:
            # Python 2: the `file` builtin supplies the spec
            file_spec = file
    if mock is None:
        mock = MagicMock(name='open', spec=open)
    handle = MagicMock(spec=file_spec)
    handle.write.return_value = None
    # entering the handle as a context manager yields the handle itself
    handle.__enter__.return_value = handle
    handle.read.return_value = read_data
    mock.return_value = handle
    return mock
class PropertyMock(Mock):
    """
    A mock intended to be used as a property, or other descriptor, on a class.
    `PropertyMock` provides `__get__` and `__set__` methods so you can specify
    a return value when it is fetched.
    Fetching a `PropertyMock` instance from an object calls the mock, with
    no args. Setting it calls the mock with the value being set.
    """
    def _get_child_mock(self, **kwargs):
        # Children are full MagicMocks rather than PropertyMocks so that
        # attributes of the mocked property behave normally.
        return MagicMock(**kwargs)
    def __get__(self, obj, obj_type):
        # Descriptor read: record/replay as a no-argument call.
        return self()
    def __set__(self, obj, val):
        # Descriptor write: record the assigned value as a call argument.
        self(val)
| mpl-2.0 |
loco-odoo/localizacion_co | openerp/addons/test_converter/models.py | 367 | 2039 | # -*- coding: utf-8 -*-
from openerp.osv import orm, fields
class test_model(orm.Model):
    # Fixture model for the field->HTML converter tests: one column of each
    # field type the converters must be able to render.  The French selection
    # labels deliberately contain non-ASCII characters and quotes to exercise
    # escaping; do not "fix" them.
    _name = 'test_converter.test_model'
    _columns = {
        'char': fields.char(),
        'integer': fields.integer(),
        'float': fields.float(),
        'numeric': fields.float(digits=(16, 2)),
        'many2one': fields.many2one('test_converter.test_model.sub'),
        'binary': fields.binary(),
        'date': fields.date(),
        'datetime': fields.datetime(),
        'selection': fields.selection([
            (1, "réponse A"),
            (2, "réponse B"),
            (3, "réponse C"),
            (4, "réponse D"),
        ]),
        'selection_str': fields.selection([
            ('A', "Qu'il n'est pas arrivé à Toronto"),
            ('B', "Qu'il était supposé arriver à Toronto"),
            ('C', "Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?"),
            ('D', "La réponse D"),
        ], string="Lorsqu'un pancake prend l'avion à destination de Toronto et "
                  "qu'il fait une escale technique à St Claude, on dit:"),
        'html': fields.html(),
        'text': fields.text(),
    }

    # `base` module does not contains any model that implement the `_group_by_full` functionality
    # test this feature here...
    def _gbf_m2o(self, cr, uid, ids, domain, read_group_order, access_rights_uid, context):
        # Group-by-full handler for the many2one column: every sub-record is
        # a group; groups whose id is not in `ids` are reported as folded.
        Sub = self.pool['test_converter.test_model.sub']
        all_ids = Sub._search(cr, uid, [], access_rights_uid=access_rights_uid, context=context)
        result = Sub.name_get(cr, access_rights_uid or uid, all_ids, context=context)
        folds = {i: i not in ids for i, _ in result}
        return result, folds

    _group_by_full = {
        'many2one': _gbf_m2o,
    }
class test_model_sub(orm.Model):
    # Target of test_model's many2one column; only needs a display name.
    _name = 'test_converter.test_model.sub'
    _columns = {
        'name': fields.char()
    }
class test_model_monetary(orm.Model):
    # Fixture for monetary rendering: deliberately absurd precision (55
    # decimal digits) to exercise rounding/formatting in the converter.
    _name = 'test_converter.monetary'
    _columns = {
        'value': fields.float(digits=(16, 55)),
    }
| agpl-3.0 |
ray-zhong/github_trend_spider | ENV/Lib/encodings/cp1256.py | 272 | 12814 | """ Python Character Mapping Codec cp1256 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1256.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless cp1256 codec: both directions are single-pass charmap
    # translations via the tables defined at the bottom of this generated
    # module (file produced by gencodec.py -- keep edits to comments only).
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # The charmap codec is context-free, so each chunk encodes independently;
    # only the encoded bytes (element 0 of the (bytes, length) pair) are kept.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Mirror of IncrementalEncoder: per-chunk charmap decode, returning only
    # the decoded text from the (text, length) pair.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream behaviour comes entirely from Codec.encode + codecs.StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream behaviour comes entirely from Codec.decode + codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    # Registration hook called by the `encodings` package: bundles this
    # module's codec implementations into a CodecInfo record for 'cp1256'.
    return codecs.CodecInfo(
        name='cp1256',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\u067e' # 0x81 -> ARABIC LETTER PEH
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\u2030' # 0x89 -> PER MILLE SIGN
'\u0679' # 0x8A -> ARABIC LETTER TTEH
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
'\u0686' # 0x8D -> ARABIC LETTER TCHEH
'\u0698' # 0x8E -> ARABIC LETTER JEH
'\u0688' # 0x8F -> ARABIC LETTER DDAL
'\u06af' # 0x90 -> ARABIC LETTER GAF
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\u06a9' # 0x98 -> ARABIC LETTER KEHEH
'\u2122' # 0x99 -> TRADE MARK SIGN
'\u0691' # 0x9A -> ARABIC LETTER RREH
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
'\u200c' # 0x9D -> ZERO WIDTH NON-JOINER
'\u200d' # 0x9E -> ZERO WIDTH JOINER
'\u06ba' # 0x9F -> ARABIC LETTER NOON GHUNNA
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u060c' # 0xA1 -> ARABIC COMMA
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u06be' # 0xAA -> ARABIC LETTER HEH DOACHASHMEE
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\u061b' # 0xBA -> ARABIC SEMICOLON
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\u061f' # 0xBF -> ARABIC QUESTION MARK
'\u06c1' # 0xC0 -> ARABIC LETTER HEH GOAL
'\u0621' # 0xC1 -> ARABIC LETTER HAMZA
'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
'\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
'\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
'\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
'\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
'\u0627' # 0xC7 -> ARABIC LETTER ALEF
'\u0628' # 0xC8 -> ARABIC LETTER BEH
'\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
'\u062a' # 0xCA -> ARABIC LETTER TEH
'\u062b' # 0xCB -> ARABIC LETTER THEH
'\u062c' # 0xCC -> ARABIC LETTER JEEM
'\u062d' # 0xCD -> ARABIC LETTER HAH
'\u062e' # 0xCE -> ARABIC LETTER KHAH
'\u062f' # 0xCF -> ARABIC LETTER DAL
'\u0630' # 0xD0 -> ARABIC LETTER THAL
'\u0631' # 0xD1 -> ARABIC LETTER REH
'\u0632' # 0xD2 -> ARABIC LETTER ZAIN
'\u0633' # 0xD3 -> ARABIC LETTER SEEN
'\u0634' # 0xD4 -> ARABIC LETTER SHEEN
'\u0635' # 0xD5 -> ARABIC LETTER SAD
'\u0636' # 0xD6 -> ARABIC LETTER DAD
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\u0637' # 0xD8 -> ARABIC LETTER TAH
'\u0638' # 0xD9 -> ARABIC LETTER ZAH
'\u0639' # 0xDA -> ARABIC LETTER AIN
'\u063a' # 0xDB -> ARABIC LETTER GHAIN
'\u0640' # 0xDC -> ARABIC TATWEEL
'\u0641' # 0xDD -> ARABIC LETTER FEH
'\u0642' # 0xDE -> ARABIC LETTER QAF
'\u0643' # 0xDF -> ARABIC LETTER KAF
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\u0644' # 0xE1 -> ARABIC LETTER LAM
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\u0645' # 0xE3 -> ARABIC LETTER MEEM
'\u0646' # 0xE4 -> ARABIC LETTER NOON
'\u0647' # 0xE5 -> ARABIC LETTER HEH
'\u0648' # 0xE6 -> ARABIC LETTER WAW
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0649' # 0xEC -> ARABIC LETTER ALEF MAKSURA
'\u064a' # 0xED -> ARABIC LETTER YEH
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\u064b' # 0xF0 -> ARABIC FATHATAN
'\u064c' # 0xF1 -> ARABIC DAMMATAN
'\u064d' # 0xF2 -> ARABIC KASRATAN
'\u064e' # 0xF3 -> ARABIC FATHA
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\u064f' # 0xF5 -> ARABIC DAMMA
'\u0650' # 0xF6 -> ARABIC KASRA
'\xf7' # 0xF7 -> DIVISION SIGN
'\u0651' # 0xF8 -> ARABIC SHADDA
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\u0652' # 0xFA -> ARABIC SUKUN
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\u200e' # 0xFD -> LEFT-TO-RIGHT MARK
'\u200f' # 0xFE -> RIGHT-TO-LEFT MARK
'\u06d2' # 0xFF -> ARABIC LETTER YEH BARREE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| mit |
mitch-kyle/message-board | support/apache-cassandra-2.2.1/pylib/cqlshlib/helptopics.py | 31 | 39690 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cql3handling import simple_cql_types
class CQLHelpTopics(object):
def get_help_topics(self):
return [ t[5:] for t in dir(self) if t.startswith('help_') ]
def print_help_topic(self, topic):
getattr(self, 'help_' + topic.lower())()
def help_types(self):
print "\n CQL types recognized by this version of cqlsh:\n"
for t in simple_cql_types:
print ' ' + t
print """
For information on the various recognizable input formats for these
types, or on controlling the formatting of cqlsh query output, see
one of the following topics:
HELP TIMESTAMP_INPUT
HELP DATE_INPUT
HELP TIME_INPUT
HELP BLOB_INPUT
HELP UUID_INPUT
HELP BOOLEAN_INPUT
HELP INT_INPUT
HELP TEXT_OUTPUT
HELP TIMESTAMP_OUTPUT
"""
def help_timestamp_input(self):
print """
Timestamp input
CQL supports any of the following ISO 8601 formats for timestamp
specification:
yyyy-mm-dd HH:mm
yyyy-mm-dd HH:mm:ss
yyyy-mm-dd HH:mmZ
yyyy-mm-dd HH:mm:ssZ
yyyy-mm-dd'T'HH:mm
yyyy-mm-dd'T'HH:mmZ
yyyy-mm-dd'T'HH:mm:ss
yyyy-mm-dd'T'HH:mm:ssZ
yyyy-mm-dd
yyyy-mm-ddZ
The Z in these formats refers to an RFC-822 4-digit time zone,
expressing the time zone's difference from UTC. For example, a
timestamp in Pacific Standard Time might be given thus:
2012-01-20 16:14:12-0800
If no time zone is supplied, the current time zone for the Cassandra
server node will be used.
"""
def help_date_input(self):
print """
Date input
CQL supports the following format for date specification:
yyyy-mm-dd
"""
def help_time_input(self):
print """
Time input
CQL supports the following format for time specification:
HH:MM:SS
HH:MM:SS.mmm
HH:MM:SS.mmmuuu
HH:MM:SS.mmmuuunnn
"""
def help_blob_input(self):
print """
Blob input
CQL blob data must be specified in a string literal as hexidecimal
data. Example: to store the ASCII values for the characters in the
string "CQL", use '43514c'.
"""
def help_uuid_input(self):
print """
UUID input
UUIDs may be specified in CQL using 32 hexidecimal characters,
split up using dashes in the standard UUID format:
XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
"""
def help_boolean_input(self):
print """
Boolean input
CQL accepts the strings 'true' and 'false' (case insensitive)
as input for boolean types.
"""
def help_int_input(self):
print """
Integer input
CQL accepts the following integer types:
tinyint - 1-byte signed integer
smallint - 2-byte signed integer
int - 4-byte signed integer
bigint - 8-byte signed integer
"""
def help_timestamp_output(self):
print """
Timestamp output
Cqlsh will display timestamps in the following format by default:
yyyy-mm-dd HH:mm:ssZ
which is a format acceptable as CQL timestamp input as well.
The output format can be changed by setting 'time_format' property
in the [ui] section of .cqlshrc file.
"""
def help_text_output(self):
print """
Textual output
When control characters, or other characters which can't be encoded
in your current locale, are found in values of 'text' or 'ascii'
types, it will be shown as a backslash escape. If color is enabled,
any such backslash escapes will be shown in a different color from
the surrounding text.
Unicode code points in your data will be output intact, if the
encoding for your locale is capable of decoding them. If you prefer
that non-ascii characters be shown with Python-style "\\uABCD"
escape sequences, invoke cqlsh with an ASCII locale (for example,
by setting the $LANG environment variable to "C").
"""
help_ascii_output = help_text_output
def help_create_index(self):
print """
CREATE INDEX [<indexname>] ON <cfname> ( <colname> );
A CREATE INDEX statement is used to create a new, automatic secondary
index on the given CQL table, for the named column. A name for the
index itself can be specified before the ON keyword, if desired. A
single column name must be specified inside the parentheses. It is not
necessary for the column to exist on any current rows (Cassandra is
schema-optional), but the column must already have a type (specified
during the CREATE TABLE, or added afterwards with ALTER TABLE).
"""
def help_drop(self):
print """
There are different variants of DROP. For more information, see
one of the following:
HELP DROP_KEYSPACE;
HELP DROP_TABLE;
HELP DROP_INDEX;
HELP DROP_FUNCTION;
HELP DROP_AGGREGATE;
"""
def help_drop_keyspace(self):
print """
DROP KEYSPACE <keyspacename>;
A DROP KEYSPACE statement results in the immediate, irreversible
removal of a keyspace, including all column families in it, and all
data contained in those column families.
"""
def help_drop_table(self):
print """
DROP TABLE <tablename>;
A DROP TABLE statement results in the immediate, irreversible
removal of a CQL table and the underlying column family, including all
data contained in it.
"""
help_drop_columnfamily = help_drop_table
def help_drop_index(self):
print """
DROP INDEX <indexname>;
A DROP INDEX statement is used to drop an existing secondary index.
"""
def help_drop_function(self):
print """
DROP FUNCTION ( IF EXISTS )?
( <keyspace> '.' )? <function-name>
( '(' <arg-type> ( ',' <arg-type> )* ')' )?
DROP FUNCTION statement removes a function created using CREATE FUNCTION.
You must specify the argument types (signature) of the function to drop if there
are multiple functions with the same name but a different signature
(overloaded functions).
DROP FUNCTION with the optional IF EXISTS keywords drops a function if it exists.
"""
def help_drop_aggregate(self):
print """
DROP AGGREGATE ( IF EXISTS )?
( <keyspace> '.' )? <aggregate-name>
( '(' <arg-type> ( ',' <arg-type> )* ')' )?
The DROP AGGREGATE statement removes an aggregate created using CREATE AGGREGATE.
You must specify the argument types of the aggregate to drop if there are multiple
aggregates with the same name but a different signature (overloaded aggregates).
DROP AGGREGATE with the optional IF EXISTS keywords drops an aggregate if it exists,
and does nothing if a function with the signature does not exist.
Signatures for user-defined aggregates follow the same rules as for
user-defined functions.
"""
def help_truncate(self):
print """
TRUNCATE <tablename>;
TRUNCATE accepts a single argument for the table name, and permanently
removes all data from it.
"""
def help_create(self):
print """
There are different variants of CREATE. For more information, see
one of the following:
HELP CREATE_KEYSPACE;
HELP CREATE_TABLE;
HELP CREATE_INDEX;
HELP CREATE_FUNCTION;
HELP CREATE_AGGREGATE;
"""
def help_use(self):
print """
USE <keyspacename>;
Tells cqlsh and the connected Cassandra instance that you will be
working in the given keyspace. All subsequent operations on tables
or indexes will be in the context of this keyspace, unless otherwise
specified, until another USE command is issued or the connection
terminates.
As always, when a keyspace name does not work as a normal identifier or
number, it can be quoted using double quotes.
"""
def help_create_aggregate(self):
print """
CREATE ( OR REPLACE )? AGGREGATE ( IF NOT EXISTS )?
( <keyspace> '.' )? <aggregate-name>
'(' <arg-type> ( ',' <arg-type> )* ')'
SFUNC ( <keyspace> '.' )? <state-functionname>
STYPE <state-type>
( FINALFUNC ( <keyspace> '.' )? <final-functionname> )?
( INITCOND <init-cond> )?
CREATE AGGREGATE creates or replaces a user-defined aggregate.
CREATE AGGREGATE with the optional OR REPLACE keywords either creates an aggregate
or replaces an existing one with the same signature. A CREATE AGGREGATE without
OR REPLACE fails if an aggregate with the same signature already exists.
CREATE AGGREGATE with the optional IF NOT EXISTS keywords either creates an aggregate
if it does not already exist.
OR REPLACE and IF NOT EXIST cannot be used together.
Aggregates belong to a keyspace. If no keyspace is specified in <aggregate-name>, the
current keyspace is used (i.e. the keyspace specified using the USE statement). It is
not possible to create a user-defined aggregate in one of the system keyspaces.
Signatures for user-defined aggregates follow the same rules as for
user-defined functions.
STYPE defines the type of the state value and must be specified.
The optional INITCOND defines the initial state value for the aggregate. It defaults
to null. A non-null INITCOND must be specified for state functions that are declared
with RETURNS NULL ON NULL INPUT.
SFUNC references an existing function to be used as the state modifying function. The
type of first argument of the state function must match STYPE. The remaining argument
types of the state function must match the argument types of the aggregate function.
State is not updated for state functions declared with RETURNS NULL ON NULL INPUT and
called with null.
The optional FINALFUNC is called just before the aggregate result is returned. It must
take only one argument with type STYPE. The return type of the FINALFUNC may be a
different type. A final function declared with RETURNS NULL ON NULL INPUT means that
the aggregate's return value will be null, if the last state is null.
If no FINALFUNC is defined, the overall return type of the aggregate function is STYPE.
If a FINALFUNC is defined, it is the return type of that function.
"""
def help_create_function(self):
print """
CREATE ( OR REPLACE )? FUNCTION ( IF NOT EXISTS )?
( <keyspace> '.' )? <function-name>
'(' <arg-name> <arg-type> ( ',' <arg-name> <arg-type> )* ')'
( CALLED | RETURNS NULL ) ON NULL INPUT
RETURNS <type>
LANGUAGE <language>
AS <body>
CREATE FUNCTION creates or replaces a user-defined function.
Signatures are used to distinguish individual functions. The signature consists of:
The fully qualified function name - i.e keyspace plus function-name
The concatenated list of all argument types
Note that keyspace names, function names and argument types are subject to the default
naming conventions and case-sensitivity rules.
CREATE FUNCTION with the optional OR REPLACE keywords either creates a function or
replaces an existing one with the same signature. A CREATE FUNCTION without OR REPLACE
fails if a function with the same signature already exists.
Behavior on invocation with null values must be defined for each function. There are
two options:
RETURNS NULL ON NULL INPUT declares that the function will always return null if any
of the input arguments is null. CALLED ON NULL INPUT declares that the function will
always be executed.
If the optional IF NOT EXISTS keywords are used, the function will only be created if
another function with the same signature does not exist.
OR REPLACE and IF NOT EXIST cannot be used together.
Functions belong to a keyspace. If no keyspace is specified in <function-name>, the
current keyspace is used (i.e. the keyspace specified using the USE statement).
It is not possible to create a user-defined function in one of the system keyspaces.
"""
def help_create_table(self):
print """
CREATE TABLE <cfname> ( <colname> <type> PRIMARY KEY [,
<colname> <type> [, ...]] )
[WITH <optionname> = <val> [AND <optionname> = <val> [...]]];
CREATE TABLE statements create a new CQL table under the current
keyspace. Valid table names are strings of alphanumeric characters and
underscores, which begin with a letter.
Each table requires a primary key, which will correspond to the
underlying columnfamily key and key validator. It's important to
note that the key type you use must be compatible with the partitioner
in use. For example, OrderPreservingPartitioner and
CollatingOrderPreservingPartitioner both require UTF-8 keys.
In cql3 mode, a table can have multiple columns composing the primary
key (see HELP COMPOUND_PRIMARY_KEYS).
For more information, see one of the following:
HELP CREATE_TABLE_TYPES;
HELP CREATE_TABLE_OPTIONS;
"""
help_create_columnfamily = help_create_table
def help_compound_primary_keys(self):
print """
CREATE TABLE <cfname> ( <partition_key> <type>, <clustering_key1> type, <clustering_key2> type,
[, ...]], PRIMARY KEY (<partition_key>, <clustering_key1>, <clustering_key2>);
CREATE TABLE allows a primary key composed of multiple columns. When this is the case, specify
the columns that take part in the compound key after all columns have been specified.
, PRIMARY KEY( <key1>, <key2>, ... )
The partitioning key itself can be a compound key, in which case the first element of the PRIMARY KEY
phrase should be parenthesized, as
PRIMARY KEY ((<partition_key_part1>, <partition_key_part2>), <clustering_key>)
"""
def help_create_table_types(self):
print """
CREATE TABLE: Specifying column types
CREATE ... (KEY <type> PRIMARY KEY,
othercol <type>) ...
It is possible to assign columns a type during table creation. Columns
configured with a type are validated accordingly when a write occurs,
and intelligent CQL drivers and interfaces will be able to decode the
column values correctly when receiving them. Column types are specified
as a parenthesized, comma-separated list of column term and type pairs.
See HELP TYPES; for the list of recognized types.
"""
help_create_columnfamily_types = help_create_table_types
def help_create_table_options(self):
print """
CREATE TABLE: Specifying columnfamily options
CREATE TABLE blah (...)
WITH optionname = val AND otheroption = val2;
A number of optional keyword arguments can be supplied to control the
configuration of a new CQL table, such as the size of the associated
row and key caches for the underlying Cassandra columnfamily. Consult
your CQL reference for the complete list of options and possible
values.
"""
help_create_columnfamily_options = help_create_table_options
def help_alter_alter(self):
print """
ALTER TABLE: altering existing typed columns
ALTER TABLE addamsFamily ALTER lastKnownLocation TYPE uuid;
ALTER TABLE ... ALTER changes the expected storage type for a column.
The column must already have a type in the column family metadata. The
column may or may not already exist in current rows-- but be aware that
no validation of existing data is done. The bytes stored in values for
that column will remain unchanged, and if existing data is not
deserializable according to the new type, this may cause your CQL
driver or interface to report errors.
"""
def help_alter_add(self):
print """
ALTER TABLE: adding a typed column
ALTER TABLE addamsFamily ADD gravesite varchar;
The ALTER TABLE ... ADD variant adds a typed column to a column
family. The column must not already have a type in the column family
metadata. See the warnings on HELP ALTER_ALTER regarding the lack of
validation of existing data; they apply here as well.
"""
def help_alter_drop(self):
print """
ALTER TABLE: dropping a typed column
ALTER TABLE addamsFamily DROP gender;
An ALTER TABLE ... DROP statement removes the type of a column
from the column family metadata. Note that this does _not_ remove the
column from current rows; it just removes the metadata saying that the
bytes stored under that column are expected to be deserializable
according to a certain type.
"""
def help_alter_with(self):
print """
ALTER TABLE: changing column family properties
ALTER TABLE addamsFamily WITH comment = 'Glad to be here!'
AND read_repair_chance = 0.2;
An ALTER TABLE ... WITH statement makes adjustments to the
table properties, as defined when the table was created (see
HELP CREATE_TABLE_OPTIONS and your Cassandra documentation for
information about the supported parameter names and values).
"""
def help_delete_columns(self):
print """
DELETE: specifying columns
DELETE col1, col2, col3 FROM ...
Following the DELETE keyword is an optional comma-delimited list of
column name terms. When no column names are given, the remove applies
to the entire row(s) matched by the WHERE clause.
When column names do not parse as valid CQL identifiers, they can be
quoted in single quotes (CQL 2) or double quotes (CQL 3).
"""
def help_delete_where(self):
print """
DELETE: specifying rows
DELETE ... WHERE keycol = 'some_key_value';
DELETE ... WHERE keycol1 = 'val1' AND keycol2 = 'val2';
DELETE ... WHERE keycol IN (key1, key2);
The WHERE clause is used to determine to which row(s) a DELETE
applies. The first form allows the specification of a precise row
by specifying a particular primary key value (if the primary key has
multiple columns, values for each must be given). The second form
allows a list of key values to be specified using the IN operator
and a parenthesized list of comma-delimited key values.
"""
def help_update_set(self):
print """
UPDATE: Specifying Columns and Row
UPDATE ... SET name1 = value1, name2 = value2
WHERE <key> = keyname;
UPDATE ... SET name1 = value1, name2 = value2
WHERE <key> IN ('<key1>', '<key2>', ...)
Rows are created or updated by supplying column names and values in
term assignment format. Multiple columns can be set by separating the
name/value pairs using commas.
"""
def help_update_counters(self):
print """
UPDATE: Updating Counter Columns
UPDATE ... SET name1 = name1 + <value> ...
UPDATE ... SET name1 = name1 - <value> ...
Counter columns can be incremented or decremented by an arbitrary
numeric value though the assignment of an expression that adds or
subtracts the value.
"""
def help_update_where(self):
print """
UPDATE: Selecting rows to update
UPDATE ... WHERE <keyname> = <keyval>;
UPDATE ... WHERE <keyname> IN (<keyval1>, <keyval2>, ...);
UPDATE ... WHERE <keycol1> = <keyval1> AND <keycol2> = <keyval2>;
Each update statement requires a precise set of keys to be specified
using a WHERE clause.
If the table's primary key consists of multiple columns, an explicit
value must be given for each for the UPDATE statement to make sense.
"""
def help_select_table(self):
print """
SELECT: Specifying Table
SELECT ... FROM [<keyspace>.]<tablename> ...
The FROM clause is used to specify the CQL table applicable to a SELECT
query. The keyspace in which the table exists can optionally be
specified along with the table name, separated by a dot (.). This will
not change the current keyspace of the session (see HELP USE).
"""
help_select_columnfamily = help_select_table
def help_select_where(self):
print """
SELECT: Filtering rows
SELECT ... WHERE <key> = keyname AND name1 = value1
SELECT ... WHERE <key> >= startkey and <key> =< endkey AND name1 = value1
SELECT ... WHERE <key> IN ('<key>', '<key>', '<key>', ...)
The WHERE clause provides for filtering the rows that appear in
results. The clause can filter on a key name, or range of keys, and in
the case of indexed columns, on column values. Key filters are
specified using the KEY keyword or key alias name, a relational
operator (one of =, >, >=, <, and <=), and a term value. When terms
appear on both sides of a relational operator it is assumed the filter
applies to an indexed column. With column index filters, the term on
the left of the operator is the name, the term on the right is the
value to filter _on_.
Note: The greater-than and less-than operators (> and <) result in key
ranges that are inclusive of the terms. There is no supported notion of
"strictly" greater-than or less-than; these operators are merely
supported as aliases to >= and <=.
"""
def help_select_limit(self):
    # Topic text for "HELP SELECT_LIMIT": the LIMIT option of SELECT.
    print """
SELECT: Limiting results
SELECT ... WHERE <clause> [LIMIT n] ...
Limiting the number of rows returned can be achieved by adding the
LIMIT option to a SELECT expression. LIMIT defaults to 10,000 when left
unset.
"""
class CQL3HelpTopics(CQLHelpTopics):
    # CQL3-specific help topics; overrides/extends the CQL2 text in the base
    # class. Each help_* method simply prints a static topic string.
    def help_create_keyspace(self):
        # Topic text for "HELP CREATE_KEYSPACE" (CQL3 replication-map syntax).
        print """
CREATE KEYSPACE <ksname>
WITH replication = {'class':'<strategy>' [,'<option>':<val>]};
The CREATE KEYSPACE statement creates a new top-level namespace (aka
"keyspace"). Valid names are any string constructed of alphanumeric
characters and underscores. Names which do not work as valid
identifiers or integers should be quoted as string literals. Properties
such as replication strategy and count are specified during creation
as key-value pairs in the 'replication' map:
class [required]: The name of the replication strategy class
which should be used for the new keyspace. Some often-used classes
are SimpleStrategy and NetworkTopologyStrategy.
other options [optional]: Most strategies require additional arguments
which can be supplied as key-value pairs in the 'replication' map.
Examples:
To create a keyspace with NetworkTopologyStrategy and strategy option of "DC1"
with a value of "1" and "DC2" with a value of "2" you would use
the following statement:
CREATE KEYSPACE <ksname>
WITH replication = {'class':'NetworkTopologyStrategy', 'DC1':1, 'DC2':2};
To create a keyspace with SimpleStrategy and "replication_factor" option
with a value of "3" you would use this statement:
CREATE KEYSPACE <ksname>
WITH replication = {'class':'SimpleStrategy', 'replication_factor':3};
"""
def help_begin(self):
    # Topic text for "HELP BEGIN" / "HELP APPLY": BATCH statements.
    print """
BEGIN [UNLOGGED|COUNTER] BATCH [USING TIMESTAMP <timestamp>]
<insert or update or delete statement> ;
[ <another insert or update or delete statement ;
[...]]
APPLY BATCH;
BATCH supports setting a client-supplied optional global timestamp
which will be used for each of the operations included in the batch.
Only data modification statements (specifically, UPDATE, INSERT,
and DELETE) are allowed in a BATCH statement. BATCH is _not_ an
analogue for SQL transactions.
_NOTE: Counter mutations are allowed only within COUNTER batches._
_NOTE: While there are no isolation guarantees, UPDATE queries are
atomic within a given record._
"""
# APPLY BATCH shares the same help topic as BEGIN BATCH.
help_apply = help_begin
def help_select(self):
    # Topic text for "HELP SELECT": overview plus pointers to sub-topics.
    print """
SELECT <selectExpr>
FROM [<keyspace>.]<table>
[WHERE <clause>]
[ORDER BY <colname> [DESC]]
[LIMIT m];
SELECT is used to read one or more records from a CQL table. It returns
a set of rows matching the selection criteria specified.
For more information, see one of the following:
HELP SELECT_EXPR
HELP SELECT_TABLE
HELP SELECT_WHERE
HELP SELECT_LIMIT
"""
def help_delete(self):
    # Topic text for "HELP DELETE": overview plus pointers to sub-topics.
    print """
DELETE [<col1> [, <col2>, ...] FROM [<keyspace>.]<tablename>
[USING TIMESTAMP <timestamp>]
WHERE <keyname> = <keyvalue>;
A DELETE is used to perform the removal of one or more columns from one
or more rows. Each DELETE statement requires a precise set of row keys
to be specified using a WHERE clause and the KEY keyword or key alias.
For more information, see one of the following:
HELP DELETE_USING
HELP DELETE_COLUMNS
HELP DELETE_WHERE
"""
def help_delete_using(self):
    # Topic text for "HELP DELETE_USING": the USING TIMESTAMP clause of DELETE.
    print """
DELETE: the USING clause
DELETE ... USING TIMESTAMP <timestamp>;
<timestamp> defines the optional timestamp for the new tombstone
record. It must be an integer. Cassandra timestamps are generally
specified using milliseconds since the Unix epoch (1970-01-01 00:00:00
UTC).
"""
def help_update(self):
    # Topic text for "HELP UPDATE": overview plus pointers to sub-topics.
    print """
UPDATE [<keyspace>.]<columnFamily>
[USING [TIMESTAMP <timestamp>]
[AND TTL <timeToLive>]]
SET name1 = value1, name2 = value2 WHERE <keycol> = keyval
[IF EXISTS];
An UPDATE is used to write one or more columns to a record in a table.
No results are returned. The record's primary key must be completely
and uniquely specified; that is, if the primary key includes multiple
columns, all must be explicitly given in the WHERE clause.
Statements begin with the UPDATE keyword followed by the name of the
table to be updated.
For more information, see one of the following:
HELP UPDATE_USING
HELP UPDATE_SET
HELP UPDATE_COUNTERS
HELP UPDATE_WHERE
"""
def help_update_using(self):
    # Topic text for "HELP UPDATE_USING": TIMESTAMP and TTL parameters.
    print """
UPDATE: the USING clause
UPDATE ... USING TIMESTAMP <timestamp>;
UPDATE ... USING TTL <timeToLive>;
The USING clause allows setting of certain query and data parameters.
If multiple parameters need to be set, these may be joined using AND.
Example:
UPDATE ... USING TTL 43200 AND TIMESTAMP 1351620509603
<timestamp> defines the optional timestamp for the new column value(s).
It must be an integer. Cassandra timestamps are generally specified
using milliseconds since the Unix epoch (1970-01-01 00:00:00 UTC).
<timeToLive> defines the optional time to live (TTL) in seconds for the
new column value(s). It must be an integer.
"""
def help_insert(self):
print """
INSERT INTO [<keyspace>.]<tablename>
( <colname1>, <colname2> [, <colname3> [, ...]] )
VALUES ( <colval1>, <colval2> [, <colval3> [, ...]] )
[USING TIMESTAMP <timestamp>]
[AND TTL <timeToLive>];
An INSERT is used to write one or more columns to a record in a
CQL table. No results are returned.
Values for all component columns in the table's primary key must
be given. Also, there must be at least one non-primary-key column
specified (Cassandra rows are not considered to exist with only
a key and no associated columns).
Unlike in SQL, the semantics of INSERT and UPDATE are identical.
In either case a record is created if none existed before, and
udpated when it does. For more information, see one of the
following:
HELP UPDATE
HELP UPDATE_USING
"""
def help_select_expr(self):
    # Topic text for "HELP SELECT_EXPR": the projection list of SELECT.
    print """
SELECT: Specifying Columns
SELECT name1, name2, name3 FROM ...
SELECT COUNT(*) FROM ...
The SELECT expression determines which columns will appear in the
results and takes the form of a comma separated list of names.
It is worth noting that unlike the projection in a SQL SELECT, there is
no guarantee that the results will contain all of the columns
specified. This is because Cassandra is schema-less and there are no
guarantees that a given column exists.
When the COUNT aggregate function is specified as a column to fetch, a
single row will be returned, with a single column named "count" whose
value is the number of rows from the pre-aggregation resultset.
Currently, COUNT is the only function supported by CQL.
"""
def help_alter_drop(self):
    # Topic text for "HELP ALTER_DROP": dropping a column with ALTER TABLE.
    print """
ALTER TABLE: dropping a typed column
ALTER TABLE addamsFamily DROP gender;
An ALTER TABLE ... DROP statement removes the type of a column
from the column family metadata. Dropped columns will immediately
become unavailable in the queries and will not be included in
compacted sstables in the future. If a column is readded, queries
won't return values written before the column was last dropped.
It is assumed that timestamps represent actual time, so if this
is not your case, you should NOT readd previously dropped columns.
Columns can't be dropped from tables defined with COMPACT STORAGE.
"""
def help_create(self):
    # Print the base-class CREATE topics, then append the CQL3-only ones.
    super(CQL3HelpTopics, self).help_create()
    print """ HELP CREATE_USER;
HELP CREATE_ROLE;
"""
def help_alter(self):
    # Topic text for "HELP ALTER": overview plus pointers to sub-topics.
    print """
ALTER TABLE <tablename> ALTER <columnname> TYPE <type>;
ALTER TABLE <tablename> ADD <columnname> <type>;
ALTER TABLE <tablename> RENAME <columnname> TO <columnname>
[AND <columnname> TO <columnname>]
ALTER TABLE <tablename> WITH <optionname> = <val> [AND <optionname> = <val> [...]];
An ALTER statement is used to manipulate table metadata. It allows you
to add new typed columns, drop existing columns, change the data
storage type of existing columns, or change table properties.
No results are returned.
See one of the following for more information:
HELP ALTER_ALTER;
HELP ALTER_ADD;
HELP ALTER_DROP;
HELP ALTER_RENAME;
HELP ALTER_WITH;
"""
def help_alter_rename(self):
    # Topic text for "HELP ALTER_RENAME": renaming a column.
    print """
ALTER TABLE: renaming a column
ALTER TABLE <tablename> RENAME <columnname> TO <columnname>
[AND <columnname> TO <columnname>]
The ALTER TABLE ... RENAME variant renames a typed column in a column
family.
"""
def help_drop(self):
super(CQL3HelpTopics, self).help_create()
print """ HELP DROP_USER;
HELP DROP_ROLE;
"""
def help_list(self):
    # Topic text for "HELP LIST": pointers to the LIST variants.
    print """
There are different variants of LIST. For more information, see
one of the following:
HELP LIST_USERS;
HELP LIST_PERMISSIONS;
"""
def help_create_user(self):
    # Topic text for "HELP CREATE_USER".
    print """
CREATE USER <username> [WITH PASSWORD 'password'] [NOSUPERUSER | SUPERUSER];
CREATE USER creates a new Cassandra user account.
Only superusers can issue CREATE USER requests.
To create a superuser account use SUPERUSER option (NOSUPERUSER is the default).
WITH PASSWORD clause should only be used with password-based authenticators,
e.g. PasswordAuthenticator, SimpleAuthenticator.
"""
def help_alter_user(self):
    # Topic text for "HELP ALTER_USER".
    print """
ALTER USER <username> [WITH PASSWORD 'password'] [NOSUPERUSER | SUPERUSER];
Use ALTER USER to change a user's superuser status and/or password (only
with password-based authenticators).
Superusers can change a user's password or superuser status (except their own).
Users cannot change their own superuser status. Ordinary users can only change their
password (if the configured authenticator is password-based).
"""
def help_drop_user(self):
    # Topic text for "HELP DROP_USER".
    print """
DROP USER <username>;
DROP USER removes an existing user. You have to be logged in as a superuser
to issue a DROP USER statement. A user cannot drop themselves.
"""
def help_list_users(self):
    # Topic text for "HELP LIST_USERS".
    print """
LIST USERS;
List existing users and their superuser status.
"""
def help_grant(self):
    # Topic text for "HELP GRANT": granting permissions on resources.
    print """
GRANT (<permission> [PERMISSION] | ALL [PERMISSIONS])
ON ALL KEYSPACES
| KEYSPACE <keyspace>
| [TABLE] [<keyspace>.]<table>
TO [ROLE <rolename> | USER <username>]
Grant the specified permission (or all permissions) on a resource
to a role or user.
To be able to grant a permission on some resource you have to
have that permission yourself and also AUTHORIZE permission on it,
or on one of its parent resources.
See HELP PERMISSIONS for more info on the available permissions.
"""
def help_revoke(self):
    # Topic text for "HELP REVOKE": revoking permissions on resources.
    print """
REVOKE (<permission> [PERMISSION] | ALL [PERMISSIONS])
ON ALL KEYSPACES
| KEYSPACE <keyspace>
| [TABLE] [<keyspace>.]<table>
FROM [ROLE <rolename> | USER <username>]
Revokes the specified permission (or all permissions) on a resource
from a role or user.
To be able to revoke a permission on some resource you have to
have that permission yourself and also AUTHORIZE permission on it,
or on one of its parent resources.
See HELP PERMISSIONS for more info on the available permissions.
"""
def help_list_permissions(self):
    # Topic text for "HELP LIST_PERMISSIONS".
    print """
LIST (<permission> [PERMISSION] | ALL [PERMISSIONS])
[ON ALL KEYSPACES
| KEYSPACE <keyspace>
| [TABLE] [<keyspace>.]<table>]
[OF [ROLE <rolename> | USER <username>]
[NORECURSIVE]
Omitting ON <resource> part will list permissions on ALL KEYSPACES,
every keyspace and table.
Omitting OF [ROLE <rolename> | USER <username>] part will list permissions
of all roles and users.
Omitting NORECURSIVE specifier will list permissions of the resource
and all its parents (table, table's keyspace and ALL KEYSPACES).
See HELP PERMISSIONS for more info on the available permissions.
"""
def help_permissions(self):
print """
PERMISSIONS
Cassandra has 6 permissions:
ALTER: required for ALTER KEYSPCE, ALTER TABLE, CREATE INDEX, DROP INDEX
AUTHORIZE: required for GRANT, REVOKE
CREATE: required for CREATE KEYSPACE, CREATE TABLE
DROP: required for DROP KEYSPACE, DROP TABLE
MODIFY: required for INSERT, DELETE, UPDATE, TRUNCATE
SELECT: required for SELECT
"""
def help_create_role(self):
    # Topic text for "HELP CREATE_ROLE".
    # NOTE(review): the text mentions a SUPERUSER option, but the syntax line
    # shows only "CREATE ROLE <rolename>;" — confirm which is accurate.
    print """
CREATE ROLE <rolename>;
CREATE ROLE creates a new Cassandra role.
Only superusers can issue CREATE ROLE requests.
To create a superuser account use SUPERUSER option (NOSUPERUSER is the default).
"""
def help_drop_role(self):
    # Topic text for "HELP DROP_ROLE".
    print """
DROP ROLE <rolename>;
DROP ROLE removes an existing role. You have to be logged in as a superuser
to issue a DROP ROLE statement.
"""
def help_list_roles(self):
    # Topic text for "HELP LIST_ROLES".
    print """
LIST ROLES [OF [ROLE <rolename> | USER <username>] [NORECURSIVE]];
Only superusers can use the OF clause to list the roles granted to a role or user.
If a superuser omits the OF clause then all the created roles will be listed.
If a non-superuser calls LIST ROLES then the roles granted to that user are listed.
If NORECURSIVE is provided then only directly granted roles are listed.
"""
def help_grant_role(self):
    # Topic text for "HELP GRANT_ROLE".
    print """
GRANT ROLE <rolename> TO [ROLE <rolename> | USER <username>]
Grant the specified role to another role or user. You have to be logged
in as superuser to issue a GRANT ROLE statement.
"""
def help_revoke_role(self):
    # Topic text for "HELP REVOKE_ROLE".
    print """
REVOKE ROLE <rolename> FROM [ROLE <rolename> | USER <username>]
Revoke the specified role from another role or user. You have to be logged
in as superuser to issue a REVOKE ROLE statement.
"""
| apache-2.0 |
V-Lam/School-Assignments-and-Labs-2014-2017 | CS 3346 - Python -- (Artificial Intelligence 1)/assignment 2/multiagent/ghostAgents.py | 21 | 2702 | # ghostAgents.py
# --------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
from game import Agent
from game import Actions
from game import Directions
import random
from util import manhattanDistance
import util
class GhostAgent(Agent):
    """Base class for ghost agents.

    A subclass supplies a probability distribution over legal actions via
    getDistribution(); getAction() samples a move from it.
    """

    def __init__(self, index):
        self.index = index

    def getAction(self, state):
        # Sample from the subclass-provided distribution; with no weighted
        # actions available, the ghost stands still.
        action_weights = self.getDistribution(state)
        if len(action_weights) == 0:
            return Directions.STOP
        return util.chooseFromDistribution(action_weights)

    def getDistribution(self, state):
        "Returns a Counter encoding a distribution over actions from the provided state."
        util.raiseNotDefined()
class RandomGhost(GhostAgent):
    "A ghost that chooses a legal action uniformly at random."

    def getDistribution(self, state):
        # Equal weight for every legal action, normalized to probabilities.
        weights = util.Counter()
        for action in state.getLegalActions(self.index):
            weights[action] = 1.0
        weights.normalize()
        return weights
class DirectionalGhost(GhostAgent):
    "A ghost that prefers to rush Pacman, or flee when scared."

    def __init__(self, index, prob_attack=0.8, prob_scaredFlee=0.8):
        self.index = index
        self.prob_attack = prob_attack
        self.prob_scaredFlee = prob_scaredFlee

    def getDistribution(self, state):
        """Weight the best action(s) with the configured probability and
        spread the remainder uniformly over all legal actions. "Best" means
        closest to Pacman normally, farthest from Pacman while scared."""
        ghost_state = state.getGhostState(self.index)
        legal = state.getLegalActions(self.index)
        position = state.getGhostPosition(self.index)
        scared = ghost_state.scaredTimer > 0

        # Scared ghosts move at half speed.
        speed = 0.5 if scared else 1

        vectors = [Actions.directionToVector(a, speed) for a in legal]
        successors = [(position[0] + dx, position[1] + dy) for dx, dy in vectors]
        pacman = state.getPacmanPosition()

        # Score each candidate position by its Manhattan distance to Pacman.
        distances = [manhattanDistance(succ, pacman) for succ in successors]
        if scared:
            best_score = max(distances)
            best_prob = self.prob_scaredFlee
        else:
            best_score = min(distances)
            best_prob = self.prob_attack
        best = [a for a, d in zip(legal, distances) if d == best_score]

        # Split best_prob among the best actions, and 1 - best_prob uniformly
        # among all legal actions, then normalize.
        dist = util.Counter()
        for a in best:
            dist[a] = best_prob / len(best)
        for a in legal:
            dist[a] += (1 - best_prob) / len(legal)
        dist.normalize()
        return dist
| mit |
cod3monk/RIPE-Atlas-sbucket | setup.py | 1 | 3944 | from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))

# Use the README as the long description shown on PyPI.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='sbucket',
    # PEP 440 version string.
    version='0.1.4',
    py_modules=['sbucket'],
    description='Spatial Bucketing of RIPE Atlas Probes on Map Projections.',
    long_description=long_description,
    url='https://github.com/cod3monk/RIPE-Atlas-sbucket',
    author='Julian Hammer',
    author_email='julian.hammer@fau.de',
    license='AGPLv3',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Telecommunications Industry',
        'Topic :: Utilities',
        'Topic :: Internet',
        'Environment :: Console',
        'Operating System :: POSIX',
        'Operating System :: MacOS',
        'Operating System :: Unix',
        'License :: OSI Approved :: GNU Affero General Public License v3',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
    ],
    keywords='',
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    # Run-time dependency: pyproj provides the map projections.
    install_requires=['pyproj'],
    extras_require={},
    # BUG FIX: package_data values must be *lists* of glob patterns; the
    # original bare string 'README.md' is not a valid value (setuptools
    # expects a sequence of patterns per package).
    # NOTE(review): the key 'pyproj' names an external dependency rather than
    # a package of this project — confirm whether this entry is needed at all.
    package_data={
        'pyproj': ['README.md'],
    },
    # Console entry point: `sbucket` runs sbucket.main().
    entry_points={
        'console_scripts': [
            'sbucket=sbucket:main',
        ],
    },
)
| agpl-3.0 |
calancha/DIRAC | Core/DISET/private/Transports/SSL/SocketInfoFactory.py | 3 | 6073 | # $HeadURL$
__RCSID__ = "$Id$"
import socket
import select
import os
try:
import hashlib as md5
except:
import md5
import GSI
from DIRAC.Core.Utilities.ReturnValues import S_ERROR, S_OK
from DIRAC.Core.Utilities import List, Network
from DIRAC.Core.DISET.private.Transports.SSL.SocketInfo import SocketInfo
from DIRAC.Core.DISET.private.Transports.SSL.SessionManager import gSessionManager
from DIRAC.Core.DISET.private.Transports.SSL.FakeSocket import FakeSocket
from DIRAC.Core.DISET.private.Transports.SSL.ThreadSafeSSLObject import ThreadSafeSSLObject
if GSI.__version__ < "0.5.0":
raise Exception( "Required GSI version >= 0.5.0" )
class SocketInfoFactory:
  """Factory for GSI/OpenSSL-wrapped client, server and listening sockets,
  each paired with a SocketInfo bookkeeping object.

  NOTE: this is Python 2 code (``except Exception, e`` syntax).
  """

  def generateClientInfo( self, destinationHostname, kwargs ):
    """Build a client-mode SocketInfo for destinationHostname.

    Entries in ``kwargs`` override the defaults below. Returns
    S_OK(SocketInfo) or S_ERROR if creating the SSL context fails.
    """
    infoDict = { 'clientMode' : True,
                 'hostname' : destinationHostname,
                 'timeout' : 600,
                 'enableSessions' : True }
    for key in kwargs.keys():
      infoDict[ key ] = kwargs[ key ]
    try:
      return S_OK( SocketInfo( infoDict ) )
    except Exception, e:
      return S_ERROR( "Error while creating SSL context: %s" % str( e ) )

  def generateServerInfo( self, kwargs ):
    """Build a server-mode SocketInfo; ``kwargs`` override the defaults."""
    infoDict = { 'clientMode' : False, 'timeout' : 30 }
    for key in kwargs.keys():
      infoDict[ key ] = kwargs[ key ]
    try:
      return S_OK( SocketInfo( infoDict ) )
    except Exception, e:
      return S_ERROR( str( e ) )

  def __socketConnect( self, hostAddress, timeout, retries = 2 ):
    """Open a plain TCP connection to hostAddress, retrying on timeouts.

    Returns S_OK(socket) or S_ERROR. ``retries`` more attempts are made
    when the connect itself times out.
    """
    osSocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
    #osSocket.setblocking( 0 )
    # NOTE(review): a fixed 5 s per-attempt connect timeout is used here; the
    # ``timeout`` argument only enables it (and bounds the select() below).
    # Presumably intentional, but confirm.
    if timeout:
      osSocket.settimeout( 5 )
    try:
      osSocket.connect( hostAddress )
    except socket.error , e:
      if e.args[0] == "timed out":
        osSocket.close()
        if retries:
          return self.__socketConnect( hostAddress, timeout, retries - 1 )
        else:
          return S_ERROR( "Can't connect: %s" % str( e ) )
      # errno 114 (EALREADY) / 115 (EINPROGRESS): non-blocking connect still
      # in progress; anything else is a hard failure.
      if e.args[0] not in ( 114, 115 ):
        return S_ERROR( "Can't connect: %s" % str( e ) )
      #Connect in progress
      # Wait until the socket becomes writable (connect finished) or the
      # caller's timeout expires.
      oL = select.select( [], [ osSocket ], [], timeout )[1]
      if len( oL ) == 0:
        osSocket.close()
        return S_ERROR( "Connection timeout" )
      errno = osSocket.getsockopt( socket.SOL_SOCKET, socket.SO_ERROR )
      if errno != 0:
        return S_ERROR( "Can't connect: %s" % str( ( errno, os.strerror( errno ) ) ) )
    return S_OK( osSocket )

  def __connect( self, socketInfo, hostAddress ):
    """TCP-connect and wrap the socket in an SSL connection.

    Also derives a session id from the peer address and the local
    credentials so an existing SSL session can be resumed.
    Returns S_OK(sslSocket) or S_ERROR.
    """
    #Connect baby!
    result = self.__socketConnect( hostAddress, socketInfo.infoDict[ 'timeout' ] )
    if not result[ 'OK' ]:
      return result
    osSocket = result[ 'Value' ]
    #SSL MAGIC
    sslSocket = GSI.SSL.Connection( socketInfo.getSSLContext(), osSocket )
    #Generate sessionId
    # The id mixes peer address, local credential location and any proxy
    # material so different identities never share an SSL session.
    sessionHash = md5.md5()
    sessionHash.update( str( hostAddress ) )
    sessionHash.update( "|%s" % str( socketInfo.getLocalCredentialsLocation() ) )
    for key in ( 'proxyLocation', 'proxyString' ):
      if key in socketInfo.infoDict:
        sessionHash.update( "|%s" % str( socketInfo.infoDict[ key ] ) )
    if 'proxyChain' in socketInfo.infoDict:
      sessionHash.update( "|%s" % socketInfo.infoDict[ 'proxyChain' ].dumpAllToString()[ 'Value' ] )
    sessionId = sessionHash.hexdigest()
    socketInfo.sslContext.set_session_id( str( hash( sessionId ) ) )
    socketInfo.setSSLSocket( sslSocket )
    # Resume a cached SSL session when one exists for this identity/peer.
    if gSessionManager.isValid( sessionId ):
      sslSocket.set_session( gSessionManager.get( sessionId ) )
    #Set the real timeout
    if socketInfo.infoDict[ 'timeout' ]:
      sslSocket.settimeout( socketInfo.infoDict[ 'timeout' ] )
    #Connected!
    return S_OK( sslSocket )

  def getSocket( self, hostAddress, **kwargs ):
    """Resolve, connect and SSL-handshake with ``hostAddress`` (host, port).

    Tries every resolved IP (in random order) and retries the handshake up
    to 3 times. Returns S_OK(SocketInfo) or S_ERROR.
    """
    hostName = hostAddress[0]
    retVal = self.generateClientInfo( hostName, kwargs )
    if not retVal[ 'OK' ]:
      return retVal
    socketInfo = retVal[ 'Value' ]
    retVal = Network.getIPsForHostName( hostName )
    if not retVal[ 'OK' ]:
      return S_ERROR( "Could not resolve %s: %s" % ( hostName, retVal[ 'Message' ] ) )
    # Randomize IPs for a crude form of load balancing across A records.
    ipList = List.randomize( retVal[ 'Value' ] )
    for i in range( 3 ):
      connected = False
      errorsList = []
      for ip in ipList :
        ipAddress = ( ip, hostAddress[1] )
        retVal = self.__connect( socketInfo, ipAddress )
        if retVal[ 'OK' ]:
          sslSocket = retVal[ 'Value' ]
          connected = True
          break
        errorsList.append( "%s: %s" % ( ipAddress, retVal[ 'Message' ] ) )
      if not connected:
        return S_ERROR( "Could not connect to %s: %s" % ( hostAddress, "," .join( [ e for e in errorsList ] ) ) )
      retVal = socketInfo.doClientHandshake()
      if retVal[ 'OK' ]:
        #Everything went ok. Don't need to retry
        break
    #Did the auth or the connection fail?
    if not retVal['OK']:
      return retVal
    if 'enableSessions' in kwargs and kwargs[ 'enableSessions' ]:
      # NOTE(review): this cache key (hash of hostAddress) differs from the
      # md5-based id used for *lookup* in __connect — verify the asymmetry
      # is intentional.
      sessionId = hash( hostAddress )
      gSessionManager.set( sessionId, sslSocket.get_session() )
    return S_OK( socketInfo )

  def getListeningSocket( self, hostAddress, listeningQueueSize = 5, reuseAddress = True, **kwargs ):
    """Create a bound, listening SSL server socket on ``hostAddress``.

    Returns S_OK(SocketInfo) or S_ERROR.
    """
    osSocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
    if reuseAddress:
      osSocket.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1 )
    retVal = self.generateServerInfo( kwargs )
    if not retVal[ 'OK' ]:
      return retVal
    socketInfo = retVal[ 'Value' ]
    sslSocket = GSI.SSL.Connection( socketInfo.getSSLContext(), osSocket )
    sslSocket.bind( hostAddress )
    sslSocket.listen( listeningQueueSize )
    socketInfo.setSSLSocket( sslSocket )
    return S_OK( socketInfo )

  def renewServerContext( self, origSocketInfo ):
    """Rebuild the SSL context of an existing listening socket (e.g. to pick
    up renewed credentials) while keeping the underlying OS socket."""
    retVal = self.generateServerInfo( origSocketInfo.infoDict )
    if not retVal[ 'OK' ]:
      return retVal
    socketInfo = retVal[ 'Value' ]
    osSocket = origSocketInfo.getSSLSocket().get_socket()
    sslSocket = GSI.SSL.Connection( socketInfo.getSSLContext(), osSocket )
    socketInfo.setSSLSocket( sslSocket )
    return S_OK( socketInfo )
gSocketInfoFactory = SocketInfoFactory()
| gpl-3.0 |
mnaza/themis | docs/examples/python/ssession_test_tornado_client.py | 4 | 2263 | #
# Copyright (c) 2015 Cossack Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
echo client for tornado
"""
import tornado.ioloop
import tornado.httpclient
from pythemis import ssession
# Hard-coded demo key material (Themis EC keypair halves): the client's
# private key and the server's public key, in raw binary form.
client_private = b"\x52\x45\x43\x32\x00\x00\x00\x2d\x51\xf4\xaa\x72\x00\x9f\x0f\x09\xce\xbe\x09\x33\xc2\x5e\x9a\x05\x99\x53\x9d\xb2\x32\xa2\x34\x64\x7a\xde\xde\x83\x8f\x65\xa9\x2a\x14\x6d\xaa\x90\x01"
server_public = b"\x55\x45\x43\x32\x00\x00\x00\x2d\x75\x58\x33\xd4\x02\x12\xdf\x1f\xe9\xea\x48\x11\xe1\xf9\x71\x8e\x24\x11\xcb\xfd\xc0\xa3\x6e\xd6\xac\x88\xb6\x44\xc2\x9a\x24\x84\xee\x50\x4c\x3e\xa0"
# Blocking tornado HTTP client used as the transport for the secure session.
http_client = tornado.httpclient.HTTPClient()
# Secure Session acting as peer "client", expecting peer "server" with the
# public key above.
session = ssession.SSession(
    b"client", client_private,
    ssession.SimpleMemoryTransport(b'server', server_public))
def http_fetch(data):
    """POST raw bytes to the local echo server and return the HTTP response."""
    request = tornado.httpclient.HTTPRequest(
        "http://127.0.0.1:26260",
        "POST",
        headers={'Content-Type': 'application/octet-stream'},
        body=data,
    )
    return http_client.fetch(request)
try:
    # Start the Secure Session negotiation with the session-establishment
    # request message.
    connect_request = session.connect_request()
    # send initial message to server
    response = http_fetch(connect_request)
    # decrypt accepted message
    message = session.unwrap(response.body)
    # Keep exchanging negotiation messages until the session is established.
    while not session.is_established():
        # send unwrapped message to server as is
        response = http_fetch(message)
        # decrypt accepted message
        message = session.unwrap(response.body)
    # Session established: wrap (encrypt) and send the payload message.
    response = http_fetch(session.wrap(b"This is test message"))
    # decrypt accepted message
    message = session.unwrap(response.body)
    print(message)
except tornado.httpclient.HTTPError as e:
    print("Error: " + str(e))
except Exception as e:
    # Catch-all so the demo always reports rather than tracebacks.
    print("Error: " + str(e))
http_client.close()
| apache-2.0 |
mm112287/2015cda_g8_0421 | static/Brython3.1.0-20150301-090019/Lib/bisect.py | 1261 | 2595 | """Bisection algorithms."""
def insort_right(a, x, lo=0, hi=None):
    """Insert item x in list a, keeping a sorted (a is assumed sorted).

    If x is already present, x lands to the right of the rightmost equal
    entry. lo (default 0) and hi (default len(a)) bound the searched slice.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    # Binary-search the rightmost insertion point, then splice x in there.
    while lo < hi:
        middle = (lo + hi) // 2
        if x < a[middle]:
            hi = middle
        else:
            lo = middle + 1
    a.insert(lo, x)

insort = insort_right  # backward compatibility
def bisect_right(a, x, lo=0, hi=None):
    """Return the insertion index for x in sorted list a.

    The returned i satisfies: every e in a[:i] has e <= x and every e in
    a[i:] has e > x — i.e. insertion lands just after any equal entries.
    lo (default 0) and hi (default len(a)) bound the searched slice.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    # Classic binary search keeping the invariant described above; only the
    # < comparison is ever used on the elements.
    while lo < hi:
        middle = (lo + hi) // 2
        if x < a[middle]:
            hi = middle
        else:
            lo = middle + 1
    return lo

bisect = bisect_right  # backward compatibility
def insort_left(a, x, lo=0, hi=None):
    """Insert item x in list a, keeping a sorted (a is assumed sorted).

    If x is already present, x lands to the left of the leftmost equal
    entry. lo (default 0) and hi (default len(a)) bound the searched slice.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    # Binary-search the leftmost insertion point, then splice x in there.
    while lo < hi:
        middle = (lo + hi) // 2
        if a[middle] < x:
            lo = middle + 1
        else:
            hi = middle
    a.insert(lo, x)
def bisect_left(a, x, lo=0, hi=None):
    """Return the insertion index for x in sorted list a.

    The returned i satisfies: every e in a[:i] has e < x and every e in
    a[i:] has e >= x — i.e. insertion lands just before any equal entries.
    lo (default 0) and hi (default len(a)) bound the searched slice.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    # Classic binary search keeping the invariant described above; only the
    # < comparison is ever used on the elements.
    while lo < hi:
        middle = (lo + hi) // 2
        if a[middle] < x:
            lo = middle + 1
        else:
            hi = middle
    return lo
# Overwrite above definitions with a fast C implementation
try:
from _bisect import *
except ImportError:
pass
| gpl-3.0 |
Beauhurst/django | django/contrib/gis/db/backends/postgis/adapter.py | 25 | 2203 | """
This object provides quoting for GEOS geometries into PostgreSQL/PostGIS.
"""
from psycopg2 import Binary
from psycopg2.extensions import ISQLQuote
from django.contrib.gis.db.backends.postgis.pgraster import to_pgraster
from django.contrib.gis.geometry.backend import Geometry
class PostGISAdapter:
    """psycopg2 quoting adapter for GEOS geometries and GDAL rasters,
    producing PostGIS-compatible SQL fragments."""

    def __init__(self, obj, geography=False):
        """Initialize on the spatial object (geometry, adapter, or raster)."""
        self.is_geometry = isinstance(obj, (Geometry, PostGISAdapter))
        if self.is_geometry:
            # Keep the EWKB as plain bytes so the adapter pickles easily,
            # and wrap it in a psycopg2 Binary for quoting.
            self.ewkb = bytes(obj.ewkb)
            self._adapter = Binary(self.ewkb)
        else:
            # Raster: serialize to the PostGIS raster hex-WKB string.
            self.ewkb = to_pgraster(obj)
        self.srid = obj.srid
        self.geography = geography

    def __conform__(self, proto):
        """Does the given protocol conform to what Psycopg2 expects?"""
        if proto != ISQLQuote:
            raise Exception('Error implementing psycopg2 protocol. Is psycopg2 installed?')
        return self

    def __eq__(self, other):
        return (
            isinstance(other, PostGISAdapter)
            and self.ewkb == other.ewkb
            and self.srid == other.srid
        )

    def __hash__(self):
        # Consistent with __eq__: equal adapters hash equally.
        return hash((self.ewkb, self.srid))

    def __str__(self):
        return self.getquoted()

    def prepare(self, conn):
        """Let the Binary adapter escape per the server's
        `standard_conforming_strings` setting (geometries only)."""
        if self.is_geometry:
            self._adapter.prepare(conn)

    def getquoted(self):
        """Return a properly quoted string for use in PostgreSQL/PostGIS."""
        if not self.is_geometry:
            # Rasters get an explicit cast on the hex-WKB string literal.
            return "'%s'::raster" % self.ewkb
        # Psycopg will figure out whether to use E'\\000' or '\000'.
        if self.geography:
            function = 'ST_GeogFromWKB'
        else:
            function = 'ST_GeomFromEWKB'
        return '%s(%s)' % (function, self._adapter.getquoted().decode())
| bsd-3-clause |
LinusU/fbthrift | thrift/lib/py/protocol/TJSONProtocol.py | 11 | 14345 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .TProtocol import TProtocolBase, TProtocolException, TType
import json, base64, sys
__all__ = ['TJSONProtocol', 'TJSONProtocolFactory']
# Protocol version written into (and required from) every message envelope.
VERSION = 1

# JSON punctuation emitted/consumed by the protocol.
COMMA = ','
COLON = ':'
LBRACE = '{'
RBRACE = '}'
LBRACKET = '['
RBRACKET = ']'
QUOTE = '"'
BACKSLASH = '\\'
ZERO = '0'

# Prefix of a \u00XX escape sequence, and the short-escape tables:
# ESCAPE_CHAR[i] is the escaped source character, ESCAPE_CHAR_VALS[i]
# the character it decodes to.
ESCSEQ = '\\u00'
ESCAPE_CHAR = '"\\bfnrt'
ESCAPE_CHAR_VALS = ['"', '\\', '\b', '\f', '\n', '\r', '\t']

# Characters that may appear in a JSON numeric literal.
NUMERIC_CHAR = '+-.0123456789Ee'

# Thrift TType -> short JSON type-name tag used on the wire.
CTYPES = {TType.BOOL: 'tf',
          TType.BYTE: 'i8',
          TType.I16: 'i16',
          TType.I32: 'i32',
          TType.I64: 'i64',
          TType.DOUBLE: 'dbl',
          TType.STRING: 'str',
          TType.STRUCT: 'rec',
          TType.LIST: 'lst',
          TType.SET: 'set',
          TType.MAP: 'map'}
# Reverse mapping of CTYPES: JSON type-name tag -> Thrift TType constant,
# used when decoding incoming type tags.  A dict comprehension replaces the
# original manual loop over CTYPES.keys(), which also leaked the loop
# variable `key` into the module namespace.
JTYPES = {json_name: ttype for ttype, json_name in CTYPES.items()}
class JSONBaseContext(object):
    """Top-level serialization context: no separators and no number quoting.

    Subclasses override doIO()/escapeNum() to implement array (comma) and
    object (colon/comma) separator rules.
    """

    def __init__(self, protocol):
        self.protocol = protocol
        # True until the first token of this context has been processed.
        self.first = True

    def doIO(self, function):
        """Separator hook; a no-op at the top level."""
        pass

    def write(self):
        """Called before writing a token; nothing to emit here."""
        pass

    def read(self):
        """Called before reading a token; nothing to consume here."""
        pass

    def escapeNum(self):
        """Top-level numbers are written bare (unquoted)."""
        return False
class JSONListContext(JSONBaseContext):
    """Context for a JSON array: a comma separates consecutive elements."""

    def doIO(self, function):
        # Nothing precedes the first element; every later element is
        # preceded by a comma.
        if self.first:
            self.first = False
        else:
            function(COMMA)

    def write(self):
        self.doIO(self.protocol.trans.write)

    def read(self):
        self.doIO(self.protocol.readJSONSyntaxChar)
class JSONPairContext(JSONBaseContext):
    """Context for a JSON object: alternates ':' and ',' between tokens."""

    # Which separator comes next: True -> colon (after a key),
    # False -> comma (after a value).
    colon = True

    def doIO(self, function):
        if self.first:
            # First key of the object: no separator, next one is a colon.
            self.first = False
            self.colon = True
        else:
            separator = COLON if self.colon else COMMA
            function(separator)
            self.colon = not self.colon

    def write(self):
        self.doIO(self.protocol.trans.write)

    def read(self):
        self.doIO(self.protocol.readJSONSyntaxChar)

    def escapeNum(self):
        # Object keys must be quoted even when they are numeric; the
        # "about to write a key" state is exactly when colon is pending.
        return self.colon
class LookaheadReader():
    """Single-character lookahead over the protocol's transport.

    peek() fetches (and caches) the next character without consuming it;
    read() consumes either the cached character or a fresh one.
    """

    hasData = False
    data = ''

    def __init__(self, protocol):
        self.protocol = protocol

    def _decode(self):
        # Transports return bytes on Python 3; normalize to str internally.
        if sys.version_info[0] >= 3 and isinstance(self.data, bytes):
            self.data = str(self.data, 'utf-8')

    def read(self):
        """Consume and return the next character."""
        if self.hasData:
            self.hasData = False
        else:
            self.data = self.protocol.trans.read(1)
        self._decode()
        return self.data

    def peek(self):
        """Return the next character without consuming it."""
        if not self.hasData:
            self.data = self.protocol.trans.read(1)
            self.hasData = True
        self._decode()
        return self.data
class TJSONProtocolBase(TProtocolBase):
    """Shared read/write machinery for the Thrift JSON protocol.

    Keeps a stack of serialization contexts (array vs. object) that decide
    which separator (',' or ':') to emit/consume between tokens and whether
    a number must be quoted at the current position.
    """

    def __init__(self, trans):
        TProtocolBase.__init__(self, trans)
        self.resetWriteContext()
        self.resetReadContext()

    def resetWriteContext(self):
        """Reset to a fresh top-level (no-separator) context."""
        self.contextStack = []
        self.context = JSONBaseContext(self)

    def resetReadContext(self):
        self.resetWriteContext()
        self.reader = LookaheadReader(self)

    def pushContext(self, ctx):
        self.contextStack.append(ctx)
        self.context = ctx

    def popContext(self):
        # BUGFIX: the previous implementation only popped the stack and left
        # self.context pointing at the popped (inner) context, so after any
        # nested object/array ended, separators were generated from stale
        # state and nested structs/containers serialized incorrectly.  This
        # matches the fix applied in upstream Apache Thrift: restore the
        # enclosing context, falling back to a fresh base context at top level.
        self.contextStack.pop()
        if self.contextStack:
            self.context = self.contextStack[-1]
        else:
            self.context = JSONBaseContext(self)

    def writeJSONString(self, string):
        """Write *string* as a JSON string literal (with separator)."""
        # Python 3 JSON will not serialize bytes
        if isinstance(string, bytes) and sys.version_info.major >= 3:
            string = string.decode()
        self.context.write()
        self.trans.write(json.dumps(string))

    def writeJSONNumber(self, number):
        self.context.write()
        jsNumber = str(number)
        if self.context.escapeNum():
            # Inside an object, numeric keys/values must be quoted.
            jsNumber = "%s%s%s" % (QUOTE, jsNumber, QUOTE)
        self.trans.write(jsNumber)

    def writeJSONBase64(self, binary):
        """Write *binary* as a quoted base64 string."""
        self.context.write()
        self.trans.write(QUOTE)
        # NOTE(review): b64encode() returns bytes on Python 3; this relies
        # on the transport accepting bytes as well as str -- confirm.
        self.trans.write(base64.b64encode(binary))
        self.trans.write(QUOTE)

    def writeJSONObjectStart(self):
        self.context.write()
        self.trans.write(LBRACE)
        self.pushContext(JSONPairContext(self))

    def writeJSONObjectEnd(self):
        self.popContext()
        self.trans.write(RBRACE)

    def writeJSONArrayStart(self):
        self.context.write()
        self.trans.write(LBRACKET)
        self.pushContext(JSONListContext(self))

    def writeJSONArrayEnd(self):
        self.popContext()
        self.trans.write(RBRACKET)

    def readJSONSyntaxChar(self, character):
        """Consume one character and require it to equal *character*."""
        current = self.reader.read()
        if character != current:
            raise TProtocolException(TProtocolException.INVALID_DATA,
                                     "Unexpected character: %s" % current)

    def readJSONString(self, skipContext):
        """Read a JSON string literal, handling \\u00XX and short escapes.

        When *skipContext* is True, the surrounding context separator has
        already been consumed by the caller.
        """
        string = []
        if skipContext is False:
            self.context.read()
        self.readJSONSyntaxChar(QUOTE)
        while True:
            character = self.reader.read()
            if character == QUOTE:
                break
            if character == ESCSEQ[0]:
                character = self.reader.read()
                if character == ESCSEQ[1]:
                    # \u00XX escape: insist on the two leading zeros, then
                    # let the JSON decoder turn the hex pair into a char.
                    self.readJSONSyntaxChar(ZERO)
                    self.readJSONSyntaxChar(ZERO)
                    data = self.trans.read(2)
                    if sys.version_info[0] >= 3 and isinstance(data, bytes):
                        character = json.JSONDecoder().decode(
                            '"\\u00%s"' % str(data, 'utf-8'))
                    else:
                        character = json.JSONDecoder().decode('"\\u00%s"' %
                                                              data)
                else:
                    # Short escape such as \n, \t, \" ...
                    off = ESCAPE_CHAR.find(character)
                    if off == -1:
                        raise TProtocolException(
                            TProtocolException.INVALID_DATA,
                            "Expected control char")
                    character = ESCAPE_CHAR_VALS[off]
            string.append(character)
        return ''.join(string)

    def isJSONNumeric(self, character):
        """Return True if *character* may appear in a numeric literal."""
        return (True if NUMERIC_CHAR.find(character) != - 1 else False)

    def readJSONQuotes(self):
        # Contexts that quote numbers (object keys) require a quote here.
        if (self.context.escapeNum()):
            self.readJSONSyntaxChar(QUOTE)

    def readJSONNumericChars(self):
        """Consume and return the maximal run of numeric characters."""
        numeric = []
        while True:
            character = self.reader.peek()
            if self.isJSONNumeric(character) is False:
                break
            numeric.append(self.reader.read())
        return ''.join(numeric)

    def readJSONInteger(self):
        self.context.read()
        self.readJSONQuotes()
        numeric = self.readJSONNumericChars()
        self.readJSONQuotes()
        try:
            return int(numeric)
        except ValueError:
            raise TProtocolException(TProtocolException.INVALID_DATA,
                                     "Bad data encounted in numeric data")

    def readJSONDouble(self):
        """Read a double; quoted form is used for NaN/Infinity specials."""
        self.context.read()
        if self.reader.peek() == QUOTE:
            string = self.readJSONString(True)
            try:
                double = float(string)
                # NOTE(review): `escapeNum` here is the bound method object,
                # so `is False` can never be true and this validity check is
                # dead code; `double != float('nan')` is also always True.
                # Left unchanged to preserve wire-compat behavior -- confirm
                # intent before activating the check.
                if (self.context.escapeNum is False and
                        double != float('inf') and
                        double != float('-inf') and
                        double != float('nan')
                        ):
                    raise TProtocolException(TProtocolException.INVALID_DATA,
                                             "Numeric data unexpectedly quoted")
                return double
            except ValueError:
                raise TProtocolException(TProtocolException.INVALID_DATA,
                                         "Bad data encounted in numeric data")
        else:
            if self.context.escapeNum() is True:
                self.readJSONSyntaxChar(QUOTE)
            try:
                return float(self.readJSONNumericChars())
            except ValueError:
                raise TProtocolException(TProtocolException.INVALID_DATA,
                                         "Bad data encounted in numeric data")

    def readJSONBase64(self):
        """Read a quoted base64 string and return the decoded bytes."""
        string = self.readJSONString(False)
        return base64.b64decode(string)

    def readJSONObjectStart(self):
        self.context.read()
        self.readJSONSyntaxChar(LBRACE)
        self.pushContext(JSONPairContext(self))

    def readJSONObjectEnd(self):
        self.readJSONSyntaxChar(RBRACE)
        self.popContext()

    def readJSONArrayStart(self):
        self.context.read()
        self.readJSONSyntaxChar(LBRACKET)
        self.pushContext(JSONListContext(self))

    def readJSONArrayEnd(self):
        self.readJSONSyntaxChar(RBRACKET)
        self.popContext()
class TJSONProtocol(TJSONProtocolBase):
    """Thrift JSON protocol: messages are JSON arrays
    [VERSION, name, type, seqid, payload]; structs are objects keyed by
    field id, each field an object mapping a CTYPES tag to its value."""

    def readMessageBegin(self):
        self.resetReadContext()
        self.readJSONArrayStart()
        if self.readJSONInteger() != VERSION:
            raise TProtocolException(TProtocolException.BAD_VERSION,
                                     "Message contained bad version.")
        name = self.readJSONString(False)
        typen = self.readJSONInteger()
        seqid = self.readJSONInteger()
        return (name, typen, seqid)

    def readMessageEnd(self):
        self.readJSONArrayEnd()

    def readStructBegin(self):
        self.readJSONObjectStart()

    def readStructEnd(self):
        self.readJSONObjectEnd()

    def readFieldBegin(self):
        # A '}' here means the enclosing struct object is ending: STOP field.
        character = self.reader.peek()
        ttype = 0
        id = 0
        if character == RBRACE:
            ttype = TType.STOP
        else:
            id = self.readJSONInteger()
            self.readJSONObjectStart()
            ttype = JTYPES[self.readJSONString(False)]
        # Field names are not transmitted by this protocol, hence None.
        return (None, ttype, id)

    def readFieldEnd(self):
        self.readJSONObjectEnd()

    def readMapBegin(self):
        # Maps are [keyTag, valueTag, size, {k: v, ...}].
        self.readJSONArrayStart()
        keyType = JTYPES[self.readJSONString(False)]
        valueType = JTYPES[self.readJSONString(False)]
        size = self.readJSONInteger()
        self.readJSONObjectStart()
        return (keyType, valueType, size)

    def readMapEnd(self):
        self.readJSONObjectEnd()
        self.readJSONArrayEnd()

    def readCollectionBegin(self):
        # Lists and sets share the [elemTag, size, e1, e2, ...] layout.
        self.readJSONArrayStart()
        elemType = JTYPES[self.readJSONString(False)]
        size = self.readJSONInteger()
        return (elemType, size)
    readListBegin = readCollectionBegin
    readSetBegin = readCollectionBegin

    def readCollectionEnd(self):
        self.readJSONArrayEnd()
    readSetEnd = readCollectionEnd
    readListEnd = readCollectionEnd

    def readBool(self):
        # Booleans are encoded as the integers 0 and 1.
        return (False if self.readJSONInteger() == 0 else True)

    def readNumber(self):
        return self.readJSONInteger()
    readByte = readNumber
    readI16 = readNumber
    readI32 = readNumber
    readI64 = readNumber

    def readDouble(self):
        return self.readJSONDouble()

    def readFloat(self):
        return self.readJSONDouble()

    def readString(self):
        string = self.readJSONString(False)
        if sys.version_info.major >= 3:
            # Generated code expects that protocols deal in bytes in Py3
            return string.encode('utf-8')
        return string

    def readBinary(self):
        return self.readJSONBase64()

    def writeMessageBegin(self, name, request_type, seqid):
        self.resetWriteContext()
        self.writeJSONArrayStart()
        self.writeJSONNumber(VERSION)
        self.writeJSONString(name)
        self.writeJSONNumber(request_type)
        self.writeJSONNumber(seqid)

    def writeMessageEnd(self):
        self.writeJSONArrayEnd()

    def writeStructBegin(self, name):
        # The struct name is not part of the JSON encoding.
        self.writeJSONObjectStart()

    def writeStructEnd(self):
        self.writeJSONObjectEnd()

    def writeFieldBegin(self, name, ttype, id):
        # Fields are keyed by numeric id; the value is a one-entry object
        # mapping the type tag to the payload.
        self.writeJSONNumber(id)
        self.writeJSONObjectStart()
        self.writeJSONString(CTYPES[ttype])

    def writeFieldEnd(self):
        self.writeJSONObjectEnd()

    def writeFieldStop(self):
        # The closing '}' of the struct object already marks the end.
        pass

    def writeMapBegin(self, ktype, vtype, size):
        self.writeJSONArrayStart()
        self.writeJSONString(CTYPES[ktype])
        self.writeJSONString(CTYPES[vtype])
        self.writeJSONNumber(size)
        self.writeJSONObjectStart()

    def writeMapEnd(self):
        self.writeJSONObjectEnd()
        self.writeJSONArrayEnd()

    def writeListBegin(self, etype, size):
        self.writeJSONArrayStart()
        self.writeJSONString(CTYPES[etype])
        self.writeJSONNumber(size)

    def writeListEnd(self):
        self.writeJSONArrayEnd()

    def writeSetBegin(self, etype, size):
        self.writeJSONArrayStart()
        self.writeJSONString(CTYPES[etype])
        self.writeJSONNumber(size)

    def writeSetEnd(self):
        self.writeJSONArrayEnd()

    def writeBool(self, boolean):
        self.writeJSONNumber(1 if boolean is True else 0)

    def writeInteger(self, integer):
        self.writeJSONNumber(integer)
    writeByte = writeInteger
    writeI16 = writeInteger
    writeI32 = writeInteger
    writeI64 = writeInteger

    def writeDouble(self, dbl):
        self.writeJSONNumber(dbl)

    def writeFloat(self, flt):
        self.writeJSONNumber(flt)

    def writeString(self, string):
        self.writeJSONString(string)

    def writeBinary(self, binary):
        self.writeJSONBase64(binary)
class TJSONProtocolFactory:
    """Factory that produces TJSONProtocol instances for a transport."""

    def __init__(self):
        pass

    def getProtocol(self, trans):
        """Return a new TJSONProtocol bound to *trans*."""
        return TJSONProtocol(trans)
| apache-2.0 |
amrdraz/brython | www/src/Lib/test/test_signal.py | 23 | 32346 | import unittest
from test import support
from contextlib import closing
import gc
import pickle
import select
import signal
import struct
import subprocess
import traceback
import sys, os, time, errno
from test.script_helper import assert_python_ok, spawn_python
try:
import threading
except ImportError:
threading = None
# The signal module was never fully supported on these legacy platforms;
# skip the whole test module there.
if sys.platform in ('os2', 'riscos'):
    raise unittest.SkipTest("Can't test signal on %s" % sys.platform)
class HandlerBCalled(Exception):
    """Raised by the SIGUSR1 test handler so tests can detect that it ran."""
def exit_subprocess():
    """Use os._exit(0) to exit the current subprocess.
    Otherwise, the test catches the SystemExit and continues executing
    in parallel with the original test, so you wind up with an
    exponential number of tests running concurrently.
    """
    # os._exit() terminates immediately, bypassing SystemExit, atexit
    # handlers and enclosing try/finally blocks.
    os._exit(0)
def ignoring_eintr(__func, *args, **kwargs):
    """Call __func(*args, **kwargs); return None if it fails with EINTR.

    Any EnvironmentError with a different errno is re-raised.
    """
    try:
        return __func(*args, **kwargs)
    except EnvironmentError as err:
        if err.errno != errno.EINTR:
            raise
        return None
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class InterProcessSignalTests(unittest.TestCase):
    """End-to-end signal delivery driven by child `kill` processes."""

    MAX_DURATION = 20  # Entire test should last at most 20 sec.

    def setUp(self):
        # Disable the GC so a collection cannot run inside a signal
        # handler and perturb timing; restored in tearDown.
        self.using_gc = gc.isenabled()
        gc.disable()

    def tearDown(self):
        if self.using_gc:
            gc.enable()

    def format_frame(self, frame, limit=None):
        """Render *frame*'s call stack as one string (for diagnostics)."""
        return ''.join(traceback.format_stack(frame, limit=limit))

    def handlerA(self, signum, frame):
        # SIGHUP handler: only records that it ran.
        self.a_called = True

    def handlerB(self, signum, frame):
        # SIGUSR1 handler: records the call and raises so the main code
        # path can observe the interruption.
        self.b_called = True
        raise HandlerBCalled(signum, self.format_frame(frame))

    def wait(self, child):
        """Wait for child to finish, ignoring EINTR."""
        while True:
            try:
                child.wait()
                return
            except OSError as e:
                if e.errno != errno.EINTR:
                    raise

    def run_test(self):
        # Install handlers. This function runs in a sub-process, so we
        # don't worry about re-setting the default handlers.
        signal.signal(signal.SIGHUP, self.handlerA)
        signal.signal(signal.SIGUSR1, self.handlerB)
        signal.signal(signal.SIGUSR2, signal.SIG_IGN)
        signal.signal(signal.SIGALRM, signal.default_int_handler)

        # Variables the signals will modify:
        self.a_called = False
        self.b_called = False

        # Let the sub-processes know who to send signals to.
        pid = os.getpid()

        child = ignoring_eintr(subprocess.Popen, ['kill', '-HUP', str(pid)])
        if child:
            self.wait(child)
            if not self.a_called:
                time.sleep(1)  # Give the signal time to be delivered.
        self.assertTrue(self.a_called)
        self.assertFalse(self.b_called)
        self.a_called = False

        # Make sure the signal isn't delivered while the previous
        # Popen object is being destroyed, because __del__ swallows
        # exceptions.
        del child
        try:
            child = subprocess.Popen(['kill', '-USR1', str(pid)])
            # This wait should be interrupted by the signal's exception.
            self.wait(child)
            time.sleep(1)  # Give the signal time to be delivered.
            self.fail('HandlerBCalled exception not raised')
        except HandlerBCalled:
            self.assertTrue(self.b_called)
            self.assertFalse(self.a_called)

        child = ignoring_eintr(subprocess.Popen, ['kill', '-USR2', str(pid)])
        if child:
            self.wait(child)  # Nothing should happen.

        try:
            signal.alarm(1)
            # The race condition in pause doesn't matter in this case,
            # since alarm is going to raise a KeyboardException, which
            # will skip the call.
            signal.pause()
            # But if another signal arrives before the alarm, pause
            # may return early.
            time.sleep(1)
        except KeyboardInterrupt:
            pass
        except:
            self.fail("Some other exception woke us from pause: %s" %
                      traceback.format_exc())
        else:
            self.fail("pause returned of its own accord, and the signal"
                      " didn't arrive after another second.")

    # Issue 3864, unknown if this affects earlier versions of freebsd also
    @unittest.skipIf(sys.platform=='freebsd6',
        'inter process signals not reliable (do not mix well with threading) '
        'on freebsd6')
    def test_main(self):
        # This function spawns a child process to insulate the main
        # test-running process from all the signals. It then
        # communicates with that child process over a pipe and
        # re-raises information about any exceptions the child
        # raises. The real work happens in self.run_test().
        os_done_r, os_done_w = os.pipe()
        with closing(os.fdopen(os_done_r, 'rb')) as done_r, \
             closing(os.fdopen(os_done_w, 'wb')) as done_w:
            child = os.fork()
            if child == 0:
                # In the child process; run the test and report results
                # through the pipe.
                try:
                    done_r.close()
                    # Have to close done_w again here because
                    # exit_subprocess() will skip the enclosing with block.
                    with closing(done_w):
                        try:
                            self.run_test()
                        except:
                            pickle.dump(traceback.format_exc(), done_w)
                        else:
                            pickle.dump(None, done_w)
                except:
                    print('Uh oh, raised from pickle.')
                    traceback.print_exc()
                finally:
                    exit_subprocess()

            done_w.close()
            # Block for up to MAX_DURATION seconds for the test to finish.
            r, w, x = select.select([done_r], [], [], self.MAX_DURATION)
            if done_r in r:
                tb = pickle.load(done_r)
                if tb:
                    self.fail(tb)
            else:
                os.kill(child, signal.SIGKILL)
                self.fail('Test deadlocked after %d seconds.' %
                          self.MAX_DURATION)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class PosixTests(unittest.TestCase):
    """Basic signal.signal()/getsignal() argument and round-trip checks."""

    def trivial_signal_handler(self, *args):
        pass

    def test_out_of_range_signal_number_raises_error(self):
        with self.assertRaises(ValueError):
            signal.getsignal(4242)
        with self.assertRaises(ValueError):
            signal.signal(4242, self.trivial_signal_handler)

    def test_setting_signal_handler_to_none_raises_error(self):
        with self.assertRaises(TypeError):
            signal.signal(signal.SIGUSR1, None)

    def test_getsignal(self):
        # Install our handler, check it reads back, then restore the old one.
        previous = signal.signal(signal.SIGHUP, self.trivial_signal_handler)
        self.assertEqual(signal.getsignal(signal.SIGHUP),
                         self.trivial_signal_handler)
        signal.signal(signal.SIGHUP, previous)
        self.assertEqual(signal.getsignal(signal.SIGHUP), previous)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
class WindowsSignalTests(unittest.TestCase):
    """Windows-only checks for the restricted set of supported signals."""

    def test_issue9324(self):
        # Updated for issue #10003, adding SIGBREAK
        handler = lambda x, y: None
        checked = set()
        for sig in (signal.SIGABRT, signal.SIGBREAK, signal.SIGFPE,
                    signal.SIGILL, signal.SIGINT, signal.SIGSEGV,
                    signal.SIGTERM):
            # Set and then reset a handler for signals that work on windows.
            # Issue #18396, only for signals without a C-level handler.
            if signal.getsignal(sig) is not None:
                signal.signal(sig, signal.signal(sig, handler))
                checked.add(sig)
        # Issue #18396: Ensure the above loop at least tested *something*
        self.assertTrue(checked)

        # Signal numbers outside the supported set must be rejected.
        with self.assertRaises(ValueError):
            signal.signal(-1, handler)
        with self.assertRaises(ValueError):
            signal.signal(7, handler)
class WakeupFDTests(unittest.TestCase):
    """Validation of signal.set_wakeup_fd() arguments."""

    def test_invalid_fd(self):
        # A known-bad descriptor must be rejected up front.
        bad_fd = support.make_bad_fd()
        with self.assertRaises(ValueError):
            signal.set_wakeup_fd(bad_fd)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class WakeupSignalTests(unittest.TestCase):
    """Tests for the wakeup file descriptor (signal.set_wakeup_fd).

    Each test runs in a fresh subprocess (single-threaded) built from a
    code template; the template writes received signal numbers to a pipe
    and check_signum() compares them with what was expected.
    """

    def check_wakeup(self, test_body, *signals, ordered=True):
        # use a subprocess to have only one thread
        code = """if 1:
        import fcntl
        import os
        import signal
        import struct
        signals = {!r}
        def handler(signum, frame):
            pass
        def check_signum(signals):
            data = os.read(read, len(signals)+1)
            raised = struct.unpack('%uB' % len(data), data)
            if not {!r}:
                raised = set(raised)
                signals = set(signals)
            if raised != signals:
                raise Exception("%r != %r" % (raised, signals))
        {}
        signal.signal(signal.SIGALRM, handler)
        read, write = os.pipe()
        for fd in (read, write):
            flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
            flags = flags | os.O_NONBLOCK
            fcntl.fcntl(fd, fcntl.F_SETFL, flags)
        signal.set_wakeup_fd(write)
        test()
        check_signum(signals)
        os.close(read)
        os.close(write)
        """.format(signals, ordered, test_body)
        assert_python_ok('-c', code)

    def test_wakeup_fd_early(self):
        # A signal arriving before select() must still wake the fd.
        self.check_wakeup("""def test():
            import select
            import time
            TIMEOUT_FULL = 10
            TIMEOUT_HALF = 5
            signal.alarm(1)
            before_time = time.time()
            # We attempt to get a signal during the sleep,
            # before select is called
            time.sleep(TIMEOUT_FULL)
            mid_time = time.time()
            dt = mid_time - before_time
            if dt >= TIMEOUT_HALF:
                raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
            select.select([read], [], [], TIMEOUT_FULL)
            after_time = time.time()
            dt = after_time - mid_time
            if dt >= TIMEOUT_HALF:
                raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
        """, signal.SIGALRM)

    def test_wakeup_fd_during(self):
        # A signal arriving while blocked in select() must interrupt it.
        self.check_wakeup("""def test():
            import select
            import time
            TIMEOUT_FULL = 10
            TIMEOUT_HALF = 5
            signal.alarm(1)
            before_time = time.time()
            # We attempt to get a signal during the select call
            try:
                select.select([read], [], [], TIMEOUT_FULL)
            except select.error:
                pass
            else:
                raise Exception("select.error not raised")
            after_time = time.time()
            dt = after_time - before_time
            if dt >= TIMEOUT_HALF:
                raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
        """, signal.SIGALRM)

    def test_signum(self):
        # The wakeup fd receives the numbers of all delivered signals.
        self.check_wakeup("""def test():
            signal.signal(signal.SIGUSR1, handler)
            os.kill(os.getpid(), signal.SIGUSR1)
            os.kill(os.getpid(), signal.SIGALRM)
        """, signal.SIGUSR1, signal.SIGALRM)

    @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
                         'need signal.pthread_sigmask()')
    def test_pending(self):
        # Signals raised while blocked are written on unblock (any order).
        self.check_wakeup("""def test():
            signum1 = signal.SIGUSR1
            signum2 = signal.SIGUSR2
            signal.signal(signum1, handler)
            signal.signal(signum2, handler)
            signal.pthread_sigmask(signal.SIG_BLOCK, (signum1, signum2))
            os.kill(os.getpid(), signum1)
            os.kill(os.getpid(), signum2)
            # Unblocking the 2 signals calls the C signal handler twice
            signal.pthread_sigmask(signal.SIG_UNBLOCK, (signum1, signum2))
        """, signal.SIGUSR1, signal.SIGUSR2, ordered=False)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class SiginterruptTest(unittest.TestCase):
    """Tests for signal.siginterrupt(): whether a handled signal makes a
    blocking syscall fail with EINTR or be transparently restarted."""

    def readpipe_interrupted(self, interrupt):
        """Perform a read during which a signal will arrive. Return True if the
        read is interrupted by the signal and raises an exception. Return False
        if it returns normally.
        """
        # use a subprocess to have only one thread, to have a timeout on the
        # blocking read and to not touch signal handling in this process
        code = """if 1:
            import errno
            import os
            import signal
            import sys
            interrupt = %r
            r, w = os.pipe()
            def handler(signum, frame):
                pass
            signal.signal(signal.SIGALRM, handler)
            if interrupt is not None:
                signal.siginterrupt(signal.SIGALRM, interrupt)
            print("ready")
            sys.stdout.flush()
            # run the test twice
            for loop in range(2):
                # send a SIGALRM in a second (during the read)
                signal.alarm(1)
                try:
                    # blocking call: read from a pipe without data
                    os.read(r, 1)
                except OSError as err:
                    if err.errno != errno.EINTR:
                        raise
                else:
                    sys.exit(2)
            sys.exit(3)
        """ % (interrupt,)
        with spawn_python('-c', code) as process:
            try:
                # wait until the child process is loaded and has started
                first_line = process.stdout.readline()
                stdout, stderr = process.communicate(timeout=5.0)
            except subprocess.TimeoutExpired:
                process.kill()
                return False
            else:
                stdout = first_line + stdout
            exitcode = process.wait()
            if exitcode not in (2, 3):
                raise Exception("Child error (exit code %s): %s"
                                % (exitcode, stdout))
            # exit code 3 means both reads raised EINTR (interrupted).
            return (exitcode == 3)

    def test_without_siginterrupt(self):
        # If a signal handler is installed and siginterrupt is not called
        # at all, when that signal arrives, it interrupts a syscall that's in
        # progress.
        interrupted = self.readpipe_interrupted(None)
        self.assertTrue(interrupted)

    def test_siginterrupt_on(self):
        # If a signal handler is installed and siginterrupt is called with
        # a true value for the second argument, when that signal arrives, it
        # interrupts a syscall that's in progress.
        interrupted = self.readpipe_interrupted(True)
        self.assertTrue(interrupted)

    def test_siginterrupt_off(self):
        # If a signal handler is installed and siginterrupt is called with
        # a false value for the second argument, when that signal arrives, it
        # does not interrupt a syscall that's in progress.
        interrupted = self.readpipe_interrupted(False)
        self.assertFalse(interrupted)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class ItimerTest(unittest.TestCase):
    """Tests for signal.setitimer()/getitimer() across the three itimers
    (ITIMER_REAL, ITIMER_VIRTUAL, ITIMER_PROF)."""

    def setUp(self):
        self.hndl_called = False
        self.hndl_count = 0
        self.itimer = None
        self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm)

    def tearDown(self):
        signal.signal(signal.SIGALRM, self.old_alarm)
        if self.itimer is not None: # test_itimer_exc doesn't change this attr
            # just ensure that itimer is stopped
            signal.setitimer(self.itimer, 0)

    def sig_alrm(self, *args):
        # SIGALRM handler: records delivery of the real-time itimer.
        self.hndl_called = True

    def sig_vtalrm(self, *args):
        # SIGVTALRM handler: disables the virtual itimer on the 4th call
        # and fails if it keeps firing afterwards.
        self.hndl_called = True
        if self.hndl_count > 3:
            # it shouldn't be here, because it should have been disabled.
            raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL "
                                     "timer.")
        elif self.hndl_count == 3:
            # disable ITIMER_VIRTUAL, this function shouldn't be called anymore
            signal.setitimer(signal.ITIMER_VIRTUAL, 0)
        self.hndl_count += 1

    def sig_prof(self, *args):
        # SIGPROF handler: one shot, disables the profiling itimer.
        self.hndl_called = True
        signal.setitimer(signal.ITIMER_PROF, 0)

    def test_itimer_exc(self):
        # XXX I'm assuming -1 is an invalid itimer, but maybe some platform
        # defines it ?
        self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0)
        # Negative times are treated as zero on some platforms.
        if 0:
            self.assertRaises(signal.ItimerError,
                              signal.setitimer, signal.ITIMER_REAL, -1)

    def test_itimer_real(self):
        self.itimer = signal.ITIMER_REAL
        signal.setitimer(self.itimer, 1.0)
        signal.pause()
        self.assertEqual(self.hndl_called, True)

    # Issue 3864, unknown if this affects earlier versions of freebsd also
    @unittest.skipIf(sys.platform in ('freebsd6', 'netbsd5'),
        'itimer not reliable (does not mix well with threading) on some BSDs.')
    def test_itimer_virtual(self):
        self.itimer = signal.ITIMER_VIRTUAL
        signal.signal(signal.SIGVTALRM, self.sig_vtalrm)
        signal.setitimer(self.itimer, 0.3, 0.2)

        start_time = time.time()
        while time.time() - start_time < 60.0:
            # use up some virtual time by doing real work
            _ = pow(12345, 67890, 10000019)
            if signal.getitimer(self.itimer) == (0.0, 0.0):
                break # sig_vtalrm handler stopped this itimer
        else: # Issue 8424
            self.skipTest("timeout: likely cause: machine too slow or load too "
                          "high")

        # virtual itimer should be (0.0, 0.0) now
        self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
        # and the handler should have been called
        self.assertEqual(self.hndl_called, True)

    # Issue 3864, unknown if this affects earlier versions of freebsd also
    @unittest.skipIf(sys.platform=='freebsd6',
        'itimer not reliable (does not mix well with threading) on freebsd6')
    def test_itimer_prof(self):
        self.itimer = signal.ITIMER_PROF
        signal.signal(signal.SIGPROF, self.sig_prof)
        signal.setitimer(self.itimer, 0.2, 0.2)

        start_time = time.time()
        while time.time() - start_time < 60.0:
            # do some work
            _ = pow(12345, 67890, 10000019)
            if signal.getitimer(self.itimer) == (0.0, 0.0):
                break # sig_prof handler stopped this itimer
        else: # Issue 8424
            self.skipTest("timeout: likely cause: machine too slow or load too "
                          "high")

        # profiling itimer should be (0.0, 0.0) now
        self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
        # and the handler should have been called
        self.assertEqual(self.hndl_called, True)
class PendingSignalsTests(unittest.TestCase):
"""
Test pthread_sigmask(), pthread_kill(), sigpending() and sigwait()
functions.
"""
@unittest.skipUnless(hasattr(signal, 'sigpending'),
'need signal.sigpending()')
def test_sigpending_empty(self):
self.assertEqual(signal.sigpending(), set())
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
@unittest.skipUnless(hasattr(signal, 'sigpending'),
'need signal.sigpending()')
def test_sigpending(self):
code = """if 1:
import os
import signal
def handler(signum, frame):
1/0
signum = signal.SIGUSR1
signal.signal(signum, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
os.kill(os.getpid(), signum)
pending = signal.sigpending()
if pending != {signum}:
raise Exception('%s != {%s}' % (pending, signum))
try:
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
"""
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'pthread_kill'),
'need signal.pthread_kill()')
def test_pthread_kill(self):
code = """if 1:
import signal
import threading
import sys
signum = signal.SIGUSR1
def handler(signum, frame):
1/0
signal.signal(signum, handler)
if sys.platform == 'freebsd6':
# Issue #12392 and #12469: send a signal to the main thread
# doesn't work before the creation of the first thread on
# FreeBSD 6
def noop():
pass
thread = threading.Thread(target=noop)
thread.start()
thread.join()
tid = threading.get_ident()
try:
signal.pthread_kill(tid, signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
"""
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def wait_helper(self, blocked, test):
"""
test: body of the "def test(signum):" function.
blocked: number of the blocked signal
"""
code = '''if 1:
import signal
import sys
def handler(signum, frame):
1/0
%s
blocked = %s
signum = signal.SIGALRM
# child: block and wait the signal
try:
signal.signal(signum, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, [blocked])
# Do the tests
test(signum)
# The handler must not be called on unblock
try:
signal.pthread_sigmask(signal.SIG_UNBLOCK, [blocked])
except ZeroDivisionError:
print("the signal handler has been called",
file=sys.stderr)
sys.exit(1)
except BaseException as err:
print("error: {}".format(err), file=sys.stderr)
sys.stderr.flush()
sys.exit(1)
''' % (test.strip(), blocked)
# sig*wait* must be called with the signal blocked: since the current
# process might have several threads running, use a subprocess to have
# a single thread.
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'sigwait'),
'need signal.sigwait()')
def test_sigwait(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
received = signal.sigwait([signum])
if received != signum:
raise Exception('received %s, not %s' % (received, signum))
''')
@unittest.skipUnless(hasattr(signal, 'sigwaitinfo'),
'need signal.sigwaitinfo()')
def test_sigwaitinfo(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
info = signal.sigwaitinfo([signum])
if info.si_signo != signum:
raise Exception("info.si_signo != %s" % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
info = signal.sigtimedwait([signum], 10.1000)
if info.si_signo != signum:
raise Exception('info.si_signo != %s' % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_poll(self):
# check that polling with sigtimedwait works
self.wait_helper(signal.SIGALRM, '''
def test(signum):
import os
os.kill(os.getpid(), signum)
info = signal.sigtimedwait([signum], 0)
if info.si_signo != signum:
raise Exception('info.si_signo != %s' % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_timeout(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
received = signal.sigtimedwait([signum], 1.0)
if received is not None:
raise Exception("received=%r" % (received,))
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_negative_timeout(self):
signum = signal.SIGALRM
self.assertRaises(ValueError, signal.sigtimedwait, [signum], -1.0)
@unittest.skipUnless(hasattr(signal, 'sigwaitinfo'),
'need signal.sigwaitinfo()')
# Issue #18238: sigwaitinfo() can be interrupted on Linux (raises
# InterruptedError), but not on AIX
@unittest.skipIf(sys.platform.startswith("aix"),
'signal.sigwaitinfo() cannot be interrupted on AIX')
def test_sigwaitinfo_interrupted(self):
self.wait_helper(signal.SIGUSR1, '''
def test(signum):
import errno
hndl_called = True
def alarm_handler(signum, frame):
hndl_called = False
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(1)
try:
signal.sigwaitinfo([signal.SIGUSR1])
except OSError as e:
if e.errno == errno.EINTR:
if not hndl_called:
raise Exception("SIGALRM handler not called")
else:
raise Exception("Expected EINTR to be raised by sigwaitinfo")
else:
raise Exception("Expected EINTR to be raised by sigwaitinfo")
''')
@unittest.skipUnless(hasattr(signal, 'sigwait'),
                     'need signal.sigwait()')
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
                     'need signal.pthread_sigmask()')
@unittest.skipIf(threading is None, "test needs threading module")
def test_sigwait_thread(self):
    """sigwait() in the main thread must not suspend the whole process."""
    # Check that calling sigwait() from a thread doesn't suspend the whole
    # process. A new interpreter is spawned to avoid problems when mixing
    # threads and fork(): only async-safe functions are allowed between
    # fork() and exec().
    assert_python_ok("-c", """if True:
        import os, threading, sys, time, signal

        # the default handler terminates the process
        signum = signal.SIGUSR1

        def kill_later():
            # wait until the main thread is waiting in sigwait()
            time.sleep(1)
            os.kill(os.getpid(), signum)

        # the signal must be blocked by all the threads
        signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
        killer = threading.Thread(target=kill_later)
        killer.start()
        received = signal.sigwait([signum])
        if received != signum:
            print("sigwait() received %s, not %s" % (received, signum),
                  file=sys.stderr)
            sys.exit(1)
        killer.join()
        # unblock the signal, which should have been cleared by sigwait()
        signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
    """)
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
                     'need signal.pthread_sigmask()')
def test_pthread_sigmask_arguments(self):
    """pthread_sigmask() rejects wrong arity and an invalid `how` value."""
    for bad_args in ((), (1,), (1, 2, 3)):
        with self.assertRaises(TypeError):
            signal.pthread_sigmask(*bad_args)
    with self.assertRaises(OSError):
        signal.pthread_sigmask(1700, [])
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
                     'need signal.pthread_sigmask()')
def test_pthread_sigmask(self):
    """Blocking, pending delivery and unblocking of a signal via
    pthread_sigmask(), run in a fresh interpreter."""
    code = """if 1:
    import signal
    import os; import threading

    def handler(signum, frame):
        1/0

    def kill(signum):
        os.kill(os.getpid(), signum)

    def read_sigmask():
        return signal.pthread_sigmask(signal.SIG_BLOCK, [])

    signum = signal.SIGUSR1

    # Install our signal handler
    old_handler = signal.signal(signum, handler)

    # Unblock SIGUSR1 (and copy the old mask) to test our signal handler
    old_mask = signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
    try:
        kill(signum)
    except ZeroDivisionError:
        pass
    else:
        raise Exception("ZeroDivisionError not raised")

    # Block and then raise SIGUSR1. The signal is blocked: the signal
    # handler is not called, and the signal is now pending
    signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
    kill(signum)

    # Check the new mask
    blocked = read_sigmask()
    if signum not in blocked:
        raise Exception("%s not in %s" % (signum, blocked))
    if old_mask ^ blocked != {signum}:
        raise Exception("%s ^ %s != {%s}" % (old_mask, blocked, signum))

    # Unblock SIGUSR1
    try:
        # unblock the pending signal calls immediatly the signal handler
        signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
    except ZeroDivisionError:
        pass
    else:
        raise Exception("ZeroDivisionError not raised")
    try:
        kill(signum)
    except ZeroDivisionError:
        pass
    else:
        raise Exception("ZeroDivisionError not raised")

    # Check the new mask
    unblocked = read_sigmask()
    if signum in unblocked:
        raise Exception("%s in %s" % (signum, unblocked))
    if blocked ^ unblocked != {signum}:
        raise Exception("%s ^ %s != {%s}" % (blocked, unblocked, signum))
    if old_mask != unblocked:
        raise Exception("%s != %s" % (old_mask, unblocked))
    """
    assert_python_ok('-c', code)
@unittest.skipIf(sys.platform == 'freebsd6',
                 "issue #12392: send a signal to the main thread doesn't work "
                 "before the creation of the first thread on FreeBSD 6")
@unittest.skipUnless(hasattr(signal, 'pthread_kill'),
                     'need signal.pthread_kill()')
def test_pthread_kill_main_thread(self):
    """pthread_kill() can target the main thread before any other thread
    exists (issue #12392); the child must exit via the handler (code 3)."""
    # Test that a signal can be sent to the main thread with pthread_kill()
    # before any other thread has been created (see issue #12392).
    code = """if True:
        import threading
        import signal
        import sys

        def handler(signum, frame):
            sys.exit(3)

        signal.signal(signal.SIGUSR1, handler)
        signal.pthread_kill(threading.get_ident(), signal.SIGUSR1)
        sys.exit(2)
    """
    with spawn_python('-c', code) as process:
        stdout, stderr = process.communicate()
        exitcode = process.wait()
        if exitcode != 3:
            raise Exception("Child error (exit code %s): %s" %
                            (exitcode, stdout))
def test_main():
    """Run every test case in this module, always reaping spawned
    children afterwards so no zombies are left behind."""
    try:
        support.run_unittest(PosixTests, InterProcessSignalTests,
                             WakeupFDTests, WakeupSignalTests,
                             SiginterruptTest, ItimerTest, WindowsSignalTests,
                             PendingSignalsTests)
    finally:
        support.reap_children()


if __name__ == "__main__":
    test_main()
| bsd-3-clause |
edx-solutions/edx-platform | lms/djangoapps/courseware/migrations/0005_orgdynamicupgradedeadlineconfiguration.py | 5 | 1776 | # -*- coding: utf-8 -*-
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import lms.djangoapps.courseware.models
class Migration(migrations.Migration):
    """Add the OrgDynamicUpgradeDeadlineConfiguration model (per-org
    upgrade deadline override) and order the existing course-level
    configuration by most recent change first."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('courseware', '0004_auto_20171010_1639'),
    ]

    operations = [
        migrations.CreateModel(
            name='OrgDynamicUpgradeDeadlineConfiguration',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
                ('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
                ('org_id', models.CharField(max_length=255, db_index=True)),
                ('deadline_days', models.PositiveSmallIntegerField(default=21, help_text='Number of days a learner has to upgrade after content is made available')),
                ('opt_out', models.BooleanField(default=False, help_text='Disable the dynamic upgrade deadline for this organization.')),
                ('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
            ],
            options={
                'ordering': ('-change_date',),
                'abstract': False,
            },
            bases=(lms.djangoapps.courseware.models.OptOutDynamicUpgradeDeadlineMixin, models.Model),
        ),
        migrations.AlterModelOptions(
            name='coursedynamicupgradedeadlineconfiguration',
            options={'ordering': ('-change_date',)},
        ),
    ]
| agpl-3.0 |
onceuponatimeforever/oh-mainline | vendor/packages/docutils/test/test_error_reporting.py | 15 | 12498 | #! /usr/bin/env python
# .. coding: utf-8
# $Id: test_error_reporting.py 7668 2013-06-04 12:46:30Z milde $
# Author: Günter Milde <milde@users.sourceforge.net>
# Copyright: This module has been placed in the public domain.
"""
Test `EnvironmentError` reporting.
In some locales, the `errstr` argument of IOError and OSError contains
non-ASCII chars.
In Python 2, converting an exception instance to `str` or `unicode`
might fail, with non-ASCII chars in arguments and the default encoding
and errors ('ascii', 'strict').
Therefore, Docutils must not use string interpolation with exception
instances like, e.g., ::
try:
something
except IOError, error:
print 'Found %s' % error
unless the minimal required Python version has this problem fixed.
"""
import unittest
import sys, os
import codecs
try: # from standard library module `io`
from io import StringIO, BytesIO
except ImportError: # new in Python 2.6
from StringIO import StringIO
BytesIO = StringIO
import DocutilsTestSupport # must be imported before docutils
from docutils import core, parsers, frontend, utils
from docutils.utils.error_reporting import SafeString, ErrorString, ErrorOutput
from docutils._compat import b, bytes
# Pick a locale known to produce non-ASCII OS error messages, so the
# tests below can exercise the Python 2 str/unicode conversion bugs.
oldlocale = None
if sys.version_info < (3,0): # problems solved in py3k
    try:
        import locale # module missing in Jython
        oldlocale = locale.getlocale()
        # Why does getlocale return the defaultlocale in Python 3.2 ????
        # oldlocale = (None, None) # test suite runs without locale
    except ImportError:
        print ('cannot test error reporting with problematic locales,\n'
               '`import locale` failed.')

# locales confirmed to use non-ASCII chars in the IOError message
# for a missing file (https://bugs.gentoo.org/show_bug.cgi?id=349101)
# TODO: add more confirmed problematic locales
problematic_locales = ['cs_CZ', 'cs_CZ.UTF8',
                       'el_GR', 'el_GR.UTF-8',
                       # 'fr_FR.UTF-8', # only OSError
                       'ja_JP.UTF-8',
                       'ru_RU', 'ru_RU.KOI8-R',
                       'ru_RU.UTF-8',
                       '', # default locale: might be non-problematic
                       ]

if oldlocale is not None:
    # find a supported problematic locale:
    for testlocale in problematic_locales:
        try:
            locale.setlocale(locale.LC_ALL, testlocale)
        except locale.Error:
            testlocale = None
        else:
            break
    locale.setlocale(locale.LC_ALL, oldlocale) # reset
else:
    testlocale = None
class SafeStringTests(unittest.TestCase):
    """str()/unicode() conversion of SafeString-wrapped values must never
    raise, even for byte strings and Exceptions with non-ASCII args."""
    # the error message in EnvironmentError instances comes from the OS
    # and in some locales (e.g. ru_RU), contains high bit chars.
    # -> see the test in test_error_reporting.py

    # test data:
    bs = b('\xfc') # unicode(bs) fails, str(bs) in Python 3 return repr()
    us = u'\xfc' # bytes(us) fails; str(us) fails in Python 2
    be = Exception(bs) # unicode(be) fails
    ue = Exception(us) # bytes(ue) fails, str(ue) fails in Python 2;
    # unicode(ue) fails in Python < 2.6 (issue2517_)
    # .. _issue2517: http://bugs.python.org/issue2517

    # wrapped test data:
    wbs = SafeString(bs)
    wus = SafeString(us)
    wbe = SafeString(be)
    wue = SafeString(ue)

    def test_7bit(self):
        # wrapping (not required with 7-bit chars) must not change the
        # result of conversions:
        bs7 = b('foo')
        us7 = u'foo'
        be7 = Exception(bs7)
        ue7 = Exception(us7)
        self.assertEqual(str(42), str(SafeString(42)))
        self.assertEqual(str(bs7), str(SafeString(bs7)))
        self.assertEqual(str(us7), str(SafeString(us7)))
        self.assertEqual(str(be7), str(SafeString(be7)))
        self.assertEqual(str(ue7), str(SafeString(ue7)))
        self.assertEqual(unicode(7), unicode(SafeString(7)))
        self.assertEqual(unicode(bs7), unicode(SafeString(bs7)))
        self.assertEqual(unicode(us7), unicode(SafeString(us7)))
        self.assertEqual(unicode(be7), unicode(SafeString(be7)))
        self.assertEqual(unicode(ue7), unicode(SafeString(ue7)))

    def test_ustr(self):
        """Test conversion to a unicode-string."""
        # unicode(self.bs) fails
        self.assertEqual(unicode, type(unicode(self.wbs)))
        self.assertEqual(unicode(self.us), unicode(self.wus))
        # unicode(self.be) fails
        self.assertEqual(unicode, type(unicode(self.wbe)))
        # unicode(ue) fails in Python < 2.6 (issue2517_)
        self.assertEqual(unicode, type(unicode(self.wue)))
        self.assertEqual(self.us, unicode(self.wue))

    def test_str(self):
        """Test conversion to a string (bytes in Python 2, unicode in Python 3)."""
        self.assertEqual(str(self.bs), str(self.wbs))
        # NOTE(review): this compares str(self.be) with itself; it looks
        # like str(self.wbe) was intended -- confirm against upstream.
        self.assertEqual(str(self.be), str(self.be))
        # str(us) fails in Python 2
        self.assertEqual(str, type(str(self.wus)))
        # str(ue) fails in Python 2
        self.assertEqual(str, type(str(self.wue)))
class ErrorStringTests(unittest.TestCase):
    """ErrorString must render '<ExceptionClass>: <args>' safely for both
    byte-string and unicode exception arguments."""
    bs = b('\xfc') # unicode(bs) fails, str(bs) in Python 3 return repr()
    us = u'\xfc' # bytes(us) fails; str(us) fails in Python 2

    def test_str(self):
        self.assertEqual('Exception: spam',
                         str(ErrorString(Exception('spam'))))
        self.assertEqual('IndexError: '+str(self.bs),
                         str(ErrorString(IndexError(self.bs))))
        self.assertEqual('ImportError: %s' % SafeString(self.us),
                         str(ErrorString(ImportError(self.us))))

    def test_unicode(self):
        self.assertEqual(u'Exception: spam',
                         unicode(ErrorString(Exception(u'spam'))))
        self.assertEqual(u'IndexError: '+self.us,
                         unicode(ErrorString(IndexError(self.us))))
        self.assertEqual(u'ImportError: %s' % SafeString(self.bs),
                         unicode(ErrorString(ImportError(self.bs))))
# ErrorOutput tests
# -----------------
# Stub: Buffer with 'strict' auto-conversion of input to byte string:
class BBuf(BytesIO, object): # super class object required by Python <= 2.5
    """Byte buffer stub: rejects non-ASCII unicode input ('strict')."""
    def write(self, data):
        if isinstance(data, unicode):
            # the encoded result is discarded on purpose: this only
            # raises UnicodeEncodeError when `data` is not pure ASCII
            data.encode('ascii', 'strict')
        super(BBuf, self).write(data)
# Stub: Buffer expecting unicode string:
class UBuf(StringIO, object): # super class object required by Python <= 2.5
    """Text buffer stub: like Python 3 stdout/stderr, accepts only unicode."""
    def write(self, data):
        # emulate Python 3 handling of stdout, stderr
        if isinstance(data, bytes):
            raise TypeError('must be unicode, not bytes')
        super(UBuf, self).write(data)
class ErrorOutputTests(unittest.TestCase):
    """ErrorOutput must adapt its payload (encode/decode with fallbacks)
    to byte-only and unicode-only output streams."""

    def test_defaults(self):
        # without arguments, messages go to stderr
        e = ErrorOutput()
        self.assertEqual(e.stream, sys.stderr)

    def test_bbuf(self):
        buf = BBuf() # buffer storing byte string
        e = ErrorOutput(buf, encoding='ascii')
        # write byte-string as-is
        e.write(b('b\xfc'))
        self.assertEqual(buf.getvalue(), b('b\xfc'))
        # encode unicode data with backslashescape fallback replacement:
        e.write(u' u\xfc')
        self.assertEqual(buf.getvalue(), b('b\xfc u\\xfc'))
        # handle Exceptions with Unicode string args
        # unicode(Exception(u'e\xfc')) # fails in Python < 2.6
        e.write(AttributeError(u' e\xfc'))
        self.assertEqual(buf.getvalue(), b('b\xfc u\\xfc e\\xfc'))
        # encode with `encoding` attribute
        e.encoding = 'utf8'
        e.write(u' u\xfc')
        self.assertEqual(buf.getvalue(), b('b\xfc u\\xfc e\\xfc u\xc3\xbc'))

    def test_ubuf(self):
        buf = UBuf() # buffer only accepting unicode string
        # decode of binary strings
        e = ErrorOutput(buf, encoding='ascii')
        e.write(b('b\xfc'))
        self.assertEqual(buf.getvalue(), u'b\ufffd') # use REPLACEMENT CHARACTER
        # write Unicode string and Exceptions with Unicode args
        e.write(u' u\xfc')
        self.assertEqual(buf.getvalue(), u'b\ufffd u\xfc')
        e.write(AttributeError(u' e\xfc'))
        self.assertEqual(buf.getvalue(), u'b\ufffd u\xfc e\xfc')
        # decode with `encoding` attribute
        e.encoding = 'latin1'
        e.write(b(' b\xfc'))
        self.assertEqual(buf.getvalue(), u'b\ufffd u\xfc e\xfc b\xfc')
class SafeStringTests_locale(unittest.TestCase):
    """
    Test docutils.SafeString with 'problematic' locales.

    The error message in `EnvironmentError` instances comes from the OS
    and in some locales (e.g. ru_RU), contains high bit chars.
    """
    # NOTE: this class body executes at import time under the problematic
    # locale (Python 2 `except E, e:` syntax), then restores the locale.
    if testlocale:
        locale.setlocale(locale.LC_ALL, testlocale)
    # test data:
    bs = b('\xfc')
    us = u'\xfc'
    try:
        open(b('\xfc'))
    except IOError, e: # in Python 3 the name for the exception instance
        bioe = e       # is local to the except clause
    try:
        open(u'\xfc')
    except IOError, e:
        uioe = e
    except UnicodeEncodeError:
        try:
            open(u'\xfc'.encode(sys.getfilesystemencoding(), 'replace'))
        except IOError, e:
            uioe = e
    try:
        os.chdir(b('\xfc'))
    except OSError, e:
        bose = e
    try:
        os.chdir(u'\xfc')
    except OSError, e:
        uose = e
    except UnicodeEncodeError:
        try:
            os.chdir(u'\xfc'.encode(sys.getfilesystemencoding(), 'replace'))
        except OSError, e:
            uose = e
    # wrapped test data:
    wbioe = SafeString(bioe)
    wuioe = SafeString(uioe)
    wbose = SafeString(bose)
    wuose = SafeString(uose)
    # reset locale
    if testlocale:
        locale.setlocale(locale.LC_ALL, oldlocale)

    def test_ustr(self):
        """Test conversion to a unicode-string."""
        # unicode(bioe) fails with e.g. 'ru_RU.utf8' locale
        self.assertEqual(unicode, type(unicode(self.wbioe)))
        self.assertEqual(unicode, type(unicode(self.wuioe)))
        self.assertEqual(unicode, type(unicode(self.wbose)))
        self.assertEqual(unicode, type(unicode(self.wuose)))

    def test_str(self):
        """Test conversion to a string (bytes in Python 2, unicode in Python 3)."""
        self.assertEqual(str(self.bioe), str(self.wbioe))
        self.assertEqual(str(self.uioe), str(self.wuioe))
        self.assertEqual(str(self.bose), str(self.wbose))
        self.assertEqual(str(self.uose), str(self.wuose))
class ErrorReportingTests(unittest.TestCase):
    """
    Test cases where error reporting can go wrong.

    Do not test the exact output (as this varies with the locale), just
    ensure that the correct exception is thrown.
    """

    # These tests fail with a 'problematic locale' and
    # (revision < 7035) and Python-2.

    parser = parsers.rst.Parser()
    """Parser shared by all ParserTestCases."""

    option_parser = frontend.OptionParser(components=(parsers.rst.Parser,))
    settings = option_parser.get_default_values()
    settings.report_level = 1
    settings.halt_level = 1
    settings.warning_stream = ''
    document = utils.new_document('test data', settings)

    def setUp(self):
        # run each test under the problematic locale, if one was found
        if testlocale:
            locale.setlocale(locale.LC_ALL, testlocale)

    def tearDown(self):
        # always restore the original locale
        if testlocale:
            locale.setlocale(locale.LC_ALL, oldlocale)

    def test_include(self):
        source = ('.. include:: bogus.txt')
        self.assertRaises(utils.SystemMessage,
                          self.parser.parse, source, self.document)

    def test_raw_file(self):
        source = ('.. raw:: html\n'
                  '   :file: bogus.html\n')
        self.assertRaises(utils.SystemMessage,
                          self.parser.parse, source, self.document)

    def test_raw_url(self):
        source = ('.. raw:: html\n'
                  '   :url: http://bogus.html\n')
        self.assertRaises(utils.SystemMessage,
                          self.parser.parse, source, self.document)

    def test_csv_table(self):
        source = ('.. csv-table:: external file\n'
                  '   :file: bogus.csv\n')
        self.assertRaises(utils.SystemMessage,
                          self.parser.parse, source, self.document)

    def test_csv_table_url(self):
        source = ('.. csv-table:: external URL\n'
                  '   :url: ftp://bogus.csv\n')
        self.assertRaises(utils.SystemMessage,
                          self.parser.parse, source, self.document)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
jcasner/nupic | examples/prediction/experiments/dutyCycle/problem/description.py | 50 | 1547 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.frameworks.prediction.helpers import importBaseDescription
# Experiment overrides applied on top of the shared base description;
# commented-out entries keep the base description's defaults.
config = dict(
    #sensorVerbosity=3,
    iterationCount = 1000,
    numAValues = 10,
    numBValues = 10,
    #encodingFieldStyleA = 'contiguous',
    encodingFieldWidthA = 50,
    #encodingOnBitsA = 5,
    #encodingFieldStyleB = 'contiguous',
    encodingFieldWidthB = 50,
    #encodingOnBitsB = 5,
    b0Likelihood = None,
)

# Load the base experiment description with the overrides above and
# re-export everything it defines as this module's globals.
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| agpl-3.0 |
UniversalMasterEgg8679/ansible | lib/ansible/modules/network/f5/bigip_node.py | 72 | 16390 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2013, Matt Hite <mhite@hotmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_node
short_description: "Manages F5 BIG-IP LTM nodes"
description:
- "Manages F5 BIG-IP LTM nodes via iControl SOAP API"
version_added: "1.4"
author:
- Matt Hite (@mhite)
- Tim Rupp (@caphrim007)
notes:
- "Requires BIG-IP software version >= 11"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
requirements:
- bigsuds
options:
state:
description:
- Pool member state
required: true
default: present
choices: ['present', 'absent']
aliases: []
session_state:
description:
- Set new session availability status for node
version_added: "1.9"
required: false
default: null
choices: ['enabled', 'disabled']
aliases: []
monitor_state:
description:
- Set monitor availability status for node
version_added: "1.9"
required: false
default: null
choices: ['enabled', 'disabled']
aliases: []
partition:
description:
- Partition
required: false
default: 'Common'
choices: []
aliases: []
name:
description:
- "Node name"
required: false
default: null
choices: []
monitor_type:
description:
- Monitor rule type when monitors > 1
version_added: "2.2"
required: False
default: null
choices: ['and_list', 'm_of_n']
aliases: []
quorum:
description:
- Monitor quorum value when monitor_type is m_of_n
version_added: "2.2"
required: False
default: null
choices: []
aliases: []
monitors:
description:
- Monitor template name list. Always use the full path to the monitor.
version_added: "2.2"
required: False
default: null
choices: []
aliases: []
host:
description:
- "Node IP. Required when state=present and node does not exist. Error when state=absent."
required: true
default: null
choices: []
aliases: ['address', 'ip']
description:
description:
- "Node description."
required: false
default: null
choices: []
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Add node
bigip_node:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
partition: "Common"
host: "10.20.30.40"
name: "10.20.30.40"
# Note that the BIG-IP automatically names the node using the
# IP address specified in previous play's host parameter.
# Future plays referencing this node no longer use the host
# parameter but instead use the name parameter.
# Alternatively, you could have specified a name with the
# name parameter when state=present.
- name: Add node with a single 'ping' monitor
bigip_node:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
partition: "Common"
host: "10.20.30.40"
name: "mytestserver"
monitors:
- /Common/icmp
delegate_to: localhost
- name: Modify node description
bigip_node:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
partition: "Common"
name: "10.20.30.40"
description: "Our best server yet"
delegate_to: localhost
- name: Delete node
bigip_node:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "absent"
partition: "Common"
name: "10.20.30.40"
# The BIG-IP GUI doesn't map directly to the API calls for "Node ->
# General Properties -> State". The following states map to API monitor
# and session states.
#
# Enabled (all traffic allowed):
# monitor_state=enabled, session_state=enabled
# Disabled (only persistent or active connections allowed):
# monitor_state=enabled, session_state=disabled
# Forced offline (only active connections allowed):
# monitor_state=disabled, session_state=disabled
#
# See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down
- name: Force node offline
bigip_node:
server: "lb.mydomain.com"
user: "admin"
password: "mysecret"
state: "present"
session_state: "disabled"
monitor_state: "disabled"
partition: "Common"
name: "10.20.30.40"
'''
def node_exists(api, address):
    """Return True if the named node exists on the BIG-IP, else False.

    Probes the node's object status; a "was not found" fault from the
    API means the node is absent, any other fault is re-raised.
    """
    try:
        api.LocalLB.NodeAddressV2.get_object_status(nodes=[address])
    except bigsuds.OperationFailed as error:
        if "was not found" not in str(error):
            # genuine exception
            raise
        return False
    return True
def create_node_address(api, address, name):
    """Create a node *name* with IP *address* and no connection limit.

    Returns a (success, description) tuple; a duplicate name/IP fault
    yields (False, reason), any other fault is re-raised.
    """
    try:
        api.LocalLB.NodeAddressV2.create(
            nodes=[name],
            addresses=[address],
            limits=[0]
        )
    except bigsuds.OperationFailed as error:
        if "already exists" not in str(error):
            # genuine exception
            raise
        return (False, "referenced name or IP already in use")
    return (True, "")
def get_node_address(api, name):
    """Return the IP address configured for node *name*."""
    addresses = api.LocalLB.NodeAddressV2.get_address(nodes=[name])
    return addresses[0]
def delete_node_address(api, address):
    """Delete the named node.

    Returns a (success, description) tuple; a "referenced by a member of
    pool" fault yields (False, reason), any other fault is re-raised.
    """
    try:
        api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
    except bigsuds.OperationFailed as error:
        if "is referenced by a member of pool" not in str(error):
            # genuine exception
            raise
        return (False, "node referenced by pool")
    return (True, "")
def set_node_description(api, name, description):
    """Set the free-form description string on node *name*."""
    api.LocalLB.NodeAddressV2.set_description(
        nodes=[name], descriptions=[description])
def get_node_description(api, name):
    """Return the current description string of node *name*."""
    descriptions = api.LocalLB.NodeAddressV2.get_description(nodes=[name])
    return descriptions[0]
def set_node_session_enabled_state(api, name, session_state):
    """Enable or disable new sessions on node *name*.

    *session_state* is 'enabled' or 'disabled' (whitespace/case
    insensitive) and is translated to the API's STATE_* token.
    """
    state_token = "STATE_" + session_state.strip().upper()
    api.LocalLB.NodeAddressV2.set_session_enabled_state(
        nodes=[name], states=[state_token])
def get_node_session_status(api, name):
    """Return the node's session status as a lowercase token with the
    'SESSION_STATUS_' prefix stripped (e.g. 'enabled', 'forced_disabled')."""
    raw_status = api.LocalLB.NodeAddressV2.get_session_status(nodes=[name])[0]
    return raw_status.split("SESSION_STATUS_")[-1].lower()
def set_node_monitor_state(api, name, monitor_state):
    """Enable or disable health monitoring on node *name*.

    *monitor_state* is 'enabled' or 'disabled' (whitespace/case
    insensitive) and is translated to the API's STATE_* token.
    """
    state_token = "STATE_" + monitor_state.strip().upper()
    api.LocalLB.NodeAddressV2.set_monitor_state(
        nodes=[name], states=[state_token])
def get_node_monitor_status(api, name):
    """Return the node's monitor status as a lowercase token with the
    'MONITOR_STATUS_' prefix stripped (e.g. 'up', 'forced_down')."""
    raw_status = api.LocalLB.NodeAddressV2.get_monitor_status(nodes=[name])[0]
    return raw_status.split("MONITOR_STATUS_")[-1].lower()
def get_monitors(api, name):
    """Return (monitor_type, quorum, monitor_templates) for node *name*.

    The rule type comes back lowercase with the 'MONITOR_RULE_TYPE_'
    prefix stripped (e.g. 'and_list', 'm_of_n', 'single').
    """
    rule = api.LocalLB.NodeAddressV2.get_monitor_rule(nodes=[name])[0]
    rule_type = rule['type'].split("MONITOR_RULE_TYPE_")[-1].lower()
    return (rule_type, rule['quorum'], rule['monitor_templates'])
def set_monitors(api, name, monitor_type, quorum, monitor_templates):
    """Install a monitor rule on node *name*.

    *monitor_type* ('and_list', 'm_of_n', 'single'; whitespace/case
    insensitive) is translated to the API's MONITOR_RULE_TYPE_* token.
    """
    rule = {
        'type': "MONITOR_RULE_TYPE_" + monitor_type.strip().upper(),
        'quorum': quorum,
        'monitor_templates': monitor_templates,
    }
    api.LocalLB.NodeAddressV2.set_monitor_rule(
        nodes=[name], monitor_rules=[rule])
def main():
    """Module entry point: validate parameters and converge the BIG-IP
    node to the requested state (present/absent plus optional session,
    monitor, description and monitor-rule settings)."""
    monitor_type_choices = ['and_list', 'm_of_n']

    argument_spec = f5_argument_spec()

    meta_args = dict(
        session_state=dict(type='str', choices=['enabled', 'disabled']),
        monitor_state=dict(type='str', choices=['enabled', 'disabled']),
        name=dict(type='str', required=True),
        host=dict(type='str', aliases=['address', 'ip']),
        description=dict(type='str'),
        monitor_type=dict(type='str', choices=monitor_type_choices),
        quorum=dict(type='int'),
        monitors=dict(type='list')
    )
    argument_spec.update(meta_args)

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    if module.params['validate_certs']:
        import ssl
        if not hasattr(ssl, 'SSLContext'):
            module.fail_json(
                msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task'
            )

    server = module.params['server']
    server_port = module.params['server_port']
    user = module.params['user']
    password = module.params['password']
    state = module.params['state']
    partition = module.params['partition']
    validate_certs = module.params['validate_certs']

    session_state = module.params['session_state']
    monitor_state = module.params['monitor_state']
    host = module.params['host']
    name = module.params['name']
    # nodes are addressed by their fully-qualified /partition/name
    address = fq_name(partition, name)
    description = module.params['description']
    monitor_type = module.params['monitor_type']
    if monitor_type:
        monitor_type = monitor_type.lower()
    quorum = module.params['quorum']
    monitors = module.params['monitors']
    if monitors:
        # qualify every monitor template with the partition as well
        monitors = []
        for monitor in module.params['monitors']:
            monitors.append(fq_name(partition, monitor))

    # sanity check user supplied values
    if state == 'absent' and host is not None:
        module.fail_json(msg="host parameter invalid when state=absent")

    if monitors:
        if len(monitors) == 1:
            # set default required values for single monitor
            quorum = 0
            monitor_type = 'single'
        elif len(monitors) > 1:
            if not monitor_type:
                module.fail_json(msg="monitor_type required for monitors > 1")
            if monitor_type == 'm_of_n' and not quorum:
                module.fail_json(msg="quorum value required for monitor_type m_of_n")
            if monitor_type != 'm_of_n':
                quorum = 0
    elif monitor_type:
        # no monitors specified but monitor_type exists
        module.fail_json(msg="monitor_type require monitors parameter")
    elif quorum is not None:
        # no monitors specified but quorum exists
        module.fail_json(msg="quorum requires monitors parameter")

    try:
        api = bigip_api(server, user, password, validate_certs, port=server_port)
        result = {'changed': False}  # default

        if state == 'absent':
            if node_exists(api, address):
                if not module.check_mode:
                    deleted, desc = delete_node_address(api, address)
                    if not deleted:
                        module.fail_json(msg="unable to delete: %s" % desc)
                    else:
                        result = {'changed': True}
                else:
                    # check-mode return value
                    result = {'changed': True}

        elif state == 'present':
            if not node_exists(api, address):
                if host is None:
                    module.fail_json(msg="host parameter required when "
                                         "state=present and node does not exist")
                if not module.check_mode:
                    created, desc = create_node_address(api, address=host, name=address)
                    if not created:
                        module.fail_json(msg="unable to create: %s" % desc)
                    else:
                        result = {'changed': True}
                    if session_state is not None:
                        set_node_session_enabled_state(api, address,
                                                       session_state)
                        result = {'changed': True}
                    if monitor_state is not None:
                        set_node_monitor_state(api, address, monitor_state)
                        result = {'changed': True}
                    if description is not None:
                        set_node_description(api, address, description)
                        result = {'changed': True}
                    if monitors:
                        set_monitors(api, address, monitor_type, quorum, monitors)
                else:
                    # check-mode return value
                    result = {'changed': True}
            else:
                # node exists -- potentially modify attributes
                if host is not None:
                    if get_node_address(api, address) != host:
                        module.fail_json(msg="Changing the node address is "
                                             "not supported by the API; "
                                             "delete and recreate the node.")
                if session_state is not None:
                    session_status = get_node_session_status(api, address)
                    if session_state == 'enabled' and \
                       session_status == 'forced_disabled':
                        if not module.check_mode:
                            set_node_session_enabled_state(api, address,
                                                           session_state)
                        result = {'changed': True}
                    elif session_state == 'disabled' and \
                            session_status != 'forced_disabled':
                        # BUGFIX: this previously compared against
                        # 'force_disabled' (missing 'd'), a value the API
                        # never returns, so the task reported 'changed' and
                        # re-sent the disable call even when the node was
                        # already forced offline (broken idempotency).
                        if not module.check_mode:
                            set_node_session_enabled_state(api, address,
                                                           session_state)
                        result = {'changed': True}
                if monitor_state is not None:
                    monitor_status = get_node_monitor_status(api, address)
                    if monitor_state == 'enabled' and \
                       monitor_status == 'forced_down':
                        if not module.check_mode:
                            set_node_monitor_state(api, address,
                                                   monitor_state)
                        result = {'changed': True}
                    elif monitor_state == 'disabled' and \
                            monitor_status != 'forced_down':
                        if not module.check_mode:
                            set_node_monitor_state(api, address,
                                                   monitor_state)
                        result = {'changed': True}
                if description is not None:
                    if get_node_description(api, address) != description:
                        if not module.check_mode:
                            set_node_description(api, address, description)
                        result = {'changed': True}
                if monitors:
                    t_monitor_type, t_quorum, t_monitor_templates = get_monitors(api, address)
                    if (t_monitor_type != monitor_type) or (t_quorum != quorum) or (set(t_monitor_templates) != set(monitors)):
                        if not module.check_mode:
                            set_monitors(api, address, monitor_type, quorum, monitors)
                        result = {'changed': True}

    except Exception as e:
        module.fail_json(msg="received exception: %s" % e)

    module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.f5_utils import *
if __name__ == '__main__':
main()
| gpl-3.0 |
kthordarson/youtube-dl-ruv | youtube_dl/extractor/thisav.py | 19 | 1636 | #coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import determine_ext
class ThisAVIE(InfoExtractor):
    """Extractor for thisav.com video pages."""

    _VALID_URL = r'https?://(?:www\.)?thisav\.com/video/(?P<id>[0-9]+)/.*'
    _TEST = {
        'url': 'http://www.thisav.com/video/47734/%98%26sup1%3B%83%9E%83%82---just-fit.html',
        'md5': '0480f1ef3932d901f0e0e719f188f19b',
        'info_dict': {
            'id': '47734',
            'ext': 'flv',
            'title': '高樹マリア - Just fit',
            'uploader': 'dj7970',
            'uploader_id': 'dj7970'
        }
    }

    def _real_extract(self, url):
        # the numeric id is captured by the <id> group of _VALID_URL
        video_id = re.match(self._VALID_URL, url).group('id')
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(r'<h1>([^<]*)</h1>', webpage, 'title')
        video_url = self._html_search_regex(
            r"addVariable\('file','([^']+)'\);", webpage, 'video url')
        # uploader fields are optional -- pages without them still extract
        uploader = self._html_search_regex(
            r': <a href="http://www.thisav.com/user/[0-9]+/(?:[^"]+)">([^<]+)</a>',
            webpage, 'uploader name', fatal=False)
        uploader_id = self._html_search_regex(
            r': <a href="http://www.thisav.com/user/[0-9]+/([^"]+)">(?:[^<]+)</a>',
            webpage, 'uploader id', fatal=False)

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'ext': determine_ext(video_url),
        }
| unlicense |
NickPresta/sentry | src/sentry/migrations/0098_auto__add_user__chg_field_team_owner__chg_field_activity_user__chg_fie.py | 3 | 28381 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models, connections
class Migration(SchemaMigration):
def forwards(self, orm):
if 'auth_user' in connections['default'].introspection.table_names():
return
self.create_auth(orm)
    def create_auth(self, orm):
        # Re-create the django.contrib.auth schema (user table plus its two
        # M2M join tables) for installs where Sentry manages auth itself.
        # South-generated DDL: keep the column list and call order intact.
        # Adding model 'User'
        db.create_table('auth_user', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('password', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('username', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)),
            ('first_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
            ('last_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
            ('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ))
        db.send_create_signal(u'auth', ['User'])
        # Adding M2M table for field groups on 'User'
        db.create_table('auth_user_groups', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('user', models.ForeignKey(orm[u'auth.user'], null=False)),
            ('group', models.ForeignKey(orm[u'auth.group'], null=False))
        ))
        db.create_unique('auth_user_groups', ['user_id', 'group_id'])
        # Adding M2M table for field user_permissions on 'User'
        db.create_table('auth_user_user_permissions', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('user', models.ForeignKey(orm[u'auth.user'], null=False)),
            ('permission', models.ForeignKey(orm[u'auth.permission'], null=False))
        ))
        db.create_unique('auth_user_user_permissions', ['user_id', 'permission_id'])
    def backwards(self, orm):
        # Intentionally irreversible: the auth tables may predate this
        # migration or hold live user data, so they are never dropped.
        pass
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
u'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Event']", 'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
u'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': u"orm['sentry.AlertRelatedGroup']", 'to': u"orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
u'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': u"orm['sentry.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"})
},
u'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': u"orm['auth.User']"})
},
u'sentry.groupcountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
u'sentry.grouptag': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTag', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'sentry.option': {
'Meta': {'object_name': 'Option'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
u'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
u'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': u"orm['auth.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Team']", 'null': 'True'})
},
u'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': u"orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
'user_added': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': u"orm['auth.User']"})
},
u'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
u'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': u"orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': u"orm['sentry.TeamMember']", 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': u"orm['auth.User']"})
},
u'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry'] | bsd-3-clause |
abigailStev/stingray | stingray/pulse/modeling.py | 2 | 4997 | import numpy as np
from astropy.modeling import models, fitting
__all__ = ["sinc_square_model", "sinc_square_deriv", "fit_sinc",
"fit_gaussian", "SincSquareModel"]
def sinc(x):
    """Evaluate the unnormalized sinc function.

    sinc(x) = sin(x)/x, with the x == 0 singularity filled with 1.

    Parameters
    ----------
    x : array-like

    Returns
    -------
    values : array-like
        sin(x)/x evaluated elementwise.
    """
    # np.sinc computes the *normalized* sinc, sin(pi*t)/(pi*t); dividing
    # the argument by pi recovers the plain sin(x)/x wanted here.
    return np.sinc(x / np.pi)
def sinc_square_model(x, amplitude=1., mean=0., width=1.):
    """Evaluate a sinc-squared profile.

    amplitude * (sin(u)/u)**2 with u = (x - mean)/width.

    Parameters
    ----------
    x : array-like

    Other Parameters
    ----------------
    amplitude : float
        Peak value, reached at x == mean.
    mean : float
        Location of the central peak.
    width : float
        Scale of the profile.

    Returns
    -------
    sqvalues : array-like
        The squared-sinc profile evaluated at ``x``.

    Examples
    --------
    >>> sinc_square_model(0, amplitude=2.)
    2.0
    """
    u = (x - mean) / width
    return amplitude * sinc(u) ** 2
def sinc_square_deriv(x, amplitude=1., mean=0., width=1.):
    """Partial derivatives of ``amplitude * sinc(u)**2``, u = (x - mean)/width.

    Used as the analytic ``fit_deriv`` of the astropy custom model, so the
    returned gradients must match [d/d amplitude, d/d mean, d/d width].

    Parameters
    ----------
    x : array-like

    Other Parameters
    ----------------
    amplitude : float
        the value for x=mean
    mean : float
        mean of the sinc function
    width : float
        width of the sinc function

    Returns
    -------
    d_amplitude : array-like
        partial derivative with respect to the amplitude
    d_mean : array-like
        partial derivative with respect to the mean
    d_width : array-like
        partial derivative with respect to the width

    Examples
    --------
    >>> np.all(sinc_square_deriv(0, amplitude=2.) == [1., 0., 0.])
    True
    """
    u = np.asarray((x - mean) / width, dtype=float)
    u_is_zero = u == 0
    # np.sinc is the normalized sinc sin(pi*t)/(pi*t); rescale the argument
    # to obtain the plain sin(u)/u used by the model.
    sinc_u = np.sinc(u / np.pi)
    # dF/du for F = amplitude * sinc(u)**2.
    # BUGFIX: the factor must be u*cos(u) - sin(u), not x*cos(u) - sin(u);
    # the old form gave wrong gradients whenever mean != 0 or width != 1.
    # The 0/0 at u == 0 is silenced and patched below (the true limit is 0).
    with np.errstate(divide='ignore', invalid='ignore'):
        d_u = 2 * amplitude * sinc_u * (u * np.cos(u) - np.sin(u)) / u ** 2
    d_u = np.where(u_is_zero, 0., d_u)
    d_amplitude = sinc_u ** 2
    # Chain rule: du/d(mean) = -1/width, du/d(width) = -u/width.
    d_mean = d_u * (-1 / width)
    d_width = d_u * (-u / width)
    return [d_amplitude, d_mean, d_width]
# Astropy model class generated from the plain functions above; ``fit_deriv``
# supplies analytic gradients to the Levenberg-Marquardt fitter.
_SincSquareModel = models.custom_model(sinc_square_model,
                                       fit_deriv=sinc_square_deriv)
class SincSquareModel(_SincSquareModel):
    # Classes produced by ``models.custom_model`` are created dynamically and
    # therefore cannot be pickled; overriding ``__reduce__`` restores
    # picklability by rebuilding the instance from its class and __dict__.
    # NOTE(review): the parameter is named ``cls`` but this is an ordinary
    # instance method, so it actually receives ``self``.
    def __reduce__(cls):
        members = dict(cls.__dict__)
        return (type(cls), (), members)
def fit_sinc(x, y, amp=1.5, mean=0., width=1., tied=None, fixed=None,
             bounds=None, obs_length=None):
    """
    Fit a sinc function to x,y values.

    Parameters
    ----------
    x : array-like
    y : array-like

    Other Parameters
    ----------------
    amp : float
        The initial value for the amplitude
    mean : float
        The initial value for the mean of the sinc
    obs_length : float
        The length of the observation. Default None. If it's defined, it
        fixes width to 1/(pi*obs_length), as expected from epoch folding
        periodograms
    width : float
        The initial value for the width of the sinc. Only used if
        obs_length is None
    tied : dict
    fixed : dict
    bounds : dict
        Parameters to be passed to the [astropy models]_

    Returns
    -------
    sincfit : function
        The best-fit function, accepting x as input
        and returning the best-fit model as output

    References
    ----------
    .. [astropy models] http://docs.astropy.org/en/stable/api/astropy.modeling.functional_models.Gaussian1D.html
    """
    # BUGFIX: use None sentinels instead of mutable default arguments. The
    # old ``fixed={}`` default was mutated below (``fixed["width"] = True``),
    # so the "width" constraint silently leaked into every later call.
    tied = {} if tied is None else tied
    # Copy so the caller's dict is never mutated by the obs_length branch.
    fixed = {} if fixed is None else dict(fixed)
    bounds = {} if bounds is None else bounds
    if obs_length is not None:
        # Epoch-folding periodogram peaks have a fixed width 1/(pi*T).
        width = 1 / (np.pi * obs_length)
        fixed["width"] = True
    sinc_in = SincSquareModel(amplitude=amp, mean=mean, width=width, tied=tied,
                              fixed=fixed, bounds=bounds)
    fit_s = fitting.LevMarLSQFitter()
    sincfit = fit_s(sinc_in, x, y)
    return sincfit
def fit_gaussian(x, y, amplitude=1.5, mean=0., stddev=2., tied=None,
                 fixed=None, bounds=None):
    """
    Fit a gaussian function to x,y values.

    Parameters
    ----------
    x : array-like
    y : array-like

    Other Parameters
    ----------------
    amplitude : float
        The initial value for the amplitude
    mean : float
        The initial value for the mean of the gaussian function
    stddev : float
        The initial value for the standard deviation of the gaussian function
    tied : dict
    fixed : dict
    bounds : dict
        Parameters to be passed to the [astropy models]_

    Returns
    -------
    g : function
        The best-fit function, accepting x as input
        and returning the best-fit model as output
    """
    # None sentinels instead of mutable default arguments (``{}`` defaults
    # are shared across calls); keeps this consistent with fit_sinc.
    tied = {} if tied is None else tied
    fixed = {} if fixed is None else fixed
    bounds = {} if bounds is None else bounds
    g_in = models.Gaussian1D(amplitude=amplitude, mean=mean, stddev=stddev,
                             tied=tied, fixed=fixed, bounds=bounds)
    fit_g = fitting.LevMarLSQFitter()
    g = fit_g(g_in, x, y)
    return g
| mit |
SPKian/Testing | erpnext/hr/doctype/appraisal/appraisal.py | 75 | 2331 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, getdate
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.model.document import Document
from erpnext.hr.utils import set_employee_name
class Appraisal(Document):
    def validate(self):
        """Run all checks and compute the total score before saving."""
        # Default workflow state for freshly created documents.
        self.status = self.status or "Draft"
        set_employee_name(self)
        self.validate_dates()
        self.validate_existing_appraisal()
        self.calculate_total()

    def get_employee_name(self):
        """Fetch the employee's full name, cache it on the doc and return it."""
        self.employee_name = frappe.db.get_value(
            "Employee", self.employee, "employee_name")
        return self.employee_name

    def validate_dates(self):
        """Reject appraisal periods whose end precedes their start."""
        start, end = getdate(self.start_date), getdate(self.end_date)
        if start > end:
            frappe.throw(_("End Date can not be less than Start Date"))

    def validate_existing_appraisal(self):
        """Block overlapping Submitted/Completed appraisals for this employee."""
        duplicate = frappe.db.sql("""select name from `tabAppraisal` where employee=%s
			and (status='Submitted' or status='Completed')
			and ((start_date>=%s and start_date<=%s)
			or (end_date>=%s and end_date<=%s))""",
            (self.employee, self.start_date, self.end_date,
             self.start_date, self.end_date))
        if duplicate:
            frappe.throw(_("Appraisal {0} created for Employee {1} in the given date range").format(
                duplicate[0][0], self.employee_name))

    def calculate_total(self):
        """Compute weighted goal scores; weightages must add up to 100%."""
        score_total, weightage_total = 0, 0
        for goal in self.get('goals'):
            if goal.score:
                goal.score_earned = flt(goal.score) * flt(goal.per_weightage) / 100
                score_total += goal.score_earned
            # Weightage counts toward the 100% check even for unscored goals.
            weightage_total += flt(goal.per_weightage)

        if int(weightage_total) != 100:
            frappe.throw(_("Total weightage assigned should be 100%. It is {0}").format(str(weightage_total) + "%"))

        # A zero total is only acceptable when the appraisee rates themselves.
        appraisee_user = frappe.db.get_value("Employee", self.employee, "user_id")
        if appraisee_user != frappe.session.user and score_total == 0:
            frappe.throw(_("Total cannot be zero"))

        self.total_score = score_total

    def on_submit(self):
        frappe.db.set(self, 'status', 'Submitted')

    def on_cancel(self):
        frappe.db.set(self, 'status', 'Cancelled')
@frappe.whitelist()
def fetch_appraisal_template(source_name, target_doc=None):
    """Map an Appraisal Template (and its goal rows) onto a new Appraisal."""
    mapping = {
        "Appraisal Template": {
            "doctype": "Appraisal",
        },
        "Appraisal Template Goal": {
            "doctype": "Appraisal Goal",
        },
    }
    return get_mapped_doc("Appraisal Template", source_name, mapping, target_doc)
| agpl-3.0 |
Francis-Liu/animated-broccoli | nova/api/openstack/compute/legacy_v2/contrib/cloudpipe_update.py | 71 | 2597 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.i18n import _
from nova import objects
# Policy check for this extension; calling authorize(context) raises if the
# request context is not allowed. Presumably maps to the
# compute_extension:cloudpipe_update policy rule -- verify against nova policy.
authorize = extensions.extension_authorizer('compute', 'cloudpipe_update')
class CloudpipeUpdateController(wsgi.Controller):
    """Handle updating the vpn ip/port for cloudpipe instances."""

    def __init__(self):
        super(CloudpipeUpdateController, self).__init__()

    @wsgi.action("update")
    def update(self, req, id, body):
        """Configure cloudpipe parameters for the project."""
        context = req.environ['nova.context']
        authorize(context)

        # "configure-project" is the only pseudo-resource this action accepts.
        if id != "configure-project":
            msg = _("Unknown action %s") % id
            raise webob.exc.HTTPBadRequest(explanation=msg)

        networks = objects.NetworkList.get_by_project(
            context, context.project_id)
        try:
            config = body['configure_project']
            address, port = config['vpn_ip'], config['vpn_port']
            # Apply the new VPN endpoint to every network of the project.
            for net in networks:
                net.vpn_public_address = address
                net.vpn_public_port = port
                net.save()
        except (TypeError, KeyError, ValueError) as ex:
            msg = _("Invalid request body: %s") % ex
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return webob.Response(status_int=202)
class Cloudpipe_update(extensions.ExtensionDescriptor):
    """Adds the ability to set the vpn ip/port for cloudpipe instances."""

    name = "CloudpipeUpdate"
    alias = "os-cloudpipe-update"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "cloudpipe-update/api/v2")
    updated = "2012-11-14T00:00:00Z"

    def get_controller_extensions(self):
        """Attach the update controller to the os-cloudpipe resource."""
        return [extensions.ControllerExtension(
            self, 'os-cloudpipe', CloudpipeUpdateController())]
| apache-2.0 |
davidmontgom/pyvmomi-community-samples | samples/destroy_vm.py | 9 | 2836 | #!/usr/bin/env python
# Copyright 2015 Michael Rice <michael@michaelrice.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import atexit
import requests
from pyVim import connect
from tools import cli
from tools import tasks
requests.packages.urllib3.disable_warnings()
def setup_args():
    """Build the CLI argument parser and prompt for missing credentials.

    Extends the shared parser with three mutually-alternative ways of
    identifying the target VM: BIOS UUID, DNS name, or IP address.
    """
    parser = cli.build_arg_parser()
    # -u is already taken by the standard "user" flag, hence -j for uuid.
    vm_selectors = (
        ('-j', '--uuid',
         'BIOS UUID of the VirtualMachine you want to reboot.'),
        ('-n', '--name',
         'DNS Name of the VirtualMachine you want to reboot.'),
        ('-i', '--ip',
         'IP Address of the VirtualMachine you want to reboot'),
    )
    for short_opt, long_opt, help_text in vm_selectors:
        parser.add_argument(short_opt, long_opt, help=help_text)
    return cli.prompt_for_password(parser.parse_args())
ARGS = setup_args()
SI = None
try:
    # Connect to the vSphere endpoint; disconnect automatically at exit.
    SI = connect.SmartConnect(host=ARGS.host,
                              user=ARGS.user,
                              pwd=ARGS.password,
                              port=ARGS.port)
    atexit.register(connect.Disconnect, SI)
except IOError:
    # Fix: the original used the Python-2-only syntax
    # "except IOError, ex:" (a SyntaxError on Python 3) and never used
    # the bound name.  Connection failure is reported just below.
    pass
if not SI:
    raise SystemExit("Unable to connect to host with supplied info.")
# Locate the VM by whichever selector was supplied (uuid / name / ip).
VM = None
if ARGS.uuid:
    VM = SI.content.searchIndex.FindByUuid(None, ARGS.uuid,
                                           True,
                                           False)
elif ARGS.name:
    VM = SI.content.searchIndex.FindByDnsName(None, ARGS.name,
                                              True)
elif ARGS.ip:
    VM = SI.content.searchIndex.FindByIp(None, ARGS.ip, True)
if VM is None:
    raise SystemExit("Unable to locate VirtualMachine.")
print("Found: {0}".format(VM.name))
print("The current powerState is: {0}".format(VM.runtime.powerState))
# A powered-on VM must be powered off before it can be destroyed.
if format(VM.runtime.powerState) == "poweredOn":
    print("Attempting to power off {0}".format(VM.name))
    TASK = VM.PowerOffVM_Task()
    tasks.wait_for_tasks(SI, [TASK])
    print("{0}".format(TASK.info.state))
print("Destroying VM from vSphere.")
TASK = VM.Destroy_Task()
tasks.wait_for_tasks(SI, [TASK])
print("Done.")
| apache-2.0 |
MadeInHaus/django-template | backend/apps/utils/basic_auth_middleware.py | 1 | 1243 | import base64
from django.http import HttpResponse
from django.middleware.common import CommonMiddleware
from django.conf import settings
import os
class AuthMiddleware(CommonMiddleware):
    """
    Site-wide HTTP Basic-Auth gate.

    Add this to middleware:
        'utils.basic_auth_middleware.AuthMiddleware',
    Add these settings:
        USE_BASIC_AUTH = True # This setting is optionally settable as an env var, env var will override whatever is set in settings
        BASIC_AUTH_USER = 'user'
        BASIC_AUTH_PASS = 'password'
    """
    def process_request(self, request):
        """Return None when auth is disabled or succeeds, else a 401 challenge."""
        env_flag = os.environ.get('USE_BASIC_AUTH', None)
        enabled = (getattr(settings, 'USE_BASIC_AUTH', False)
                   or env_flag == 'True')
        # The env var 'False' always wins and disables the check.
        if not enabled or env_flag == 'False':
            return
        if self._credentials_valid(request.META.get('HTTP_AUTHORIZATION')):
            return
        r = HttpResponse("Auth Required", status=401)
        r['WWW-Authenticate'] = 'Basic realm="bat"'
        return r

    @staticmethod
    def _credentials_valid(header):
        """True iff *header* is a well-formed Basic header matching settings.

        Fixes two defects of the original:
        * on Python 3, ``base64.b64decode`` returns ``bytes``, so the old
          ``auth.split(':')`` raised TypeError (a 500) -- decode first;
        * malformed headers (no space, bad base64, missing colon) raised
          unhandled exceptions; now they simply count as invalid and the
          client gets the 401 challenge again.
        """
        if not header:
            return False
        try:
            authtype, auth = header.split(' ')
            decoded = base64.b64decode(auth)
            if isinstance(decoded, bytes):
                decoded = decoded.decode('utf-8')
            # maxsplit=1 so passwords containing ':' work (RFC 7617).
            username, password = decoded.split(':', 1)
        except (TypeError, ValueError, UnicodeDecodeError):
            # binascii.Error (bad base64 on py3) subclasses ValueError.
            return False
        return (username == getattr(settings, 'BASIC_AUTH_USER', None)
                and password == getattr(settings, 'BASIC_AUTH_PASS', None))
| mit |
knorrium/selenium | py/selenium/webdriver/remote/switch_to.py | 64 | 3014 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .command import Command
from selenium.webdriver.common.alert import Alert
class SwitchTo:
    """Exposes the driver's context-switching operations: frames,
    windows, alerts and the currently focused element."""

    def __init__(self, driver):
        self._driver = driver

    @property
    def active_element(self):
        """Returns the element with focus, or BODY if nothing has focus.

        :Usage:
            element = driver.switch_to.active_element
        """
        return self._driver.execute(Command.GET_ACTIVE_ELEMENT)['value']

    @property
    def alert(self):
        """Switches focus to an alert on the page.

        :Usage:
            alert = driver.switch_to.alert
        """
        return Alert(self._driver)

    def default_content(self):
        """Switch focus to the default frame.

        :Usage:
            driver.switch_to.default_content()
        """
        # A frame id of None means the top-level browsing context.
        self._driver.execute(Command.SWITCH_TO_FRAME, {'id': None})

    def frame(self, frame_reference):
        """Switches focus to the specified frame, by index, name, or webelement.

        :Args:
         - frame_reference: The name of the window to switch to, an integer representing the index,
                            or a webelement that is an (i)frame to switch to.

        :Usage:
            driver.switch_to.frame('frame_name')
            driver.switch_to.frame(1)
            driver.switch_to.frame(driver.find_elements_by_tag_name("iframe")[0])
        """
        self._driver.execute(Command.SWITCH_TO_FRAME, {'id': frame_reference})

    def parent_frame(self):
        """Switches focus to the parent context. If the current context is the top
        level browsing context, the context remains unchanged.

        :Usage:
            driver.switch_to.parent_frame()
        """
        self._driver.execute(Command.SWITCH_TO_PARENT_FRAME)

    def window(self, window_name):
        """Switches focus to the specified window.

        :Args:
         - window_name: The name or window handle of the window to switch to.

        :Usage:
            driver.switch_to.window('main')
        """
        # W3C-conformant remote ends take a "handle", older ones a "name".
        key = 'handle' if self._driver.w3c else 'name'
        self._driver.execute(Command.SWITCH_TO_WINDOW, {key: window_name})
| apache-2.0 |
allo-/django-bingo | bingo/config.py | 1 | 5216 | from django.db import models
from django.contrib.sites.models import Site
from colorful.fields import RGBColorField
from django.contrib.sites.shortcuts import get_current_site
from django.utils.translation import ugettext as _, pgettext_lazy
def default_time():
    """Return the current local date at midnight (00:00:00).

    Used as a default for the game time fields on :class:`Config`.
    """
    # Fix: this file never imports django.utils.timezone at module level
    # (see the imports at the top), so the original body raised NameError
    # the first time it was called.  Import locally to keep the module's
    # import block untouched.
    from django.utils import timezone
    now = timezone.get_current_timezone().normalize(timezone.now())
    return now.replace(hour=0, minute=0, second=0)
class Config(models.Model):
    """Per-site configuration for the bingo application.

    One row per :class:`Site`; rows are created lazily with these field
    defaults by the module-level ``get()`` helper below.
    """
    # OneToOne + unique guarantees a single config row per site.
    site = models.OneToOneField(Site, unique=True)
    # Game Start and End
    # NOTE(review): the nullable TimeFields presumably mean "no
    # restriction" when left as None -- confirm against the game-start logic.
    start_enabled = models.BooleanField(default=False,
        help_text="Disable to disallow new games, even in the game times.")
    start_time_begin = models.TimeField(default=None, blank=True, null=True,
        help_text="Begin of the time where games can be started "
        "(usually before the start of the show).")
    start_time_end = models.TimeField(default=None, blank=True, null=True,
        help_text="End of the time where games can be started "
        "(usually before the start of the show).")
    vote_start_time = models.TimeField(default=None, blank=True, null=True,
        help_text="Time at which voting starts "
        "(usually the start of the show).")
    end_time = models.TimeField(default=None, blank=True, null=True,
        help_text="Time at which a game is stopped (end of the show).")
    # One flag per weekday on which games may be started.
    week_days_monday = models.BooleanField(default=True,
        help_text="True, when games can be started on mondays.")
    week_days_tuesday = models.BooleanField(default=True,
        help_text="True, when games can be started on tuesdays.")
    week_days_wednesday = models.BooleanField(default=True,
        help_text="True, when games can be started on wednesdays.")
    week_days_thursday = models.BooleanField(default=True,
        help_text="True, when games can be started on thursdays.")
    week_days_friday = models.BooleanField(default=True,
        help_text="True, when games can be started on fridays.")
    week_days_saturday = models.BooleanField(default=True,
        help_text="True, when games can be started on saturdays.")
    week_days_sunday = models.BooleanField(default=True,
        help_text="True, when games can be started on sundays.")
    # Timeouts (all in minutes)
    soft_timeout = models.IntegerField(default=None, blank=True, null=True,
        help_text = "Minutes after which inactive games are stopped. "
        "Either Soft Timeout or Hard Timeout must be set or your "
        "games will never stop.")
    hard_timeout = models.IntegerField(default=None, blank=True, null=True,
        help_text = "Minutes after which games are stopped. "
        "Either Soft Timeout or Hard Timeout must be set or your "
        "games will never stop.")
    user_activity_timeout = models.IntegerField(default=5, blank=True,
        null=True,
        help_text = "Minutes after which a user is considered inactive.")
    # Description
    description_enabled = models.BooleanField(default=False,
        help_text="Allow the User starting the game to set a description.")
    # Look and Feel
    thumbnails_enabled = models.BooleanField(default=True)
    colors_from = models.IntegerField(default=80,
        help_text="Color intensity for the game fields.")
    colors_to = models.IntegerField(default=160,
        help_text="Color intensity for the game fields.")
    # strftime-style format string for the middle field's timestamp.
    middle_field_datetime_format = models.CharField(max_length=30,
        default="%Y-%m-%d %H:%M",
        help_text="Format for the date and time on the middle field.")
    # Twitter integration
    tweetbutton_text = models.CharField(max_length=280, blank=True,
        default=pgettext_lazy("tweet text", "My bingo board:"),
        help_text="The text that is used when a user clicks " +
        "the tweet or toot button.")
    tweetbutton_hashtags = models.CharField(max_length=280, blank=True,
        default="bingo",
        help_text="A comma separated list of hashtags that are used when " +
        "the user clicks the tweet or toot button.")
    twittercard_account = models.CharField(max_length=280, blank=True,
        default="",
        help_text="The Twitter account associated with the Twitter card " +
        "(useful for Twitter statistics)")
    twittercard_image = models.URLField(blank=True,
        default="",
        help_text="An Image URL for a Twitter card image." +
        " (leave blank to use the default)")
    # Description and announcements on the main page
    bingo_description = models.TextField(blank=True, default="",
        help_text="An optional description of the bingo, that will be" +
        "shown on the main page. HTML is allowed, so make sure to escape <, >" +
        " and similar characters correctly and close all your HTML tags.")
    def __str__(self):
        """Human-readable representation naming the configured site."""
        return "Configuration for site {0}".format(self.site)
def get(key, request=None, site=None, *args, **kwargs):
    """Look up a single :class:`Config` value.

    Either ``request`` (used to resolve the current site) or ``site``
    must be given.  The config row is created on first access.  An
    unknown ``key`` raises AttributeError unless a ``default`` keyword
    argument was supplied, in which case that value is returned.
    """
    try:
        if not site:
            assert request, "Either request or site must be set."
            site = get_current_site(request)
        config, _created = Config.objects.get_or_create(site=site)
        return getattr(config, key)
    except AttributeError:
        if 'default' not in kwargs:
            raise
        return kwargs['default']
| agpl-3.0 |
aristotle-tek/cuny-bdif | AWS/ec2/lib/boto-2.34.0/boto/glacier/response.py | 17 | 2193 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.compat import json
class GlacierResponse(dict):
    """
    Dictionary-like view over a Glacier layer1 response.

    Merges any JSON document in the response body together with selected
    HTTP headers into a single mapping, while keeping the raw HTTP
    response available for streaming reads.
    """
    def __init__(self, http_response, response_headers):
        self.http_response = http_response
        self.status = http_response.status
        self[u'RequestId'] = http_response.getheader('x-amzn-requestid')
        # Copy each requested header into the mapping under its item name.
        for header_name, item_name in (response_headers or []):
            self[item_name] = http_response.getheader(header_name)
        # A JSON body is parsed eagerly and merged into this mapping.
        if http_response.getheader('Content-Type') == 'application/json':
            self.update(json.loads(http_response.read().decode('utf-8')))
        size = http_response.getheader('Content-Length', None)
        if size is not None:
            self.size = size

    def read(self, amt=None):
        "Reads and returns the response body, or up to the next amt bytes."
        return self.http_response.read(amt)
| mit |
adobs/project | flask_app.py | 1 | 13126 | from flask import Flask, request, render_template, redirect, flash, session, jsonify, g
from jinja2 import StrictUndefined
from model import Profile, Adjective, Gender, Orientation, Location, db, connect_to_db
from flask_helper_functions.selenium_okc import create_new_user
from flask_helper_functions.sending_a_message import send_message
from flask_helper_functions.signing_in import is_signed_in
import re
from okcupyd.session import Session
from okcupyd.user import User
from flask_helper_functions.map_helper import get_compiled
from flask_helper_functions.send_message_map import send
from flask_helper_functions.markov import get_input_text, make_chains, make_text
import json
from flask_helper_functions.create_word_chart import create_self_summary_words, create_message_me_if_words, prepare_data
from sqlalchemy.sql import func
app = Flask(__name__)
# Required to use Flask sessions and the debug toolbar
app.secret_key = "ABC"
# Normally, if you use an undefined variable in Jinja2, it fails silently.
# This is horrible. Fix this so that, instead, it raises an error.
app.jinja_env.undefined = StrictUndefined
JS_TESTING_MODE = False
@app.before_request
def add_tests():
g.jasmine_tests = JS_TESTING_MODE
@app.route("/")
def home():
""" Home page """
return render_template("home.html")
@app.route("/", methods=["POST"])
def home_landing():
""" Home page """
return redirect("/")
@app.route("/new-user-form")
def new_user_form():
""" Registration form """
months = xrange(1,13)
days = xrange(1,32)
years = xrange(1997,1914,-1)
return render_template("create_new_user_form.html", months=months, days=days, years=years)
@app.route("/new-user", methods=["POST"])
def create_a_new_user():
"""JSON - gets new user information and sends to OKC"""
orientation = request.form.get("orientation")
gender = request.form.get("gender")
birthmonth = request.form.get("birthmonth")
birthdate = request.form.get("birthdate")
birthyear = request.form.get("birthyear")
zipcode = request.form.get("zip")
email = request.form.get("email")
screenname = request.form.get("screenname")
password = request.form.get("password")
results = create_new_user(orientation, gender, birthmonth, birthdate, birthyear, zipcode, email, screenname, password)
if results == "success":
if is_signed_in(screenname, password) == "True":
session["screenname"] = screenname
session["password"] = password
flash("You have successfully created a new user")
return results
@app.route("/login")
def login_form():
""" Login page """
return render_template("login.html")
@app.route("/login", methods=["POST"])
def login():
    """Validate OKCupid credentials posted by the login form.

    Returns the string "True"/"False" for the client-side JS; on
    success the credentials are stored in the Flask session.
    """
    screenname = request.form.get("screenname")
    password = request.form.get("password")
    # Fix: is_signed_in drives a real (selenium-backed) OKCupid sign-in.
    # The original called it twice per request -- once for the check and
    # once for the return value.  Call it once and reuse the result.
    result = is_signed_in(screenname, password)
    if result == "True":
        session["screenname"] = screenname
        session["password"] = password
        flash("You have successfully logged in")
    return result
@app.route("/logout")
def logout():
""" Logout page """
session.clear()
flash("You have been logged out")
return redirect("/")
@app.route("/okcbot")
def bot_form():
""" OKCBot page"""
locations = db.session.query(Location).all()
return render_template("okcbot.html", locations=locations)
@app.route("/okcbot", methods=["POST"])
def bot():
""" Bot sends text based on parameters """
minimum_age = int(request.form.get("minimum_age"))
maximum_age = int(request.form.get("maximum_age"))
location = request.form.get("location")
radius = int(request.form.get("radius"))
gentation = request.form.get("gentation")
message = request.form.get("message")
num = int(request.form.get("num"))
result = send_message(session["screenname"], session["password"], minimum_age, maximum_age, location, radius, gentation, message, num)
if result:
return result
else:
flash("Message(s) successfully sent")
return ""
@app.route("/map")
def map():
""" Map page. """
orientations = db.session.query(Orientation).order_by(Orientation.orientation).all()
genders = db.session.query(Gender).order_by(Gender.gender).all()
return render_template("map3.html", orientations=orientations, genders=genders)
@app.route("/map-checked.json")
def map_checked_json():
""" Gets JSON - map marker/label inputs """
orientation = request.args.get("orientation")
orientation = re.sub('orientation=','',orientation)
orientation_list = re.split('&',orientation)
orientation_tuple=tuple(orientation_list)
gender = request.args.get("gender")
gender = re.sub('gender=','',gender)
gender = re.sub('\+',' ',gender)
gender_list = re.split('&',gender)
gender_tuple = tuple(gender_list)
age = request.args.get("age")
age_list = re.split(' \- ',age)
age_min, age_max = age_list
minimum_latitude = float((request.args.get("minimum_latitude")).encode("utf-8"))
maximum_latitude = float((request.args.get("maximum_latitude")).encode('utf-8'))
maximum_longitude = float((request.args.get("maximum_longitude")).encode('utf-8'))
minimum_longitude = float((request.args.get('minimum_longitude')).encode('utf-8'))
logged_in = "False"
# if logged in
if session.get("screenname"):
logged_in = "True"
users = db.session.query(Profile.username).filter(
Profile.orientation.in_(orientation_tuple)).filter(
Profile.gender.in_(gender_tuple)).filter(
Profile.age >= age_min).filter(Profile.age <= age_max)
results = db.session.query(Adjective.username, Profile.location,
Adjective.adjective, Location.latitude,
Location.longitude).join(Profile).join(
Location).filter(Location.latitude >= minimum_latitude).filter(Location.latitude <=
maximum_latitude).filter(Location.longitude >= minimum_longitude).filter(Location.longitude <=
maximum_longitude).filter(Adjective.username.in_(users)).all()
compiled = get_compiled(logged_in, results)
return jsonify(compiled)
@app.route("/send-message.json", methods=["POST"])
def send_messages_map():
"""Send message to OKCupid users."""
recipients = request.form.get("recipients")
recipient_list = recipients.split(", ")
message = request.form.get("message")
username = session["screenname"]
password = session["password"]
send(username, password, recipient_list, message)
return "hello"
@app.route("/sankey")
def d3_page():
""" D3 and Chart.js machine learning results """
return render_template("sankey.html")
@app.route("/markov")
def markov():
""" Change self-summary with Markov Chains page """
orientations = db.session.query(Orientation).order_by(Orientation.orientation).all()
genders = db.session.query(Gender).order_by(Gender.gender).all()
locations = db.session.query(Location).all()
adjectives = db.session.query(Adjective).distinct(Adjective.adjective).order_by(Adjective.adjective).all()
def adjectiveg(adjectives):
for adjective in adjectives:
yield adjective.adjective.strip("\"#$%&'()*+-/:;<=>@[\\]^_`{|}~1234567890")
adjective_generator = adjectiveg(adjectives)
return render_template("markov.html", orientations=orientations, genders=genders, locations=locations, adjective_generator=adjective_generator)
@app.route("/markov.json")
def markov_json():
""" Gets JSON - markov chains generated text """
orientation = request.args.get("orientation")
gender = request.args.get("gender")
age = request.args.get("age")
age_list = re.split(' \- ',age)
age_min, age_max = age_list
location = request.args.get("location")
n = int(request.args.get("randomness"))
adjective1 = request.args.get("adjective1")
adjective2 = request.args.get("adjective2")
adjective3 = request.args.get("adjective3")
adjective_list = [adjective1, adjective2, adjective3]
text_string = get_input_text(orientation, gender, location, age_min, age_max, adjective_list, n)
if text_string == "invalid search results":
return text_string
else:
chains = make_chains(text_string, n)
text = make_text(chains)
return text
@app.route("/markov-adjectives.json")
def markov_adjective_json():
""" Gets JSON to populate adjective lists for dropdowns """
orientation = request.args.get("orientation")
gender = request.args.get("gender")
age = request.args.get("age")
age_list = re.split(' \- ',age)
age_min, age_max = age_list
location = request.args.get("location")
adjectives = db.session.query(Adjective.adjective.distinct()).join(Profile).filter(Profile.orientation.like(
"%"+orientation+"%")).filter(Profile.location==location).filter(Profile.gender.like("%"+gender+"%")).filter(
Profile.age>=age_min).filter(Profile.age<=age_max).order_by(Adjective.adjective).all()
adjective_list = [adjective[0] for adjective in adjectives]
return json.dumps(adjective_list)
@app.route("/add-to-profile.json", methods=["POST"])
def add_to_profile_json():
""" Adds Markov Chain self-summary to OkCupid profile """
text = request.form.get("text")
screenname = session["screenname"]
password = session["password"]
session_ = Session.login(screenname, password)
user = User(session=session_)
user.profile.essays.self_summary = text
return "success"
@app.route("/source.json")
def get_words_for_source():
""" Gets JSON to populate words for source """
source_label = request.args.get("source")
source = create_self_summary_words(source_label)
return json.dumps(source)
@app.route("/target.json")
def get_words_for_target():
""" Gets JSON to populate words for target """
target_label = request.args.get("target")
target = create_message_me_if_words(target_label)
return json.dumps(target)
@app.route("/source-chart.json")
def get_stats_for_source_chart():
""" Gets JSON with source informaiton for CHart.js charts """
source_label = request.args.get("source")
gender_element = request.args.get("genderElement")
gender_comment_element = request.args.get("genderCommentElement")
gender, gender_comment_info = prepare_data(source_label, Profile.gender, "source")
orientation_element = request.args.get("orientationElement")
orientation_comment_element = request.args.get("orientationCommentElement")
orientation, orientation_comment_info = prepare_data(source_label, Profile.orientation, "source")
age_element = request.args.get('ageElement')
age_comment_element = request.args.get("ageCommentElement")
age, age_comment_info = prepare_data(source_label, Profile.age, "source")
stats = {"gender": {"identifier": gender_element, "dataPoints": gender, "commentInfo": gender_comment_info, "commentElement": gender_comment_element},
"orientation": {"identifier": orientation_element, "dataPoints": orientation, "commentInfo":orientation_comment_info, "commentElement": orientation_comment_element},
"age": {"identifier": age_element, "dataPoints": age, "commentInfo": age_comment_info, "commentElement": age_comment_element}}
return json.dumps(stats)
@app.route("/target-chart.json")
def get_stats_for_target_chart():
""" Gets JSON with target information for Chart.js charts """
target_label = request.args.get("target")
gender_element = request.args.get("genderElement")
gender_comment_element = request.args.get("genderCommentElement")
gender, gender_comment_info = prepare_data(target_label, Profile.gender, "target")
orientation_element = request.args.get("orientationElement")
orientation_comment_element = request.args.get("orientationCommentElement")
orientation, orientation_comment_info = prepare_data(target_label, Profile.orientation, "target")
age_element = request.args.get('ageElement')
age_comment_element = request.args.get("ageCommentElement")
age, age_comment_info = prepare_data(target_label, Profile.age, "target")
stats = {"gender": {"identifier": gender_element, "dataPoints": gender, "commentInfo": gender_comment_info, "commentElement": gender_comment_element},
"orientation": {"identifier": orientation_element, "dataPoints": orientation, "commentInfo":orientation_comment_info, "commentElement": orientation_comment_element},
"age": {"identifier": age_element, "dataPoints": age, "commentInfo": age_comment_info, "commentElement": age_comment_element}}
return json.dumps(stats)
if __name__ == "__main__":
    app.debug = True
    connect_to_db(app)

    import sys
    # Enable the in-page Jasmine test harness when launched as
    # "python flask_app.py jstest".  Two fixes over the original:
    # it assigned False (a no-op, since JS_TESTING_MODE already defaults
    # to False), and it did so only after the blocking app.run() call
    # returned, i.e. never while the server was serving.
    if sys.argv[-1] == "jstest":
        JS_TESTING_MODE = True

    app.run()
Notamaniac/yogstation | bot/requests/api.py | 188 | 5419 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
    """Send an HTTP request and return the :class:`Response <Response>`.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: any keyword argument accepted by
        :meth:`Session.request` -- ``params``, ``data``, ``json``,
        ``headers``, ``cookies``, ``files``, ``auth``, ``timeout``,
        ``allow_redirects``, ``proxies``, ``verify``, ``stream``,
        ``cert``.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'http://httpbin.org/get')
      <Response [200]>
    """
    # A throwaway session per call; the 'with' block guarantees the
    # session (and its sockets) is closed even when the request raises,
    # avoiding ResourceWarnings that can look like memory leaks.
    with sessions.Session() as session:
        return session.request(method=method, url=url, **kwargs)
def get(url, params=None, **kwargs):
"""Sends a GET request.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
"""Sends a OPTIONS request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def head(url, **kwargs):
"""Sends a HEAD request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
"""Sends a POST request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
"""Sends a PUT request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
"""Sends a PATCH request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
"""Sends a DELETE request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('delete', url, **kwargs)
| agpl-3.0 |
Therp/odoo | addons/l10n_tr/__init__.py | 429 | 1050 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kholidfu/django | django/contrib/gis/db/backends/oracle/schema.py | 608 | 4050 | from django.contrib.gis.db.models.fields import GeometryField
from django.db.backends.oracle.schema import DatabaseSchemaEditor
from django.db.backends.utils import truncate_name
class OracleGISSchemaEditor(DatabaseSchemaEditor):
    """Schema editor for Oracle Spatial.

    On top of the regular Oracle DDL, maintains the USER_SDO_GEOM_METADATA
    rows and MDSYS spatial indexes that every GeometryField column needs,
    and cleans that metadata up when the field or model is removed.
    """

    # Registers a geometry column's extent, tolerance and SRID with
    # Oracle's spatial metadata view.
    sql_add_geometry_metadata = ("""
    INSERT INTO USER_SDO_GEOM_METADATA
        ("TABLE_NAME", "COLUMN_NAME", "DIMINFO", "SRID")
    VALUES (
        %(table)s,
        %(column)s,
        MDSYS.SDO_DIM_ARRAY(
            MDSYS.SDO_DIM_ELEMENT('LONG', %(dim0)s, %(dim2)s, %(tolerance)s),
            MDSYS.SDO_DIM_ELEMENT('LAT', %(dim1)s, %(dim3)s, %(tolerance)s)
        ),
        %(srid)s
    )""")
    sql_add_spatial_index = 'CREATE INDEX %(index)s ON %(table)s(%(column)s) INDEXTYPE IS MDSYS.SPATIAL_INDEX'
    sql_drop_spatial_index = 'DROP INDEX %(index)s'
    sql_clear_geometry_table_metadata = 'DELETE FROM USER_SDO_GEOM_METADATA WHERE TABLE_NAME = %(table)s'
    sql_clear_geometry_field_metadata = (
        'DELETE FROM USER_SDO_GEOM_METADATA WHERE TABLE_NAME = %(table)s '
        'AND COLUMN_NAME = %(column)s'
    )

    def __init__(self, *args, **kwargs):
        super(OracleGISSchemaEditor, self).__init__(*args, **kwargs)
        # Geometry statements queued by column_sql() and executed later by
        # run_geometry_sql(), after the table/column itself exists.
        self.geometry_sql = []

    def geo_quote_name(self, name):
        # Quote a name for use inside spatial-metadata SQL (literal form).
        return self.connection.ops.geo_quote_name(name)

    def column_sql(self, model, field, include_default=False):
        """Return the column definition; as a side effect, queue the spatial
        metadata INSERT (and optional spatial-index CREATE) for GeometryFields."""
        column_sql = super(OracleGISSchemaEditor, self).column_sql(model, field, include_default)
        if isinstance(field, GeometryField):
            db_table = model._meta.db_table
            self.geometry_sql.append(
                self.sql_add_geometry_metadata % {
                    'table': self.geo_quote_name(db_table),
                    'column': self.geo_quote_name(field.column),
                    'dim0': field._extent[0],
                    'dim1': field._extent[1],
                    'dim2': field._extent[2],
                    'dim3': field._extent[3],
                    'tolerance': field._tolerance,
                    'srid': field.srid,
                }
            )
            if field.spatial_index:
                self.geometry_sql.append(
                    self.sql_add_spatial_index % {
                        'index': self.quote_name(self._create_spatial_index_name(model, field)),
                        'table': self.quote_name(db_table),
                        'column': self.quote_name(field.column),
                    }
                )
        return column_sql

    def create_model(self, model):
        super(OracleGISSchemaEditor, self).create_model(model)
        # Flush metadata/index statements queued while building the table.
        self.run_geometry_sql()

    def delete_model(self, model):
        super(OracleGISSchemaEditor, self).delete_model(model)
        # Remove all spatial metadata rows for the dropped table.
        self.execute(self.sql_clear_geometry_table_metadata % {
            'table': self.geo_quote_name(model._meta.db_table),
        })

    def add_field(self, model, field):
        super(OracleGISSchemaEditor, self).add_field(model, field)
        self.run_geometry_sql()

    def remove_field(self, model, field):
        # Clean up spatial metadata and index before the column itself goes.
        if isinstance(field, GeometryField):
            self.execute(self.sql_clear_geometry_field_metadata % {
                'table': self.geo_quote_name(model._meta.db_table),
                'column': self.geo_quote_name(field.column),
            })
            if field.spatial_index:
                self.execute(self.sql_drop_spatial_index % {
                    'index': self.quote_name(self._create_spatial_index_name(model, field)),
                })
        super(OracleGISSchemaEditor, self).remove_field(model, field)

    def run_geometry_sql(self):
        """Execute and then clear any queued geometry statements."""
        for sql in self.geometry_sql:
            self.execute(sql)
        self.geometry_sql = []

    def _create_spatial_index_name(self, model, field):
        # Oracle doesn't allow object names > 30 characters. Use this scheme
        # instead of self._create_index_name() for backwards compatibility.
        return truncate_name('%s_%s_id' % (model._meta.db_table, field.column), 30)
| bsd-3-clause |
mozilla/zamboni | mkt/site/tests/test_utils_.py | 9 | 8440 | # -*- coding: utf-8 -*-
import os
import tempfile
import unittest
from os import path
from django.conf import settings
from django.core.cache import cache
from django.core.validators import ValidationError
import mock
from nose.tools import assert_raises, eq_, raises
from mkt.site.storage_utils import (LocalFileStorage, copy_stored_file,
local_storage, private_storage,
public_storage, storage_is_remote)
from mkt.site.tests import TestCase
from mkt.site.utils import (ImageCheck, cache_ns_key, escape_all, resize_image,
rm_local_tmp_dir, slug_validator, slugify)
def get_image_path(name):
    """Return the absolute path of a test image bundled with this suite."""
    images_dir = path.join(settings.ROOT, 'mkt', 'site', 'tests', 'images')
    return path.join(images_dir, name)
class TestAnimatedImages(TestCase):
    """ImageCheck must detect animation in both PNG and GIF files and
    distinguish real images from arbitrary files."""

    def test_animated_images(self):
        # Animated vs. static variants of the same two formats.
        img = ImageCheck(open(get_image_path('animated.png')))
        assert img.is_animated()
        img = ImageCheck(open(get_image_path('non-animated.png')))
        assert not img.is_animated()

        img = ImageCheck(open(get_image_path('animated.gif')))
        assert img.is_animated()
        img = ImageCheck(open(get_image_path('non-animated.gif')))
        assert not img.is_animated()

    def test_junk(self):
        # This source file itself is not an image.
        img = ImageCheck(open(__file__, 'rb'))
        assert not img.is_image()
        img = ImageCheck(open(get_image_path('non-animated.gif')))
        assert img.is_image()
# Greek sample text ("Greek" in Greek), reused by the slug tests below.
u = u'Ελληνικά'
def test_slug_validator():
    """slug_validator accepts lowercase unicode slugs, rejects malformed ones."""
    for good in (u.lower(), '-'.join([u.lower(), u.lower()])):
        eq_(slug_validator(good), None)
    for bad in ('234.add', 'a a a', 'tags/'):
        assert_raises(ValidationError, slug_validator, bad)
def test_slugify():
    """Nose generator test: slugify() output must equal the expected slug and
    must itself pass slug_validator()."""
    x = '-'.join([u, u])
    y = ' - '.join([u, u])

    def check(x, y):
        eq_(slugify(x), y)
        slug_validator(slugify(x))
    # (input, expected slug) pairs, including unicode and punctuation cases.
    s = [
        ('xx x - "#$@ x', 'xx-x-x'),
        (u'Bän...g (bang)', u'bäng-bang'),
        (u, u.lower()),
        (x, x.lower()),
        (y, x.lower()),
        (' a ', 'a'),
        ('tags/', 'tags'),
        ('holy_wars', 'holy_wars'),
        # I don't really care what slugify returns. Just don't crash.
        (u'x荿', u'x\u837f'),
        (u'ϧ蒬蓣', u'\u03e7\u84ac\u84e3'),
        (u'¿x', u'x'),
    ]
    for val, expected in s:
        yield check, val, expected
def test_resize_image():
    """resize_image must refuse identical source and destination paths."""
    args = ('t', 't', 'z')
    assert_raises(Exception, resize_image, *args)
def test_resize_transparency():
    # Resizing a transparent PNG must preserve transparency; compare the
    # result byte-for-byte with a pre-rendered 32x32 expected image.
    src = get_image_path('transparent.png')
    dest = tempfile.mkstemp(dir=settings.TMP_PATH)[1]
    expected = src.replace('.png', '-expected.png')
    if storage_is_remote():
        # Remote setups read the source from private storage, so seed it.
        copy_stored_file(src, src, src_storage=local_storage,
                         dst_storage=private_storage)
    try:
        resize_image(src, dest, (32, 32), remove_src=False)
        with public_storage.open(dest) as dfh:
            with open(expected) as efh:
                assert dfh.read() == efh.read()
    finally:
        # Always clean the generated file out of public storage.
        if public_storage.exists(dest):
            public_storage.delete(dest)
class TestLocalFileStorage(unittest.TestCase):
    """LocalFileStorage behavior against a real temp dir: read/write
    round-trips, unicode names/content, and directory create/delete
    semantics."""

    def setUp(self):
        self.tmp = tempfile.mkdtemp()
        self.stor = LocalFileStorage()

    def tearDown(self):
        rm_local_tmp_dir(self.tmp)

    def test_read_write(self):
        fn = os.path.join(self.tmp, 'somefile.txt')
        with self.stor.open(fn, 'w') as fd:
            fd.write('stuff')
        with self.stor.open(fn, 'r') as fd:
            eq_(fd.read(), 'stuff')

    def test_non_ascii_filename(self):
        # Non-ASCII file *names* must round-trip.
        fn = os.path.join(self.tmp, u'Ivan Krsti\u0107.txt')
        with self.stor.open(fn, 'w') as fd:
            fd.write('stuff')
        with self.stor.open(fn, 'r') as fd:
            eq_(fd.read(), 'stuff')

    def test_non_ascii_content(self):
        # Non-ASCII file *content* (UTF-8 bytes) must round-trip.
        fn = os.path.join(self.tmp, 'somefile.txt')
        with self.stor.open(fn, 'w') as fd:
            fd.write(u'Ivan Krsti\u0107.txt'.encode('utf8'))
        with self.stor.open(fn, 'r') as fd:
            eq_(fd.read().decode('utf8'), u'Ivan Krsti\u0107.txt')

    def test_make_file_dirs(self):
        # Opening for write creates missing parent directories.
        dp = os.path.join(self.tmp, 'path', 'to')
        self.stor.open(os.path.join(dp, 'file.txt'), 'w').close()
        assert os.path.exists(self.stor.path(dp)), (
            'Directory not created: %r' % dp)

    def test_do_not_make_file_dirs_when_reading(self):
        fpath = os.path.join(self.tmp, 'file.txt')
        with open(fpath, 'w') as fp:
            fp.write('content')
        # Make sure this doesn't raise an exception.
        self.stor.open(fpath, 'r').close()

    def test_make_dirs_only_once(self):
        dp = os.path.join(self.tmp, 'path', 'to')
        with self.stor.open(os.path.join(dp, 'file.txt'), 'w') as fd:
            fd.write('stuff')
        # Make sure it doesn't try to make the dir twice
        with self.stor.open(os.path.join(dp, 'file.txt'), 'w') as fd:
            fd.write('stuff')
        with self.stor.open(os.path.join(dp, 'file.txt'), 'r') as fd:
            eq_(fd.read(), 'stuff')

    def test_delete_empty_dir(self):
        dp = os.path.join(self.tmp, 'path')
        os.mkdir(dp)
        self.stor.delete(dp)
        eq_(os.path.exists(dp), False)

    @raises(OSError)
    def test_cannot_delete_non_empty_dir(self):
        dp = os.path.join(self.tmp, 'path')
        with self.stor.open(os.path.join(dp, 'file.txt'), 'w') as fp:
            fp.write('stuff')
        self.stor.delete(dp)

    def test_delete_file(self):
        # Deleting a file leaves its parent directory in place.
        dp = os.path.join(self.tmp, 'path')
        fn = os.path.join(dp, 'file.txt')
        with self.stor.open(fn, 'w') as fp:
            fp.write('stuff')
        self.stor.delete(fn)
        eq_(os.path.exists(fn), False)
        eq_(os.path.exists(dp), True)
class TestCacheNamespaces(unittest.TestCase):
    """cache_ns_key() prefixes keys with an epoch-based namespace version
    and bumps that version on increment=True, invalidating old entries."""

    def setUp(self):
        cache.clear()
        self.namespace = 'my-test-namespace'

    @mock.patch('mkt.site.utils.epoch')
    def test_no_preexisting_key(self, epoch_mock):
        epoch_mock.return_value = 123456
        eq_(cache_ns_key(self.namespace), '123456:ns:%s' % self.namespace)

    @mock.patch('mkt.site.utils.epoch')
    def test_no_preexisting_key_incr(self, epoch_mock):
        # increment on a missing key behaves like a plain read.
        epoch_mock.return_value = 123456
        eq_(cache_ns_key(self.namespace, increment=True),
            '123456:ns:%s' % self.namespace)

    @mock.patch('mkt.site.utils.epoch')
    def test_key_incr(self, epoch_mock):
        epoch_mock.return_value = 123456
        cache_ns_key(self.namespace)  # Sets ns to 123456
        ns_key = cache_ns_key(self.namespace, increment=True)
        # Incrementing bumps the stored version, and subsequent reads see it.
        expected = '123457:ns:%s' % self.namespace
        eq_(ns_key, expected)
        eq_(cache_ns_key(self.namespace), expected)
class TestEscapeAll(unittest.TestCase):
    """escape_all() must sanitize strings recursively through dicts and
    lists while leaving non-string values (e.g. bools) untouched."""

    def test_basics(self):
        x = '-'.join([u, u])
        y = ' - '.join([u, u])

        # (input, expected) pairs covering markup and unicode inputs.
        tests = [
            ('<script>alert("BALL SO HARD")</script>',
             '<script>alert("BALL SO HARD")</script>'),
            (u'Bän...g (bang)', u'Bän...g (bang)'),
            (u, u),
            (x, x),
            (y, y),
            (u'x荿', u'x\u837f'),
            (u'ϧ蒬蓣', u'\u03e7\u0383\u84ac\u84e3'),
            (u'¿x', u'¿x'),
        ]

        for val, expected in tests:
            eq_(escape_all(val), expected)

    def test_nested(self):
        # Escaping recurses into dict values and list items.
        value = '<script>alert("BALL SO HARD")</script>'
        expected = '<script>alert("BALL SO HARD")</script>'

        test = {
            'string': value,
            'dict': {'x': value},
            'list': [value],
            'bool': True,
        }
        res = escape_all(test)

        eq_(res['string'], expected)
        eq_(res['dict'], {'x': expected})
        eq_(res['list'], [expected])
        eq_(res['bool'], True)

    def test_without_linkify(self):
        # With linkify=False, URLs inside markup are not turned into links.
        value = '<button>http://firefox.com</button>'
        expected = '<button>http://firefox.com</button>'

        test = {
            'string': value,
            'dict': {'x': value},
            'list': [value],
            'bool': True,
        }
        res = escape_all(test, linkify=False)

        eq_(res['string'], expected)
        eq_(res['dict'], {'x': expected})
        eq_(res['list'], [expected])
        eq_(res['bool'], True)
| bsd-3-clause |
rcbops/python-django-buildpackage | django/contrib/comments/views/moderation.py | 307 | 5037 | from django import template
from django.conf import settings
from django.shortcuts import get_object_or_404, render_to_response
from django.contrib.auth.decorators import login_required, permission_required
from utils import next_redirect, confirmation_view
from django.contrib import comments
from django.contrib.comments import signals
from django.views.decorators.csrf import csrf_protect
@csrf_protect
@login_required
def flag(request, comment_id, next=None):
    """
    Flags a comment. Confirmation on GET, action on POST.

    Templates: `comments/flag.html`,
    Context:
        comment
            the flagged `comments.comment` object
    """
    comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)

    if request.method != 'POST':
        # GET: render the confirmation form.
        return render_to_response('comments/flag.html',
            {'comment': comment, "next": next},
            template.RequestContext(request)
        )

    # POST: record the flag, then redirect to the success page.
    perform_flag(request, comment)
    return next_redirect(request.POST.copy(), next, flag_done, c=comment.pk)
@csrf_protect
@permission_required("comments.can_moderate")
def delete(request, comment_id, next=None):
    """
    Deletes a comment. Confirmation on GET, action on POST. Requires the "can
    moderate comments" permission.

    Templates: `comments/delete.html`,
    Context:
        comment
            the flagged `comments.comment` object
    """
    comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)

    # Delete on POST
    if request.method == 'POST':
        # Flag the comment as deleted instead of actually deleting it.
        perform_delete(request, comment)
        return next_redirect(request.POST.copy(), next, delete_done, c=comment.pk)

    # Render a form on GET
    else:
        return render_to_response('comments/delete.html',
            {'comment': comment, "next": next},
            template.RequestContext(request)
        )
@csrf_protect
@permission_required("comments.can_moderate")
def approve(request, comment_id, next=None):
    """
    Approve a comment (that is, mark it as public and non-removed). Confirmation
    on GET, action on POST. Requires the "can moderate comments" permission.

    Templates: `comments/approve.html`,
    Context:
        comment
            the `comments.comment` object for approval
    """
    comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)

    # Approve on POST
    if request.method == 'POST':
        # Flag the comment as approved.
        perform_approve(request, comment)
        return next_redirect(request.POST.copy(), next, approve_done, c=comment.pk)

    # Render a form on GET
    else:
        return render_to_response('comments/approve.html',
            {'comment': comment, "next": next},
            template.RequestContext(request)
        )
# The following functions actually perform the various flag/approve/delete
# actions. They've been broken out into separate functions so that they
# may be called from admin actions.

def perform_flag(request, comment):
    """
    Actually perform the flagging of a comment from a request.
    """
    flag_model = comments.models.CommentFlag
    flag, created = flag_model.objects.get_or_create(
        comment=comment,
        user=request.user,
        flag=flag_model.SUGGEST_REMOVAL,
    )
    signals.comment_was_flagged.send(sender=comment.__class__, comment=comment,
                                     flag=flag, created=created, request=request)
def perform_delete(request, comment):
    """Soft-delete *comment*: mark it removed and record a moderator flag."""
    flag_model = comments.models.CommentFlag
    flag, created = flag_model.objects.get_or_create(
        comment=comment,
        user=request.user,
        flag=flag_model.MODERATOR_DELETION,
    )
    comment.is_removed = True
    comment.save()
    signals.comment_was_flagged.send(sender=comment.__class__, comment=comment,
                                     flag=flag, created=created, request=request)
def perform_approve(request, comment):
    """Mark *comment* public and non-removed, recording a moderator flag."""
    flag_model = comments.models.CommentFlag
    flag, created = flag_model.objects.get_or_create(
        comment=comment,
        user=request.user,
        flag=flag_model.MODERATOR_APPROVAL,
    )
    comment.is_removed = False
    comment.is_public = True
    comment.save()
    signals.comment_was_flagged.send(sender=comment.__class__, comment=comment,
                                     flag=flag, created=created, request=request)
# Confirmation views.
# Each is a simple success page produced by confirmation_view(); the
# moderation views above redirect here (with ?c=<comment pk>) on success.
flag_done = confirmation_view(
    template = "comments/flagged.html",
    doc = 'Displays a "comment was flagged" success page.'
)
delete_done = confirmation_view(
    template = "comments/deleted.html",
    doc = 'Displays a "comment was deleted" success page.'
)
approve_done = confirmation_view(
    template = "comments/approved.html",
    doc = 'Displays a "comment was approved" success page.'
)
| bsd-3-clause |
drcapulet/sentry | src/sentry/models/team.py | 11 | 6254 | """
sentry.models.team
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import warnings
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.db.models import (
BaseManager, BoundedPositiveIntegerField, FlexibleForeignKey, Model,
sane_repr
)
from sentry.db.models.utils import slugify_instance
from sentry.utils.http import absolute_uri
class TeamManager(BaseManager):
    """Manager adding access-aware team lookups."""

    def get_for_user(self, organization, user, access=None, with_projects=False):
        """
        Returns a list of all teams a user has some level of access to.

        Each <Team> returned has an ``access_type`` attribute which holds the
        OrganizationMemberType value.
        """
        from sentry.models import (
            OrganizationMember, OrganizationMemberTeam,
            OrganizationMemberType, Project
        )

        if not user.is_authenticated():
            return []

        base_team_qs = self.filter(
            organization=organization,
            status=TeamStatus.VISIBLE
        )

        if user.is_superuser or (settings.SENTRY_PUBLIC and access is None):
            # Superusers (and everyone on public installs) see every visible
            # team, minus teams whose membership is explicitly inactive.
            inactive = list(OrganizationMemberTeam.objects.filter(
                organizationmember__user=user,
                organizationmember__organization=organization,
                is_active=False,
            ).values_list('team', flat=True))
            team_list = base_team_qs
            if inactive:
                team_list = team_list.exclude(id__in=inactive)

            team_list = list(team_list)

            if user.is_superuser:
                access = OrganizationMemberType.OWNER
            else:
                access = OrganizationMemberType.MEMBER

            for team in team_list:
                team.access_type = access
        else:
            # Regular users: restrict to their organization membership's
            # teams, optionally filtered by a minimum access type.
            om_qs = OrganizationMember.objects.filter(
                user=user,
                organization=organization,
            )
            if access is not None:
                om_qs = om_qs.filter(type__lte=access)

            try:
                om = om_qs.get()
            except OrganizationMember.DoesNotExist:
                team_qs = self.none()
            else:
                team_qs = om.get_teams()
                for team in team_qs:
                    team.access_type = om.type

            team_list = set(team_qs)

        results = sorted(team_list, key=lambda x: x.name.lower())

        if with_projects:
            # these kinds of queries make people sad :(
            # Attach each team's project list, returning (team, projects).
            for idx, team in enumerate(results):
                project_list = list(Project.objects.get_for_user(
                    team=team,
                    user=user,
                    _skip_team_check=True
                ))
                results[idx] = (team, project_list)

        return results
# TODO(dcramer): pull in enum library
class TeamStatus(object):
    # Lifecycle states for a Team; stored in Team.status.
    VISIBLE = 0
    PENDING_DELETION = 1
    DELETION_IN_PROGRESS = 2
class Team(Model):
    """
    A team represents a group of individuals which maintain ownership of projects.
    """
    organization = FlexibleForeignKey('sentry.Organization')
    slug = models.SlugField()
    name = models.CharField(max_length=64)
    status = BoundedPositiveIntegerField(choices=(
        (TeamStatus.VISIBLE, _('Active')),
        (TeamStatus.PENDING_DELETION, _('Pending Deletion')),
        (TeamStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
    ), default=TeamStatus.VISIBLE)
    date_added = models.DateTimeField(default=timezone.now, null=True)

    objects = TeamManager(cache_fields=(
        'pk',
        'slug',
    ))

    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_team'
        unique_together = (('organization', 'slug'),)

    # NOTE(review): 'owner_id' is referenced here but no 'owner' field is
    # declared on this model -- presumably a legacy attribute; verify.
    __repr__ = sane_repr('slug', 'owner_id', 'name')

    def __unicode__(self):
        return u'%s (%s)' % (self.name, self.slug)

    def save(self, *args, **kwargs):
        # Auto-generate a slug (unique per organization) on first save.
        if not self.slug:
            slugify_instance(self, self.name, organization=self.organization)
        super(Team, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return absolute_uri(reverse('sentry-team-dashboard', args=[
            self.organization.slug,
            self.slug,
        ]))

    def get_owner_name(self):
        # NOTE(review): relies on a self.owner attribute that no field on
        # this model declares; would raise AttributeError unless set
        # elsewhere -- confirm before use.
        if not self.owner:
            return None
        if self.owner.first_name:
            return self.owner.first_name
        if self.owner.email:
            return self.owner.email.split('@', 1)[0]
        return self.owner.username

    @property
    def member_set(self):
        """Active organization members with access to this team: direct
        members plus global-access members, minus inactive team links."""
        from sentry.models import OrganizationMember
        return self.organization.member_set.filter(
            Q(organizationmemberteam__team=self) |
            Q(has_global_access=True),
            user__is_active=True,
        ).exclude(
            id__in=OrganizationMember.objects.filter(
                organizationmemberteam__is_active=False,
                organizationmemberteam__team=self,
            ).values('id')
        ).distinct()

    def has_access(self, user, access=None):
        """Deprecated membership check; also honours SSO identity validity
        when an auth provider is configured."""
        from sentry.models import AuthIdentity, OrganizationMember

        warnings.warn('Team.has_access is deprecated.', DeprecationWarning)

        queryset = self.member_set.filter(
            user=user,
        )
        if access is not None:
            queryset = queryset.filter(type__lte=access)

        try:
            member = queryset.get()
        except OrganizationMember.DoesNotExist:
            return False

        try:
            auth_identity = AuthIdentity.objects.get(
                auth_provider__organization=self.organization_id,
                user=member.user_id,
            )
        except AuthIdentity.DoesNotExist:
            return True

        return auth_identity.is_valid(member)

    def get_audit_log_data(self):
        # Snapshot of the fields recorded in audit-log entries.
        return {
            'slug': self.slug,
            'name': self.name,
            'status': self.status,
        }
| bsd-3-clause |
xcbat/vnpy | vnpy/trader/gateway/shzdGateway/shzdGateway.py | 7 | 27260 | # encoding: UTF-8
'''
Gateway adapter for vn.shzd (SHZD direct-access API).

1. There are too many option contracts, so for convenience the gateway only
   receives futures contract data by default.
2. A cancel request through the direct-access API is also treated as an
   independent order, but vn.trader chooses to ignore it.
3. After a position is fully closed, subsequent queries no longer push data
   for that contract (unlike CTP); a position cache is used so the final
   close is not left un-updated.
'''
import os
import json
from copy import copy
from datetime import datetime
from vnpy.api.shzd import ShzdApi
from vnpy.trader.vtGateway import *
from vnpy.trader.vtFunction import getJsonPath
# Mapping dictionaries between VT constants and SHZD API codes.

# Price type mapping
priceTypeMap = {}
priceTypeMap[PRICETYPE_LIMITPRICE] = '1'
priceTypeMap[PRICETYPE_MARKETPRICE] = '2'
priceTypeMapReverse = {v: k for k, v in priceTypeMap.items()}

# Direction mapping
directionMap = {}
directionMap[DIRECTION_LONG] = '1'
directionMap[DIRECTION_SHORT] = '2'
directionMapReverse = {v: k for k, v in directionMap.items()}

# Exchange mapping
exchangeMap = {}
exchangeMap[EXCHANGE_HKEX] = 'HKEX'
exchangeMap[EXCHANGE_CME] = 'CME'
exchangeMap[EXCHANGE_ICE] = 'ICE'
exchangeMap[EXCHANGE_LME] = 'LME'
exchangeMapReverse = {v:k for k,v in exchangeMap.items()}

# Product class mapping
productClassMap = {}
productClassMap[PRODUCT_FUTURES] = 'F'
productClassMap[PRODUCT_OPTION] = 'O'
productClassMapReverse = {v:k for k,v in productClassMap.items()}

# Order status mapping (SHZD code -> VT status)
orderStatusMapReverse = {}
orderStatusMapReverse['2'] = STATUS_NOTTRADED
orderStatusMapReverse['3'] = STATUS_PARTTRADED
orderStatusMapReverse['4'] = STATUS_ALLTRADED
orderStatusMapReverse['5'] = STATUS_CANCELLED
orderStatusMapReverse['6'] = STATUS_CANCELLED
########################################################################
class ShzdGateway(VtGateway):
    """SHZD gateway interface."""

    #----------------------------------------------------------------------
    def __init__(self, eventEngine, gatewayName='SHZD'):
        """Constructor"""
        super(ShzdGateway, self).__init__(eventEngine, gatewayName)

        self.api = ShzdGatewayApi(self)

        self.qryEnabled = False         # whether the polling query loop runs

        self.fileName = self.gatewayName + '_connect.json'
        self.filePath = getJsonPath(self.fileName, __file__)

    #----------------------------------------------------------------------
    def connect(self):
        """Connect using the settings in the JSON config file."""
        try:
            f = file(self.filePath)
        except IOError:
            log = VtLogData()
            log.gatewayName = self.gatewayName
            log.logContent = u'读取连接配置出错,请检查'
            self.onLog(log)
            return

        # Parse the JSON settings file
        setting = json.load(f)
        try:
            frontAddress = str(setting['frontAddress'])
            frontPort = int(setting['frontPort'])
            marketAddress = str(setting['marketAddress'])
            marketPort = int(setting['marketPort'])
            userId = str(setting['userId'])
            userPwd = str(setting['userPwd'])
        except KeyError:
            self.writeLog(u'连接配置缺少字段,请检查')
            return

        # Connect the market-data and trading API
        self.api.connect(userId, userPwd,
                         frontAddress, frontPort,
                         marketAddress, marketPort)

        # Initialize and start the periodic queries
        self.initQuery()

    #----------------------------------------------------------------------
    def writeLog(self, logContent):
        """Push a log entry to the event engine."""
        log = VtLogData()
        log.gatewayName = self.gatewayName
        log.logContent = logContent
        self.onLog(log)

    #----------------------------------------------------------------------
    def subscribe(self, subscribeReq):
        """Subscribe to market data."""
        self.api.subscribe(subscribeReq)

    #----------------------------------------------------------------------
    def sendOrder(self, orderReq):
        """Send an order."""
        return self.api.sendOrder(orderReq)

    #----------------------------------------------------------------------
    def cancelOrder(self, cancelOrderReq):
        """Cancel an order."""
        self.api.cancelOrder(cancelOrderReq)

    #----------------------------------------------------------------------
    def qryAccount(self):
        """Query account funds."""
        self.api.qryAccount()

    #----------------------------------------------------------------------
    def qryPosition(self):
        """Query positions."""
        self.api.qryPosition()

    #----------------------------------------------------------------------
    def close(self):
        """Close the connection."""
        self.api.close()    # release the API object

    #----------------------------------------------------------------------
    def initQuery(self):
        """Initialize the periodic query loop."""
        if self.qryEnabled:
            # List of query functions to poll
            self.qryFunctionList = [self.qryAccount, self.qryPosition]

            self.qryCount = 0           # countdown to the next query
            self.qryTrigger = 2         # timer events per query
            self.qryNextFunction = 0    # index of the next query function

            self.startQuery()

    #----------------------------------------------------------------------
    def query(self, event):
        """Query function registered on the event-engine timer."""
        self.qryCount += 1

        if self.qryCount > self.qryTrigger:
            # Reset the countdown
            self.qryCount = 0

            # Run the query function
            function = self.qryFunctionList[self.qryNextFunction]
            function()

            # Advance to the next query function, wrapping past the end
            self.qryNextFunction += 1
            if self.qryNextFunction == len(self.qryFunctionList):
                self.qryNextFunction = 0

    #----------------------------------------------------------------------
    def startQuery(self):
        """Start the periodic queries."""
        self.eventEngine.register(EVENT_TIMER, self.query)

    #----------------------------------------------------------------------
    def setQryEnabled(self, qryEnabled):
        """Set whether the polling query loop should run."""
        self.qryEnabled = qryEnabled
########################################################################
class ShzdGatewayApi(ShzdApi):
"""直达接口的继承实现"""
    #----------------------------------------------------------------------
    def __init__(self, gateway):
        """Constructor"""
        super(ShzdGatewayApi, self).__init__()

        self.gateway = gateway                  # owning gateway object
        self.gatewayName = gateway.gatewayName  # gateway name

        self.userId = EMPTY_STRING              # user name
        self.accountNo = EMPTY_STRING           # single account used for queries
        self.accountNoList = []                 # list of account numbers

        self.tradeCallbacks = {}                # trading callback dispatch map
        self.marketCallbacks = {}               # market-data callback dispatch map

        # Order bookkeeping
        self.localNo = EMPTY_INT    # local order number
        self.orderDict = {}         # key: str(localNo), value: order object
        self.orderNoDict = {}       # key: OrderNo, value: localNo
        self.localNoDict = {}       # key: str(localNo), value: (SystemNo, OrderNo)
        self.cancelDict = {}        # key: str(localNo) awaiting cancel, value: CancelOrderReq

        # Order id prefix (time based)
        n = datetime.now()
        self.orderPrefix = n.strftime("%H%M%S.")

        # Position cache (SHZD stops pushing fully-closed positions)
        self.posDict = {}           # key: vtPositionName, value: VtPositionData

        # Whether initialization has completed
        self.inited = False

        self.initCallbacks()
    #----------------------------------------------------------------------
    def initCallbacks(self):
        """Initialize the message-type -> handler dispatch maps."""
        # Market data push
        self.marketCallbacks['MA1'] = self.onMarketData

        # Login and query replies
        self.tradeCallbacks['A1'] = self.onLogin
        self.tradeCallbacks['AC1'] = self.onQryAccount
        self.tradeCallbacks['OS1'] = self.onQryPosition
        self.tradeCallbacks['HY'] = self.onQryContract
        self.tradeCallbacks['ORS1'] = self.onQryOrder
        self.tradeCallbacks['FS1'] = self.onTrade

        # Order placement and cancel acknowledgements
        self.tradeCallbacks['O1'] = self.onSendOrder
        self.tradeCallbacks['C1'] = self.onCancelOrder

        # Trade and order status pushes
        self.tradeCallbacks['O3'] = self.onTrade
        self.tradeCallbacks['OST'] = self.onOrder
    #----------------------------------------------------------------------
    def onReceiveErrorInfo(self, errcode, errmsg):
        """Error push callback."""
        err = VtErrorData()
        err.gatewayName = self.gatewayName
        err.errorID = errcode
        err.errorMsg = errmsg
        self.gateway.onError(err)

    #----------------------------------------------------------------------
    def onReceiveMarketInfo(self, data):
        """Market data push: dispatch on the 'msgtype' field."""
        func = self.marketCallbacks.get(data['msgtype'], None)
        if func:
            func(data)

    #----------------------------------------------------------------------
    def onReceiveTradeInfo(self, data):
        """Trading push: dispatch on the 'msgtype' field."""
        func = self.tradeCallbacks.get(data['msgtype'], None)
        if func:
            func(data)
    #----------------------------------------------------------------------
    def onMarketData(self, data):
        """Market data (tick) push."""
        tick = VtTickData()
        tick.gatewayName = self.gatewayName

        tick.symbol = data['307']
        tick.exchange = exchangeMapReverse.get(data['306'], EXCHANGE_UNKNOWN)
        tick.vtSymbol = '.'.join([tick.symbol, tick.exchange])

        tick.volume = int(data['513'])
        # LME quotes carry no open interest data.
        if data['306'] != 'LME':
            tick.openInterest = int(data['514'])

        dt = data['512'].split(' ')
        tick.time = dt[1]
        tick.date = dt[0].replace('_', '')

        try:
            tick.lastPrice = float(data['504'])
            tick.openPrice = float(data['508'])
            tick.highPrice = float(data['506'])
            tick.lowPrice = float(data['507'])
            tick.preClosePrice = float(data['509'])

            # Five levels of market depth are available.
            tick.bidPrice1 = float(data['500'])
            tick.bidPrice2 = float(data['515'])
            tick.bidPrice3 = float(data['516'])
            tick.bidPrice4 = float(data['517'])
            tick.bidPrice5 = float(data['518'])

            tick.bidVolume1 = int(data['501'])
            tick.bidVolume2 = int(data['519'])
            tick.bidVolume3 = int(data['520'])
            tick.bidVolume4 = int(data['521'])
            tick.bidVolume5 = int(data['522'])

            tick.askPrice1 = float(data['502'])
            tick.askPrice2 = float(data['523'])
            tick.askPrice3 = float(data['524'])
            tick.askPrice4 = float(data['525'])
            tick.askPrice5 = float(data['526'])

            tick.askVolume1 = int(data['503'])
            tick.askVolume2 = int(data['527'])
            tick.askVolume3 = int(data['528'])
            tick.askVolume4 = int(data['529'])
            tick.askVolume5 = int(data['530'])
        except ValueError:
            # Some price fields may arrive empty; skip fields that fail
            # to parse rather than dropping the whole tick.
            pass

        self.gateway.onTick(tick)
    #----------------------------------------------------------------------
    def onLogin(self, data):
        """Login result push."""
        if '11' in data:
            self.accountNo = data['11']
            self.accountNoList.append(data['11'])
            self.loginStatus = True
            self.gateway.writeLog(u'账户%s,结算货币%s' %(data['11'], data['200']))

        # Field 410 == '1' appears to mark the final message of a reply
        # sequence (see the query handlers below); kick off the initial
        # order/trade queries once login is fully acknowledged.
        if '410' in data and data['410'] == '1':
            self.gateway.writeLog(u'登录成功')

            #self.qryContract()
            self.qryOrder()
            self.qryTrade()
    #----------------------------------------------------------------------
    def onSendOrder(self, data):
        """Order submission acknowledgement."""
        # Field 404 is the error code; empty or '00000' means success.
        if not data['404'] or data['404'] == '00000':
            order = VtOrderData()
            order.gatewayName = self.gatewayName

            order.symbol = data['307']
            order.exchange = exchangeMapReverse.get(data['306'], EXCHANGE_UNKNOWN)
            order.vtSymbol = '.'.join([order.symbol, order.exchange])

            order.orderID = data['305']
            order.vtOrderID = '.'.join([self.gatewayName, order.orderID])

            order.direction = directionMapReverse.get(data['308'], DIRECTION_UNKNOWN)
            order.price = float(data['310'])
            order.totalVolume = int(data['309'])
            order.status = orderStatusMapReverse.get(data['405'], STATUS_UNKNOWN)
            order.orderTime = data['346']

            self.orderDict[order.orderID] = order
            self.localNoDict[order.orderID] = (data['300'], data['301'])
            self.orderNoDict[data['301']] = order.orderID

            # Traded volume is present only in order-query replies.
            if '315' in data:
                order.tradedVolume = int(data['315'])

            self.gateway.onOrder(copy(order))

            # If a cancel was queued for this order before the ack
            # arrived, send it now.
            if order.orderID in self.cancelDict:
                self.cancelOrder(self.cancelDict[order.orderID])
                del self.cancelDict[order.orderID]
        else:
            error = VtErrorData()
            error.gatewayName = self.gatewayName
            error.errorID = data['404']
            error.errorMsg = u'委托失败'
            self.gateway.onError(error)

    #----------------------------------------------------------------------
    def onCancelOrder(self, data):
        """Cancel acknowledgement."""
        orderID = self.orderNoDict[data['301']]
        order = self.orderDict[orderID]

        if not data['404'] or data['404'] == '00000':
            order.status = STATUS_CANCELLED
            order.cancelTime = data['326']
            self.gateway.onOrder(copy(order))
        else:
            error = VtErrorData()
            error.gatewayName = self.gatewayName
            error.errorID = data['404']
            error.errorMsg = u'撤单失败'
            self.gateway.onError(error)
    #----------------------------------------------------------------------
    def onTrade(self, data):
        """Trade (fill) push; also handles trade-query replies."""
        if '307' in data:
            trade = VtTradeData()
            trade.gatewayName = self.gatewayName

            trade.symbol = data['307']
            trade.exchange = exchangeMapReverse.get(data['306'], EXCHANGE_UNKNOWN)
            trade.vtSymbol = '.'.join([trade.symbol, trade.exchange])

            trade.tradeID = data['304']
            trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])

            trade.orderID = data['305']
            trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])

            trade.direction = directionMapReverse.get(data['308'], DIRECTION_UNKNOWN)
            trade.price = float(data['313'])
            trade.volume = int(data['315'])
            trade.tradeTime = data['326']

            self.gateway.onTrade(trade)
        elif '410' in data and data['410'] == '1':
            # End of the trade-query reply sequence.
            self.gateway.writeLog(u'成交查询完成')

    #----------------------------------------------------------------------
    def onOrder(self, data):
        """Order status change push: update traded volume and derive status."""
        orderID = self.orderNoDict.get(data['301'], None)
        if orderID:
            order = self.orderDict[orderID]
            order.tradedVolume = int(data['315'])

            if order.tradedVolume > 0:
                if order.tradedVolume < order.totalVolume:
                    order.status = STATUS_PARTTRADED
                else:
                    order.status = STATUS_ALLTRADED

            self.gateway.onOrder(copy(order))
    #----------------------------------------------------------------------
    def onQryOrder(self, data):
        """Order query reply."""
        if '404' in data and data['404'] != '00000':
            error = VtErrorData()
            error.gatewayName = self.gatewayName
            error.errorID = data['404']
            error.errorMsg = u'查询委托失败'
            self.gateway.onError(error)
        elif '410' not in data and '307' in data:
            order = VtOrderData()
            order.gatewayName = self.gatewayName

            order.symbol = data['307']
            order.exchange = exchangeMapReverse.get(data['306'], EXCHANGE_UNKNOWN)
            order.vtSymbol = '.'.join([order.symbol, order.exchange])

            order.orderID = data['305']
            order.vtOrderID = '.'.join([self.gatewayName, order.orderID])

            order.direction = directionMapReverse.get(data['308'], DIRECTION_UNKNOWN)
            order.price = float(data['310'])
            order.totalVolume = int(data['309'])
            order.status = orderStatusMapReverse.get(data['405'], STATUS_UNKNOWN)
            order.orderTime = data['346']
            order.cancelTime = data['326']

            self.orderDict[order.orderID] = order
            self.localNoDict[order.orderID] = (data['300'], data['301'])
            self.orderNoDict[data['301']] = order.orderID

            order.tradedVolume = int(data['315'])

            self.gateway.onOrder(copy(order))
        elif '410' in data and data['410'] == '1':
            # End of the order-query reply sequence.
            self.gateway.writeLog(u'委托查询完成')
#----------------------------------------------------------------------
def onQryPosition(self, data):
    """Position query response.

    Each record carries both the long and the short side of one
    contract.  Cached VtPositionData objects in self.posDict are
    updated in place and only pushed to the event engine once the
    end-of-query marker (field 410 == '1') arrives.
    """
    if '307' in data:
        pos = VtPositionData()
        pos.gatewayName = self.gatewayName

        pos.symbol = data['307']
        pos.exchange = exchangeMapReverse.get(data['306'], EXCHANGE_UNKNOWN)
        pos.vtSymbol = '.'.join([pos.symbol, pos.exchange])

        # Long side: reuse the cached object or create it on first sight.
        longPosName = '.'.join([pos.vtSymbol, DIRECTION_LONG])
        try:
            longPos = self.posDict[longPosName]
        except KeyError:
            longPos = copy(pos)
            longPos.direction = DIRECTION_LONG
            longPos.vtPositionName = longPosName
            self.posDict[longPosName] = longPos
        longPos.position = int(data['442'])
        longPos.price = float(data['443'])

        # Short side, same pattern with its own field tags.
        shortPosName = '.'.join([pos.vtSymbol, DIRECTION_SHORT])
        try:
            shortPos = self.posDict[shortPosName]
        except KeyError:
            shortPos = copy(pos)
            shortPos.direction = DIRECTION_SHORT
            shortPos.vtPositionName = shortPosName
            self.posDict[shortPosName] = shortPos
        shortPos.position = int(data['445'])
        shortPos.price = float(data['446'])

    # Push positions to the event engine only after the whole
    # snapshot has been received.
    if '410' in data and data['410'] == '1':
        for pos in self.posDict.values():
            self.gateway.onPosition(pos)
#----------------------------------------------------------------------
def onQryAccount(self, data):
    """Account funds query response."""
    if '11' not in data:
        return
    account = VtAccountData()
    account.gatewayName = self.gatewayName
    account.accountID = data['11']
    account.vtAccountID = '.'.join([self.gatewayName, account.accountID])
    # Numeric fields: (attribute, protocol field tag).
    for attr, tag in (('preBalance', '218'),
                      ('balance', '203'),
                      ('available', '201'),
                      ('commission', '221'),
                      ('margin', '212'),
                      ('closeProfit', '205'),
                      ('positionProfit', '216')):
        setattr(account, attr, float(data[tag]))
    self.gateway.onAccount(account)
#----------------------------------------------------------------------
def onQryContract(self, data):
    """Contract query response."""
    if '306' in data and data['306'] in exchangeMapReverse:
        contract = VtContractData()
        contract.gatewayName = self.gatewayName

        contract.symbol = data['333'] + data['307']
        contract.exchange = exchangeMapReverse.get(data['306'], EXCHANGE_UNKNOWN)
        contract.vtSymbol = '.'.join([contract.symbol, contract.exchange])
        # Contract names come back GBK-encoded from the counter API.
        contract.name = data['332'].decode('GBK')
        contract.productClass = productClassMapReverse.get(data['335'], '')
        contract.size = float(data['336'])
        contract.priceTick = float(data['337'])

        # There are too many option contracts, so by default the
        # gateway only forwards futures contracts.
        if contract.productClass == PRODUCT_FUTURES:
            self.gateway.onContract(contract)

    if '410' in data and data['410'] == '1':
        self.gateway.writeLog(u'合约查询完成')
#----------------------------------------------------------------------
def connect(self, userId, userPwd,
            frontAddress, frontPort,
            marketAddress, marketPort):
    """Connect to the counter: initialise the API, register the trade
    and market-data servers, then send the login ('A') message.

    Fix: the login request used to send hard-coded demo credentials
    ('demo000604'/'888888') instead of the userId/userPwd arguments,
    which made any non-demo login impossible.
    """
    self.userId = userId

    # Initialise the underlying API library.
    n = self.initShZdServer()
    if n:
        self.gateway.writeLog(u'接口初始化失败,原因%s' %n)
        return
    else:
        self.gateway.writeLog(u'接口初始化成功')
        self.inited = True

    # Connect to the trading server.
    n = self.registerFront(frontAddress, frontPort)
    if n:
        self.gateway.writeLog(u'交易服务器连接失败,原因%s' %n)
        return
    else:
        self.gateway.writeLog(u'交易服务器连接成功')

    # Connect to the market data server.
    n = self.registerMarket(marketAddress, marketPort)
    if n:
        self.gateway.writeLog(u'行情服务器连接失败,原因%s' %n)
        return
    else:
        self.gateway.writeLog(u'行情服务器连接成功')

    # Log in with the caller-supplied credentials.
    req = {}
    req['msgtype'] = 'A'
    req['12'] = userId
    req['16'] = userPwd
    self.shzdSendInfoToTrade(req)
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
    """Subscribe to market data for one contract."""
    symbolField = ','.join([exchangeMap[subscribeReq.exchange],
                            subscribeReq.symbol])
    req = {
        'msgtype': 'MA',
        '11': self.accountNo,
        '201': '+',          # presumably the subscribe flag -- TODO confirm
        '307': symbolField,
    }
    self.shzdSendInfoToMarket(req)
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
    """Send a new order; return the locally assigned vtOrderID."""
    self.localNo += 1
    localID = self.orderPrefix + str(self.localNo).rjust(10, '0')

    req = {
        'msgtype': 'O',
        '12': self.userId,
        '11': self.accountNo,
        '306': exchangeMap.get(orderReq.exchange, ''),
        '307': orderReq.symbol,
        '308': directionMap.get(orderReq.direction, ''),
        '309': str(orderReq.volume),
        '310': str(orderReq.price),
        '401': priceTypeMap.get(orderReq.priceType, ''),
        '305': localID,
    }
    self.shzdSendInfoToTrade(req)

    vtOrderID = '.'.join([self.gatewayName, str(self.localNo)])
    return vtOrderID
#----------------------------------------------------------------------
def cancelOrder(self, cancelReq):
    """Cancel a working order.

    If the counter has already echoed back the (system no, order no)
    pair for this local order, send the cancel ('C') message at once;
    otherwise remember the request so it can be retried once the pair
    arrives.
    """
    tmp = self.localNoDict.get(cancelReq.orderID, None)
    if tmp:
        systemNo = tmp[0]
        orderNo = tmp[1]
        order = self.orderDict[cancelReq.orderID]

        req = {}
        req['msgtype'] = 'C'
        req['12'] = self.userId
        req['11'] = self.accountNo
        req['300'] = systemNo
        req['301'] = orderNo
        req['306'] = exchangeMap.get(order.exchange, '')
        req['307'] = order.symbol
        req['308'] = directionMap.get(order.direction, '')
        req['309'] = str(order.totalVolume)
        req['310'] = str(order.price)
        req['315'] = str(order.tradedVolume)
        self.shzdSendInfoToTrade(req)
    else:
        # Counter has not assigned numbers yet: queue for later.
        self.cancelSet.add(cancelReq)
#----------------------------------------------------------------------
def qryAccount(self):
    """Query account funds."""
    req = {
        'msgtype': 'AC',
        '12': self.userId,
        '11': self.accountNo,
    }
    self.shzdSendInfoToTrade(req)
#----------------------------------------------------------------------
def qryPosition(self):
    """Query positions; clears cached position data pending the reply."""
    req = {
        'msgtype': 'OS',
        '12': self.userId,
        '11': self.accountNo,
    }
    self.shzdSendInfoToTrade(req)

    # Zero the cached positions; the reply will repopulate them.
    for pos in self.posDict.values():
        pos.price = 0
        pos.position = 0
#----------------------------------------------------------------------
def qryContract(self):
    """Query the contract list."""
    req = {
        'msgtype': 'HY',
        '11': self.accountNo,
    }
    self.shzdSendInfoToTrade(req)
#----------------------------------------------------------------------
def qryTrade(self):
    """Query trades (fills)."""
    req = {
        'msgtype': 'FS',
        '12': self.userId,
        '11': self.accountNo,
    }
    self.shzdSendInfoToTrade(req)
#----------------------------------------------------------------------
def qryOrder(self):
    """Query working orders."""
    req = {
        'msgtype': 'ORS',
        '12': self.userId,
        '11': self.accountNo,
    }
    self.shzdSendInfoToTrade(req)
#----------------------------------------------------------------------
def close(self):
    """Shut the API down, but only if it was ever initialised."""
    if not self.inited:
        return
    self.release()
#----------------------------------------------------------------------
def printDict(d):
    """Print a separator line, then the dict's items sorted by key.

    Fix: the original used Python-2-only ``print`` statements and the
    ``l = d.keys(); l.sort()`` pattern (``dict.keys()`` is a view and
    has no ``sort()`` on Python 3).  This version produces identical
    output on both Python 2 and 3.
    """
    print('-' * 50)
    for k in sorted(d.keys()):
        print('%s : %s' % (k, d[k]))
| mit |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/setuptools/command/develop.py | 49 | 8046 | from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsError, DistutilsOptionError
import os
import glob
import io
from setuptools.extern import six
from pkg_resources import Distribution, PathMetadata, normalize_path
from setuptools.command.easy_install import easy_install
from setuptools import namespaces
import setuptools
class develop(namespaces.DevelopInstaller, easy_install):
    """Set up package for development"""

    description = "install package in 'development mode'"

    user_options = easy_install.user_options + [
        ("uninstall", "u", "Uninstall this source package"),
        ("egg-path=", None, "Set the path to be used in the .egg-link file"),
    ]

    boolean_options = easy_install.boolean_options + ['uninstall']

    command_consumes_arguments = False  # override base

    def run(self):
        # 'develop --uninstall' reverses a previous 'develop'.
        if self.uninstall:
            self.multi_version = True
            self.uninstall_link()
            self.uninstall_namespaces()
        else:
            self.install_for_development()
        self.warn_deprecated_options()

    def initialize_options(self):
        self.uninstall = None
        self.egg_path = None
        easy_install.initialize_options(self)
        self.setup_path = None
        self.always_copy_from = '.'  # always copy eggs installed in curdir

    def finalize_options(self):
        # Derive everything (egg name, link file path, egg base) from
        # the finalized egg_info command.
        ei = self.get_finalized_command("egg_info")
        if ei.broken_egg_info:
            template = "Please rename %r to %r before using 'develop'"
            args = ei.egg_info, ei.broken_egg_info
            raise DistutilsError(template % args)
        self.args = [ei.egg_name]

        easy_install.finalize_options(self)
        self.expand_basedirs()
        self.expand_dirs()
        # pick up setup-dir .egg files only: no .egg-info
        self.package_index.scan(glob.glob('*.egg'))

        egg_link_fn = ei.egg_name + '.egg-link'
        self.egg_link = os.path.join(self.install_dir, egg_link_fn)
        self.egg_base = ei.egg_base
        if self.egg_path is None:
            self.egg_path = os.path.abspath(ei.egg_base)

        # --egg-path must resolve back to the egg base directory.
        target = normalize_path(self.egg_base)
        egg_path = normalize_path(os.path.join(self.install_dir,
                                               self.egg_path))
        if egg_path != target:
            raise DistutilsOptionError(
                "--egg-path must be a relative path from the install"
                " directory to " + target
            )

        # Make a distribution for the package's source
        self.dist = Distribution(
            target,
            PathMetadata(target, os.path.abspath(ei.egg_info)),
            project_name=ei.egg_name
        )

        self.setup_path = self._resolve_setup_path(
            self.egg_base,
            self.install_dir,
            self.egg_path,
        )

    @staticmethod
    def _resolve_setup_path(egg_base, install_dir, egg_path):
        """
        Generate a path from egg_base back to '.' where the
        setup script resides and ensure that path points to the
        setup path from $install_dir/$egg_path.
        """
        path_to_setup = egg_base.replace(os.sep, '/').rstrip('/')
        if path_to_setup != os.curdir:
            # One '../' per path component, plus one for the egg dir.
            path_to_setup = '../' * (path_to_setup.count('/') + 1)
        resolved = normalize_path(
            os.path.join(install_dir, egg_path, path_to_setup)
        )
        if resolved != normalize_path(os.curdir):
            raise DistutilsOptionError(
                "Can't get a consistent path to setup script from"
                " installation directory", resolved, normalize_path(os.curdir))
        return path_to_setup

    def install_for_development(self):
        """Build in place (or via 2to3) and link the project into site-packages."""
        if six.PY3 and getattr(self.distribution, 'use_2to3', False):
            # If we run 2to3 we can not do this inplace:

            # Ensure metadata is up-to-date
            self.reinitialize_command('build_py', inplace=0)
            self.run_command('build_py')
            bpy_cmd = self.get_finalized_command("build_py")
            build_path = normalize_path(bpy_cmd.build_lib)

            # Build extensions
            self.reinitialize_command('egg_info', egg_base=build_path)
            self.run_command('egg_info')

            self.reinitialize_command('build_ext', inplace=0)
            self.run_command('build_ext')

            # Fixup egg-link and easy-install.pth
            ei_cmd = self.get_finalized_command("egg_info")
            self.egg_path = build_path
            self.dist.location = build_path
            # XXX
            self.dist._provider = PathMetadata(build_path, ei_cmd.egg_info)
        else:
            # Without 2to3 inplace works fine:
            self.run_command('egg_info')

            # Build extensions in-place
            self.reinitialize_command('build_ext', inplace=1)
            self.run_command('build_ext')

        self.install_site_py()  # ensure that target dir is site-safe
        if setuptools.bootstrap_install_from:
            self.easy_install(setuptools.bootstrap_install_from)
            setuptools.bootstrap_install_from = None

        self.install_namespaces()

        # create an .egg-link in the installation dir, pointing to our egg
        log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
        if not self.dry_run:
            with open(self.egg_link, "w") as f:
                f.write(self.egg_path + "\n" + self.setup_path)
        # postprocess the installed distro, fixing up .pth, installing scripts,
        # and handling requirements
        self.process_distribution(None, self.dist, not self.no_deps)

    def uninstall_link(self):
        """Remove the .egg-link file and the .pth entry, after sanity checks."""
        if os.path.exists(self.egg_link):
            log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
            egg_link_file = open(self.egg_link)
            contents = [line.rstrip() for line in egg_link_file]
            egg_link_file.close()
            # Refuse to delete a link that points somewhere else.
            if contents not in ([self.egg_path],
                                [self.egg_path, self.setup_path]):
                log.warn("Link points to %s: uninstall aborted", contents)
                return
            if not self.dry_run:
                os.unlink(self.egg_link)
        if not self.dry_run:
            self.update_pth(self.dist)  # remove any .pth link to us
        if self.distribution.scripts:
            # XXX should also check for entry point scripts!
            log.warn("Note: you must uninstall or replace scripts manually!")

    def install_egg_scripts(self, dist):
        """Install scripts for our own dist from the source tree, not the egg."""
        if dist is not self.dist:
            # Installing a dependency, so fall back to normal behavior
            return easy_install.install_egg_scripts(self, dist)

        # create wrapper scripts in the script dir, pointing to dist.scripts

        # new-style...
        self.install_wrapper_scripts(dist)

        # ...and old-style
        for script_name in self.distribution.scripts or []:
            script_path = os.path.abspath(convert_path(script_name))
            script_name = os.path.basename(script_path)
            with io.open(script_path) as strm:
                script_text = strm.read()
            self.install_script(dist, script_name, script_text, script_path)

    def install_wrapper_scripts(self, dist):
        # Wrap the dist so scripts do not pin an exact version.
        dist = VersionlessRequirement(dist)
        return easy_install.install_wrapper_scripts(self, dist)
class VersionlessRequirement(object):
    """
    Adapt a pkg_resources.Distribution to simply return the project
    name as the 'requirement' so that scripts will work across
    multiple versions.

    >>> dist = Distribution(project_name='foo', version='1.0')
    >>> str(dist.as_requirement())
    'foo==1.0'
    >>> adapted_dist = VersionlessRequirement(dist)
    >>> str(adapted_dist.as_requirement())
    'foo'
    """

    def __init__(self, dist):
        # Keep the wrapped distribution; every attribute access other
        # than as_requirement() is forwarded to it untouched.
        self.__dist = dist

    def __getattr__(self, name):
        return getattr(self.__dist, name)

    def as_requirement(self):
        # Drop the '==version' pin: just the project name.
        return self.__dist.project_name
| mit |
midma101/AndIWasJustGoingToBed | .venv/lib/python2.7/site-packages/openid/consumer/discover.py | 142 | 16062 | # -*- test-case-name: openid.test.test_discover -*-
"""Functions to discover OpenID endpoints from identifiers.
"""
__all__ = [
'DiscoveryFailure',
'OPENID_1_0_NS',
'OPENID_1_0_TYPE',
'OPENID_1_1_TYPE',
'OPENID_2_0_TYPE',
'OPENID_IDP_2_0_TYPE',
'OpenIDServiceEndpoint',
'discover',
]
import urlparse
from openid import oidutil, fetchers, urinorm
from openid import yadis
from openid.yadis.etxrd import nsTag, XRDSError, XRD_NS_2_0
from openid.yadis.services import applyFilter as extractServices
from openid.yadis.discover import discover as yadisDiscover
from openid.yadis.discover import DiscoveryFailure
from openid.yadis import xrires, filters
from openid.yadis import xri
from openid.consumer import html_parse
OPENID_1_0_NS = 'http://openid.net/xmlns/1.0'
OPENID_IDP_2_0_TYPE = 'http://specs.openid.net/auth/2.0/server'
OPENID_2_0_TYPE = 'http://specs.openid.net/auth/2.0/signon'
OPENID_1_1_TYPE = 'http://openid.net/signon/1.1'
OPENID_1_0_TYPE = 'http://openid.net/signon/1.0'
from openid.message import OPENID1_NS as OPENID_1_0_MESSAGE_NS
from openid.message import OPENID2_NS as OPENID_2_0_MESSAGE_NS
class OpenIDServiceEndpoint(object):
    """Object representing an OpenID service endpoint.

    @ivar identity_url: the verified identifier.
    @ivar canonicalID: For XRI, the persistent identifier.
    """

    # OpenID service type URIs, listed in order of preference.  The
    # ordering of this list affects yadis and XRI service discovery.
    openid_type_uris = [
        OPENID_IDP_2_0_TYPE,
        OPENID_2_0_TYPE,
        OPENID_1_1_TYPE,
        OPENID_1_0_TYPE,
        ]

    def __init__(self):
        self.claimed_id = None
        self.server_url = None
        self.type_uris = []
        self.local_id = None
        self.canonicalID = None
        self.used_yadis = False  # whether this came from an XRDS
        self.display_identifier = None

    def usesExtension(self, extension_uri):
        # An extension is "used" if its type URI was advertised.
        return extension_uri in self.type_uris

    def preferredNamespace(self):
        # OpenID 2 wins if either 2.0 type is present.
        if (OPENID_IDP_2_0_TYPE in self.type_uris or
            OPENID_2_0_TYPE in self.type_uris):
            return OPENID_2_0_MESSAGE_NS
        else:
            return OPENID_1_0_MESSAGE_NS

    def supportsType(self, type_uri):
        """Does this endpoint support this type?

        I consider C{/server} endpoints to implicitly support C{/signon}.
        """
        return (
            (type_uri in self.type_uris) or
            (type_uri == OPENID_2_0_TYPE and self.isOPIdentifier())
            )

    def getDisplayIdentifier(self):
        """Return the display_identifier if set, else return the claimed_id.
        """
        if self.display_identifier is not None:
            return self.display_identifier
        if self.claimed_id is None:
            return None
        else:
            # Strip any fragment from the claimed identifier.
            return urlparse.urldefrag(self.claimed_id)[0]

    def compatibilityMode(self):
        # "Compatibility mode" == talking OpenID 1.x.
        return self.preferredNamespace() != OPENID_2_0_MESSAGE_NS

    def isOPIdentifier(self):
        return OPENID_IDP_2_0_TYPE in self.type_uris

    def parseService(self, yadis_url, uri, type_uris, service_element):
        """Set the state of this object based on the contents of the
        service element."""
        self.type_uris = type_uris
        self.server_url = uri
        self.used_yadis = True

        if not self.isOPIdentifier():
            # XXX: This has crappy implications for Service elements
            # that contain both 'server' and 'signon' Types.  But
            # that's a pathological configuration anyway, so I don't
            # think I care.
            self.local_id = findOPLocalIdentifier(service_element,
                                                  self.type_uris)
            self.claimed_id = yadis_url

    def getLocalID(self):
        """Return the identifier that should be sent as the
        openid.identity parameter to the server."""
        # I looked at this conditional and thought "ah-hah! there's the bug!"
        # but Python actually makes that one big expression somehow, i.e.
        # "x is x is x" is not the same thing as "(x is x) is x".
        # That's pretty weird, dude.  -- kmt, 1/07
        if (self.local_id is self.canonicalID is None):
            return self.claimed_id
        else:
            return self.local_id or self.canonicalID

    # NOTE: the from* constructors below are assigned via classmethod()
    # rather than decorated, for ancient-Python compatibility.
    def fromBasicServiceEndpoint(cls, endpoint):
        """Create a new instance of this class from the endpoint
        object passed in.

        @return: None or OpenIDServiceEndpoint for this endpoint object"""
        type_uris = endpoint.matchTypes(cls.openid_type_uris)

        # If any Type URIs match and there is an endpoint URI
        # specified, then this is an OpenID endpoint
        if type_uris and endpoint.uri is not None:
            openid_endpoint = cls()
            openid_endpoint.parseService(
                endpoint.yadis_url,
                endpoint.uri,
                endpoint.type_uris,
                endpoint.service_element)
        else:
            openid_endpoint = None

        return openid_endpoint

    fromBasicServiceEndpoint = classmethod(fromBasicServiceEndpoint)

    def fromHTML(cls, uri, html):
        """Parse the given document as HTML looking for an OpenID <link
        rel=...>

        @rtype: [OpenIDServiceEndpoint]
        """
        discovery_types = [
            (OPENID_2_0_TYPE, 'openid2.provider', 'openid2.local_id'),
            (OPENID_1_1_TYPE, 'openid.server', 'openid.delegate'),
            ]

        link_attrs = html_parse.parseLinkAttrs(html)
        services = []
        for type_uri, op_endpoint_rel, local_id_rel in discovery_types:
            op_endpoint_url = html_parse.findFirstHref(
                link_attrs, op_endpoint_rel)
            if op_endpoint_url is None:
                continue

            service = cls()
            service.claimed_id = uri
            service.local_id = html_parse.findFirstHref(
                link_attrs, local_id_rel)
            service.server_url = op_endpoint_url
            service.type_uris = [type_uri]

            services.append(service)

        return services

    fromHTML = classmethod(fromHTML)

    def fromXRDS(cls, uri, xrds):
        """Parse the given document as XRDS looking for OpenID services.

        @rtype: [OpenIDServiceEndpoint]

        @raises XRDSError: When the XRDS does not parse.

        @since: 2.1.0
        """
        return extractServices(uri, xrds, cls)

    fromXRDS = classmethod(fromXRDS)

    def fromDiscoveryResult(cls, discoveryResult):
        """Create endpoints from a DiscoveryResult.

        @type discoveryResult: L{DiscoveryResult}

        @rtype: list of L{OpenIDServiceEndpoint}

        @raises XRDSError: When the XRDS does not parse.

        @since: 2.1.0
        """
        if discoveryResult.isXRDS():
            method = cls.fromXRDS
        else:
            method = cls.fromHTML

        return method(discoveryResult.normalized_uri,
                      discoveryResult.response_text)

    fromDiscoveryResult = classmethod(fromDiscoveryResult)

    def fromOPEndpointURL(cls, op_endpoint_url):
        """Construct an OP-Identifier OpenIDServiceEndpoint object for
        a given OP Endpoint URL

        @param op_endpoint_url: The URL of the endpoint
        @rtype: OpenIDServiceEndpoint
        """
        service = cls()
        service.server_url = op_endpoint_url
        service.type_uris = [OPENID_IDP_2_0_TYPE]
        return service

    fromOPEndpointURL = classmethod(fromOPEndpointURL)

    def __str__(self):
        return ("<%s.%s "
                "server_url=%r "
                "claimed_id=%r "
                "local_id=%r "
                "canonicalID=%r "
                "used_yadis=%s "
                ">"
                % (self.__class__.__module__, self.__class__.__name__,
                   self.server_url,
                   self.claimed_id,
                   self.local_id,
                   self.canonicalID,
                   self.used_yadis))
def findOPLocalIdentifier(service_element, type_uris):
    """Find the OP-Local Identifier for this xrd:Service element.

    This considers openid:Delegate to be a synonym for xrd:LocalID if
    both OpenID 1.X and OpenID 2.0 types are present. If only OpenID
    1.X is present, it returns the value of openid:Delegate. If only
    OpenID 2.0 is present, it returns the value of xrd:LocalID. If
    there is more than one LocalID tag and the values are different,
    it raises a DiscoveryFailure. This is also triggered when the
    xrd:LocalID and openid:Delegate tags are different.

    @param service_element: The xrd:Service element
    @type service_element: ElementTree.Node

    @param type_uris: The xrd:Type values present in this service
        element. This function could extract them, but higher level
        code needs to do that anyway.
    @type type_uris: [str]

    @raises DiscoveryFailure: when discovery fails.

    @returns: The OP-Local Identifier for this service element, if one
        is present, or None otherwise.
    @rtype: str or unicode or NoneType
    """
    # XXX: Test this function on its own!

    # Build the list of tags that could contain the OP-Local Identifier
    local_id_tags = []
    if (OPENID_1_1_TYPE in type_uris or
        OPENID_1_0_TYPE in type_uris):
        local_id_tags.append(nsTag(OPENID_1_0_NS, 'Delegate'))

    if OPENID_2_0_TYPE in type_uris:
        local_id_tags.append(nsTag(XRD_NS_2_0, 'LocalID'))

    # Walk through all the matching tags and make sure that they all
    # have the same value
    local_id = None
    for local_id_tag in local_id_tags:
        for local_id_element in service_element.findall(local_id_tag):
            if local_id is None:
                local_id = local_id_element.text
            elif local_id != local_id_element.text:
                format = 'More than one %r tag found in one service element'
                message = format % (local_id_tag,)
                raise DiscoveryFailure(message, None)

    return local_id
def normalizeURL(url):
    """Normalize a URL, converting normalization failures to
    DiscoveryFailure.

    Fix: ``except ValueError, why`` and the ``why[0]`` exception
    indexing are Python-2-only spellings; ``except ... as`` plus
    ``why.args[0]`` behaves identically on Python 2.6+ and also works
    on Python 3.
    """
    try:
        normalized = urinorm.urinorm(url)
    except ValueError as why:
        raise DiscoveryFailure('Normalizing identifier: %s' % (why.args[0],), None)
    else:
        # Discovery operates on the identifier with the fragment removed.
        return urlparse.urldefrag(normalized)[0]
def normalizeXRI(xri):
    """Normalize an XRI, stripping its scheme if present"""
    prefix = "xri://"
    if xri.startswith(prefix):
        return xri[len(prefix):]
    return xri
def arrangeByType(service_list, preferred_types):
    """Rearrange service_list in a new list so services are ordered by
    types listed in preferred_types.  Return the new list.

    The sort is stable: services with equally preferred types keep
    their original relative order, and services matching none of the
    preferred types sort last.

    Fix: dropped the local Python-2.2 compatibility shim that shadowed
    the builtin ``enumerate`` (builtin since 2.3), and replaced the
    manual in-place de-tupling loop with a comprehension.
    """
    def bestMatchingService(service):
        """Return the index of the first matching type, or
        len(preferred_types) if no type matches."""
        for i, t in enumerate(preferred_types):
            if t in service.type_uris:
                return i
        return len(preferred_types)

    # Decorate with (priority, original index, service) so ties are
    # broken by original position and services are never compared.
    prio_services = sorted(
        (bestMatchingService(s), orig_index, s)
        for (orig_index, s) in enumerate(service_list)
    )
    return [s for (_prio, _idx, s) in prio_services]
def getOPOrUserServices(openid_services):
    """Extract OP Identifier services.  If none found, return the
    rest, sorted with most preferred first according to
    OpenIDServiceEndpoint.openid_type_uris.

    openid_services is a list of OpenIDServiceEndpoint objects.

    Returns a list of OpenIDServiceEndpoint objects."""
    op_services = arrangeByType(openid_services, [OPENID_IDP_2_0_TYPE])
    if op_services:
        return op_services
    return arrangeByType(openid_services,
                         OpenIDServiceEndpoint.openid_type_uris)
def discoverYadis(uri):
    """Discover OpenID services for a URI. Tries Yadis and falls back
    on old-style <link rel='...'> discovery if Yadis fails.

    @param uri: normalized identity URL
    @type uri: str

    @return: (claimed_id, services)
    @rtype: (str, list(OpenIDServiceEndpoint))

    @raises DiscoveryFailure: when discovery fails.
    """
    # Might raise a yadis.discover.DiscoveryFailure if no document
    # came back for that URI at all.  I don't think falling back
    # to OpenID 1.0 discovery on the same URL will help, so don't
    # bother to catch it.
    response = yadisDiscover(uri)

    yadis_url = response.normalized_uri
    body = response.response_text
    try:
        openid_services = OpenIDServiceEndpoint.fromXRDS(yadis_url, body)
    except XRDSError:
        # Does not parse as a Yadis XRDS file
        openid_services = []

    if not openid_services:
        # Either not an XRDS or there are no OpenID services.

        if response.isXRDS():
            # if we got the Yadis content-type or followed the Yadis
            # header, re-fetch the document without following the Yadis
            # header, with no Accept header.
            return discoverNoYadis(uri)

        # Try to parse the response as HTML.
        # <link rel="...">
        openid_services = OpenIDServiceEndpoint.fromHTML(yadis_url, body)

    return (yadis_url, getOPOrUserServices(openid_services))
def discoverXRI(iname):
    """Discover OpenID services for an XRI i-name via the proxy resolver."""
    endpoints = []
    iname = normalizeXRI(iname)
    try:
        canonicalID, services = xrires.ProxyResolver().query(
            iname, OpenIDServiceEndpoint.openid_type_uris)

        if canonicalID is None:
            raise XRDSError('No CanonicalID found for XRI %r' % (iname,))

        flt = filters.mkFilter(OpenIDServiceEndpoint)
        for service_element in services:
            endpoints.extend(flt.getServiceEndpoints(iname, service_element))
    except XRDSError:
        oidutil.log('xrds error on ' + iname)

    # NOTE(review): if query() itself raises, `canonicalID` is unbound
    # here; that is harmless only because `endpoints` is then empty.
    for endpoint in endpoints:
        # Is there a way to pass this through the filter to the endpoint
        # constructor instead of tacking it on after?
        endpoint.canonicalID = canonicalID
        endpoint.claimed_id = canonicalID
        endpoint.display_identifier = iname

    # FIXME: returned xri should probably be in some normal form
    return iname, getOPOrUserServices(endpoints)
def discoverNoYadis(uri):
    """OpenID-1-style discovery: fetch the page and scan its HTML for
    <link rel=...> tags, without any Yadis processing."""
    http_resp = fetchers.fetch(uri)
    if http_resp.status not in (200, 206):
        message = ('HTTP Response status from identity URL host is not 200. '
                   'Got status %r' % (http_resp.status,))
        raise DiscoveryFailure(message, http_resp)

    # The claimed identifier is the URL after any redirects.
    claimed_id = http_resp.final_url
    services = OpenIDServiceEndpoint.fromHTML(claimed_id, http_resp.body)
    return claimed_id, services
def discoverURI(uri):
    """Discover OpenID services for a URL identifier, defaulting the
    scheme to http:// when none is given."""
    parsed = urlparse.urlparse(uri)
    scheme, netloc = parsed[0], parsed[1]
    if scheme and netloc:
        if scheme not in ['http', 'https']:
            raise DiscoveryFailure('URI scheme is not HTTP or HTTPS', None)
    else:
        # No scheme/host: treat the input as a bare host/path.
        uri = 'http://' + uri

    uri = normalizeURL(uri)
    claimed_id, openid_services = discoverYadis(uri)
    claimed_id = normalizeURL(claimed_id)
    return claimed_id, openid_services
def discover(identifier):
    """Dispatch discovery on whether the identifier is an XRI or a URL."""
    if xri.identifierScheme(identifier) == "XRI":
        return discoverXRI(identifier)
    return discoverURI(identifier)
| mit |
ggiscan/OnlineClerk | squashcity/tests/test_db.py | 4 | 2377 | '''
Created on Nov 19, 2015
@author: george
'''
import unittest
from core.model import User
import core.dbman as dbman
from squashcity.model import SquashCityRequest
import sys
from datetime import datetime
class TestDBModel(unittest.TestCase):
    """End-to-end persistence test for users, products and requests,
    run against a fresh in-memory database session."""

    def setUp(self):
        # A new in-memory session per test keeps cases isolated.
        self.session = dbman.new_session('memory')
        self.product = 'SQUASHCITY'
        self.user = 'George'

    def test_squash(self):
        # The helper steps share one session and build on each other's
        # data, so they must run in this order.
        self.create_users_and_products()
        self.create_requests()
        self.active_product_requests()
        self.active_user_requests()

    def create_users_and_products(self):
        dbman.create_product(self.product, self.session)
        dbman.register_user(self.product, self.user, session=self.session)

    def create_requests(self):
        session = self.session
        try:
            user = session.query(User).filter(User.id==self.user).one()
            user_product = user.user_products[0]
            # below request is active (open-ended, no closing_date)
            squashcity_req = SquashCityRequest(request_type=self.product,
                                               userproduct_id = user_product.id,
                                               start_date = datetime.now(),
                                               end_date = datetime(2015, 11, 5, 22, 36))
            session.add(squashcity_req)
            # below request is inactive (closing_date already set)
            squashcity_req = SquashCityRequest(request_type=self.product,
                                               userproduct_id = user_product.id,
                                               closing_date = datetime.now(),
                                               start_date = datetime.now(),
                                               end_date = None)
            session.add(squashcity_req)
            session.commit()
        except:
            # NOTE(review): bare except — any failure (even KeyboardInterrupt)
            # is converted into an assertion carrying the original message.
            self.assertTrue(False, sys.exc_info()[1])

    def active_product_requests(self):
        # Only the open request should be reported for the product.
        requests = dbman.active_product_requests(self.product, session=self.session)
        self.assertEqual(1, len(requests))
        self.assertIsNone(requests[0].closing_date)

    def active_user_requests(self):
        # Same expectation when filtering by user.
        requests = dbman.active_user_requests(self.user, self.product, session=self.session)
        self.assertEqual(1, len(requests))
        self.assertIsNone(requests[0].closing_date)
| gpl-2.0 |
ryrzy/p75xx_ICS | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
# pid -> comm table, seeded with the idle task; populated elsewhere
# as trace events are processed.
threads = { 0 : "idle"}


def thread_name(pid):
    """Return a human-readable 'comm:pid' label for a task."""
    comm = threads[pid]
    return "%s:%d" % (comm, pid)
class RunqueueEventUnknown:
    """Fallback event used when a runqueue change has no specific cause."""

    @staticmethod
    def color():
        # None lets the GUI fall back to its default color.
        return None

    def __repr__(self):
        return "unknown"
class RunqueueEventSleep:
    """A task left the runqueue (went to sleep)."""

    def __init__(self, sleeper):
        self.sleeper = sleeper

    @staticmethod
    def color():
        return (0, 0, 0xff)  # blue

    def __repr__(self):
        return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
    """A sleeping task was woken and re-entered the runqueue."""

    def __init__(self, wakee):
        self.wakee = wakee

    @staticmethod
    def color():
        return (0xff, 0xff, 0)  # yellow

    def __repr__(self):
        return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
    """A newly forked task appeared on the runqueue."""

    def __init__(self, child):
        self.child = child

    @staticmethod
    def color():
        return (0, 0xff, 0)  # green

    def __repr__(self):
        return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
    """A task was migrated onto this CPU's runqueue."""

    def __init__(self, new):
        self.new = new

    @staticmethod
    def color():
        return (0, 0xf0, 0xff)  # light cyan

    def __repr__(self):
        return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
    """A task was migrated away from this CPU's runqueue."""

    def __init__(self, old):
        self.old = old

    @staticmethod
    def color():
        return (0xff, 0, 0xff)  # magenta

    def __repr__(self):
        return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
    """Snapshot of one CPU runqueue: the tuple of runnable pids plus
    the event that produced this state.

    Fixes:
    - __repr__ used to call self.origin_tostring(), a method that does
      not exist anywhere, so repr() always raised AttributeError.
    - the ``tasks`` default was the mutable ``[0]``; a tuple default
      gives the exact same initial state without the mutable-default
      pitfall.
    """

    def __init__(self, tasks=(0,), event=RunqueueEventUnknown()):
        # Store a tuple so snapshots can be shared between time slices
        # without aliasing risk.
        self.tasks = tuple(tasks)
        self.event = event

    def sched_switch(self, prev, prev_state, next):
        """Return the snapshot after a sched_switch from prev to next."""
        event = RunqueueEventUnknown()

        # Nothing changed: prev stays runnable and both are already here.
        if taskState(prev_state) == "R" and next in self.tasks \
            and prev in self.tasks:
            return self

        if taskState(prev_state) != "R":
            event = RunqueueEventSleep(prev)

        next_tasks = list(self.tasks[:])
        if prev in self.tasks:
            if taskState(prev_state) != "R":
                next_tasks.remove(prev)
        elif taskState(prev_state) == "R":
            next_tasks.append(prev)

        if next not in next_tasks:
            next_tasks.append(next)

        return RunqueueSnapshot(next_tasks, event)

    def migrate_out(self, old):
        """Return the snapshot with *old* removed (migrated away)."""
        if old not in self.tasks:
            return self
        next_tasks = [task for task in self.tasks if task != old]

        return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))

    def __migrate_in(self, new, event):
        if new in self.tasks:
            # Already present: just record the event on this snapshot.
            self.event = event
            return self
        next_tasks = self.tasks[:] + tuple([new])

        return RunqueueSnapshot(next_tasks, event)

    def migrate_in(self, new):
        return self.__migrate_in(new, RunqueueMigrateIn(new))

    def wake_up(self, new):
        return self.__migrate_in(new, RunqueueEventWakeup(new))

    def wake_up_new(self, new):
        return self.__migrate_in(new, RunqueueEventFork(new))

    def load(self):
        """ Provide the number of tasks on the runqueue.
            Don't count idle"""
        return len(self.tasks) - 1

    def __repr__(self):
        # Bug fix: previously called the nonexistent origin_tostring().
        return "%r (%s)" % (self.tasks, self.event)
class TimeSlice:
    """State of every CPU runqueue over one time interval.

    Slices form a linked list through *prev*; each new slice starts
    from a copy of the previous slice's runqueues and total load.
    """

    def __init__(self, start, prev):
        self.start = start
        self.prev = prev
        self.end = start
        # cpus that triggered the event
        self.event_cpus = []
        if prev is not None:
            # Inherit state from the previous slice.
            self.total_load = prev.total_load
            self.rqs = prev.rqs.copy()
        else:
            # First slice: empty runqueue per CPU, zero load.
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0

    def __update_total_load(self, old_rq, new_rq):
        diff = new_rq.load() - old_rq.load()
        self.total_load += diff

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        """Apply a sched_switch on *cpu*; append self to ts_list if it changed anything."""
        old_rq = self.prev.rqs[cpu]
        new_rq = old_rq.sched_switch(prev, prev_state, next)

        if old_rq is new_rq:
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        """Move task *new* from old_cpu's runqueue to new_cpu's."""
        if old_cpu == new_cpu:
            return
        old_rq = self.prev.rqs[old_cpu]
        out_rq = old_rq.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(old_rq, out_rq)

        new_rq = self.prev.rqs[new_cpu]
        in_rq = new_rq.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(new_rq, in_rq)

        ts_list.append(self)

        # Only flag the source CPU if the task actually left it.
        if old_rq is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        """Record a wakeup (or fork, if *fork* is true) of *pid* on *cpu*."""
        old_rq = self.prev.rqs[cpu]
        if fork:
            new_rq = old_rq.wake_up_new(pid)
        else:
            new_rq = old_rq.wake_up(pid)

        if new_rq is old_rq:
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        # Close this slice at time t and chain a new one after it.
        self.end = t
        return TimeSlice(t, self)
class TimeSliceList(UserList):
    """Ordered list of TimeSlice objects plus search and drawing helpers."""

    def __init__(self, arg=None):
        # Avoid the shared mutable-default pitfall of the original
        # signature `def __init__(self, arg = [])`.
        self.data = [] if arg is None else arg

    def get_time_slice(self, ts):
        """Open and return the slice starting at timestamp *ts*, chained
        onto the most recent slice (or a fresh chain when empty)."""
        if len(self.data) == 0:
            ts_slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            ts_slice = self.data[-1].next(ts)
        return ts_slice

    def find_time_slice(self, ts):
        """Binary-search the index of the slice containing timestamp *ts*.

        Returns -1 when no slice contains it.
        """
        if not self.data:
            # Bug fix: the loop below unconditionally indexed self.data,
            # raising IndexError when the list was empty.
            return -1
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            if start == end or start == end - 1:
                searching = False

            # Floor division keeps the index an int (`//` is identical to
            # the original `/` under Python 2 and also correct on Python 3).
            i = (end + start) // 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue

            if self.data[i].end < ts:
                start = i
            elif self.data[i].start > ts:
                end = i

        return found

    def set_root_win(self, win):
        self.root_win = win

    def mouse_down(self, cpu, t):
        """Show a summary of *cpu*'s runqueue at time *t* in the GUI."""
        idx = self.find_time_slice(t)
        if idx == -1:
            return

        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        raw += "Timestamp : %d.%06d\n" % (ts.start // (10 ** 9), (ts.start % (10 ** 9)) // 1000)
        # Bug fix: timestamps are nanoseconds (see the line above), so
        # microseconds are ns // 10**3.  The original divided by 10**6,
        # printing milliseconds mislabelled as "us".
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) // (10 ** 3))
        raw += "Load = %d\n" % rq.load()
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)

        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        """Paint one (cpu, slice) cell, coloured by relative load."""
        rq = slice.rqs[cpu]

        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0

        # Higher load -> redder cell.
        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)

        top_color = None
        if cpu in slice.event_cpus:
            top_color = rq.event.color()

        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        """Repaint every slice overlapping the [start, end] time window."""
        i = self.find_time_slice(start)
        if i == -1:
            return

        for i in xrange(i, len(self.data)):
            timeslice = self.data[i]
            if timeslice.start > end:
                return

            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        """Return (first start, last end), or (0, 0) when empty."""
        if len(self.data) == 0:
            return (0, 0)

        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        """Return the highest cpu number seen in the final slice."""
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu
        return max_cpu
class SchedEventProxy:
    """Receives raw trace events and feeds them into the timeslice list."""

    def __init__(self):
        # Last task known to be running on each cpu (-1 = unknown yet).
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
            we logged. Otherwise we may have missed traces """
        on_cpu_task = self.current_tsk[headers.cpu]
        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
                (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)

        # threads is a module-level pid -> comm map kept fresh here.
        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm
        self.current_tsk[headers.cpu] = next_pid

        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        """Record a task migration between cpus at the event timestamp."""
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        """Record a successful wakeup; fork is 1 for sched_wakeup_new."""
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
    """perf script hook: called once before any event is processed."""
    global parser
    parser = SchedEventProxy()
def trace_end():
    """perf script hook: all events processed -- launch the wx GUI."""
    app = wx.App(False)
    timeslices = parser.timeslices
    # RootFrame displays the collected timeslices; MainLoop blocks until
    # the window is closed.
    frame = RootFrame(timeslices, "Migration")
    app.MainLoop()
# The handlers below are part of the perf-script event API but carry no
# information the migration view needs; they deliberately do nothing.

def sched__sched_stat_runtime(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, runtime, vruntime):
    pass

def sched__sched_stat_iowait(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, delay):
    pass

def sched__sched_stat_sleep(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, delay):
    pass

def sched__sched_stat_wait(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, delay):
    pass

def sched__sched_process_fork(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    parent_comm, parent_pid, child_comm, child_pid):
    pass

def sched__sched_process_wait(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio):
    pass

def sched__sched_process_exit(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio):
    pass

def sched__sched_process_free(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio):
    pass
def sched__sched_migrate_task(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio, orig_cpu,
    dest_cpu):
    """Forward a sched_migrate_task trace event to the global parser."""
    hdr = EventHeaders(common_cpu, common_secs, common_nsecs,
                       common_pid, common_comm)
    parser.migrate(hdr, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    prev_comm, prev_pid, prev_prio, prev_state,
    next_comm, next_pid, next_prio):
    """Forward a sched_switch trace event to the global parser."""
    hdr = EventHeaders(common_cpu, common_secs, common_nsecs,
                       common_pid, common_comm)
    parser.sched_switch(hdr, prev_comm, prev_pid, prev_prio, prev_state,
                        next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio, success,
    target_cpu):
    """A freshly forked task woke up: forward with fork flag set (1)."""
    parser.wake_up(EventHeaders(common_cpu, common_secs, common_nsecs,
                                common_pid, common_comm),
                   comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio, success,
    target_cpu):
    """An existing task woke up: forward with fork flag cleared (0)."""
    parser.wake_up(EventHeaders(common_cpu, common_secs, common_nsecs,
                                common_pid, common_comm),
                   comm, pid, success, target_cpu, 0)
# More perf-script API handlers that the migration view does not consume.

def sched__sched_wait_task(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio):
    pass

def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    ret):
    pass

def sched__sched_kthread_stop(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid):
    pass

def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm):
    # Catch-all for event types without a dedicated handler.
    pass
| gpl-2.0 |
patnolan33/Midterm_Perception | vendor/googletest/googletest/test/gtest_help_test.py | 2968 | 5856 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'

# Path of the helper binary exercised by every test below.
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'

# Malformed variants of --gtest_list_tests that should still trigger help.
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
                           re.sub('^--', '/', LIST_TESTS_FLAG),
                           re.sub('_', '-', LIST_TESTS_FLAG)]

INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'

# Probe the binary once to learn whether death tests were compiled in.
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
    [PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
# re.DOTALL lets '.*' span the newlines between flag descriptions, so the
# pattern effectively requires the flags to appear in this order.
HELP_REGEX = re.compile(
    FLAG_PREFIX + r'list_tests.*' +
    FLAG_PREFIX + r'filter=.*' +
    FLAG_PREFIX + r'also_run_disabled_tests.*' +
    FLAG_PREFIX + r'repeat=.*' +
    FLAG_PREFIX + r'shuffle.*' +
    FLAG_PREFIX + r'random_seed=.*' +
    FLAG_PREFIX + r'color=.*' +
    FLAG_PREFIX + r'print_time.*' +
    FLAG_PREFIX + r'output=.*' +
    FLAG_PREFIX + r'break_on_failure.*' +
    FLAG_PREFIX + r'throw_on_failure.*' +
    FLAG_PREFIX + r'catch_exceptions=0.*',
    re.DOTALL)
def RunWithFlag(flag):
  """Runs gtest_help_test_ with the given flag.

  Args:
    flag: the command-line flag to pass to gtest_help_test_, or None.

  Returns:
    the exit code and the text output as a tuple.
  """
  command = [PROGRAM_PATH] if flag is None else [PROGRAM_PATH, flag]
  child = gtest_test_utils.Subprocess(command)
  return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
  """Tests the --help flag and its equivalent forms."""

  def TestHelpFlag(self, flag):
    """Verifies correct behavior when help flag is specified.

    The right message must be printed and the tests must be
    skipped when the given flag is specified.

    Args:
      flag:  A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    self.assertEquals(0, exit_code)
    self.assert_(HELP_REGEX.search(output), output)

    # The stream_result_to flag is only advertised on Linux.
    if IS_LINUX:
      self.assert_(STREAM_RESULT_TO_FLAG in output, output)
    else:
      self.assert_(STREAM_RESULT_TO_FLAG not in output, output)

    # The death-test flag only shows up where death tests are compiled in
    # (and never on Windows).
    if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
      self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
    else:
      self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)

  def TestNonHelpFlag(self, flag):
    """Verifies correct behavior when no help flag is specified.

    Verifies that when no help flag is specified, the tests are run
    and the help message is not printed.

    Args:
      flag:  A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    # The helper binary is expected to fail its own tests, so a non-zero
    # exit code means tests actually ran.
    self.assert_(exit_code != 0)
    self.assert_(not HELP_REGEX.search(output), output)

  def testPrintsHelpWithFullFlag(self):
    self.TestHelpFlag('--help')

  def testPrintsHelpWithShortFlag(self):
    self.TestHelpFlag('-h')

  def testPrintsHelpWithQuestionFlag(self):
    self.TestHelpFlag('-?')

  def testPrintsHelpWithWindowsStyleQuestionFlag(self):
    self.TestHelpFlag('/?')

  def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
    self.TestHelpFlag(UNKNOWN_FLAG)

  def testPrintsHelpWithIncorrectFlagStyle(self):
    for incorrect_flag in INCORRECT_FLAG_VARIANTS:
      self.TestHelpFlag(incorrect_flag)

  def testRunsTestsWithoutHelpFlag(self):
    """Verifies that when no help flag is specified, the tests are run
    and the help message is not printed."""
    self.TestNonHelpFlag(None)

  def testRunsTestsWithGtestInternalFlag(self):
    """Verifies that the tests are run and no help message is printed when
    a flag starting with Google Test prefix and 'internal_' is supplied."""
    self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
# Script entry point: delegate to the gtest test runner.
if __name__ == '__main__':
  gtest_test_utils.Main()
| mit |
buntyke/Flask | microblog/flask/lib/python2.7/site-packages/sqlalchemy/testing/plugin/pytestplugin.py | 58 | 5836 | try:
# installed by bootstrap.py
import sqla_plugin_base as plugin_base
except ImportError:
# assume we're a package, use traditional import
from . import plugin_base
import pytest
import argparse
import inspect
import collections
import itertools
try:
import xdist # noqa
has_xdist = True
except ImportError:
has_xdist = False
def pytest_addoption(parser):
    """Register SQLAlchemy's command-line options with pytest."""
    sqla_group = parser.getgroup("sqlalchemy")

    def register(name, **kw):
        # plugin_base supplies optparse-style callbacks; wrap each one in
        # an argparse Action subclass on the fly.
        handler = kw.pop("callback", None)
        if handler:
            class _CallbackAction(argparse.Action):
                def __call__(self, parser, namespace,
                             values, option_string=None):
                    handler(option_string, values, parser)
            kw["action"] = _CallbackAction
        sqla_group.addoption(name, **kw)

    plugin_base.setup_options(register)
    plugin_base.read_config()
def pytest_configure(config):
    """Early configuration; also bootstraps xdist follower processes."""
    if hasattr(config, "slaveinput"):
        # We are an xdist follower: restore the config the master memoized
        # for us and point at our per-worker database.
        plugin_base.restore_important_follower_config(config.slaveinput)
        plugin_base.configure_follower(
            config.slaveinput["follower_ident"]
        )

    plugin_base.pre_begin(config.option)

    plugin_base.set_coverage_flag(bool(getattr(config.option,
                                               "cov_source", False)))

    plugin_base.set_skip_test(pytest.skip.Exception)
def pytest_sessionstart(session):
    # Options are fully parsed at this point; finish plugin_base startup.
    plugin_base.post_begin()
if has_xdist:
    # Each xdist worker gets its own numbered ident (test_1, test_2, ...)
    # and a matching follower database.
    _follower_count = itertools.count(1)

    def pytest_configure_node(node):
        # the master for each node fills slaveinput dictionary
        # which pytest-xdist will transfer to the subprocess
        plugin_base.memoize_important_follower_config(node.slaveinput)
        node.slaveinput["follower_ident"] = "test_%s" % next(_follower_count)
        from sqlalchemy.testing import provision
        provision.create_follower_db(node.slaveinput["follower_ident"])

    def pytest_testnodedown(node, error):
        # Drop the per-worker database when its node shuts down.
        from sqlalchemy.testing import provision
        provision.drop_follower_db(node.slaveinput["follower_ident"])
def pytest_collection_modifyitems(session, config, items):
    """Expand backend-marked test classes into per-database test cases,
    rewriting the collected item list in place."""
    # look for all those classes that specify __backend__ and
    # expand them out into per-database test cases.

    # this is much easier to do within pytest_pycollect_makeitem, however
    # pytest is iterating through cls.__dict__ as makeitem is
    # called which causes a "dictionary changed size" error on py3k.
    # I'd submit a pullreq for them to turn it into a list first, but
    # it's to suit the rather odd use case here which is that we are adding
    # new classes to a module on the fly.

    rebuilt_items = collections.defaultdict(list)
    # Drop anything not attached to a real (non-underscore) test class.
    items[:] = [
        item for item in
        items if isinstance(item.parent, pytest.Instance)
        and not item.parent.parent.name.startswith("_")]
    test_classes = set(item.parent for item in items)
    for test_class in test_classes:
        for sub_cls in plugin_base.generate_sub_tests(
                test_class.cls, test_class.parent.module):
            if sub_cls is not test_class.cls:
                list_ = rebuilt_items[test_class.cls]

                for inst in pytest.Class(
                        sub_cls.__name__,
                        parent=test_class.parent.parent).collect():
                    list_.extend(inst.collect())

    newitems = []
    for item in items:
        if item.parent.cls in rebuilt_items:
            # Substitute the expanded per-backend items exactly once.
            newitems.extend(rebuilt_items[item.parent.cls])
            rebuilt_items[item.parent.cls][:] = []
        else:
            newitems.append(item)

    # seems like the functions attached to a test class aren't sorted already?
    # is that true and why's that? (when using unittest, they're sorted)
    items[:] = sorted(newitems, key=lambda item: (
        item.parent.parent.parent.name,
        item.parent.parent.name,
        item.name
    ))
def pytest_pycollect_makeitem(collector, name, obj):
    """Tell pytest which collected objects count as SQLAlchemy tests."""
    if inspect.isclass(obj) and plugin_base.want_class(obj):
        return pytest.Class(name, parent=collector)
    if (inspect.isfunction(obj)
            and isinstance(collector, pytest.Instance)
            and plugin_base.want_method(collector.cls, obj)):
        return pytest.Function(name, parent=collector)
    # Anything else is not collected.
    return []
# Tracks which test class is currently executing, so class-level
# setup/teardown can be driven from the per-test hook below.
_current_class = None

def pytest_runtest_setup(item):
    """Per-test setup hook that also emulates class-level setup/teardown."""
    # here we seem to get called only based on what we collected
    # in pytest_collection_modifyitems. So to do class-based stuff
    # we have to tear that out.
    global _current_class

    if not isinstance(item, pytest.Function):
        return

    # ... so we're doing a little dance here to figure it out...
    if _current_class is None:
        class_setup(item.parent.parent)
        _current_class = item.parent.parent

        # this is needed for the class-level, to ensure that the
        # teardown runs after the class is completed with its own
        # class-level teardown...
        def finalize():
            global _current_class
            class_teardown(item.parent.parent)
            _current_class = None
        item.parent.parent.addfinalizer(finalize)

    test_setup(item)
def pytest_runtest_teardown(item):
    """Per-test teardown hook."""
    # ...but this works better as the hook here rather than
    # using a finalizer, as the finalizer seems to get in the way
    # of the test reporting failures correctly (you get a bunch of
    # py.test assertion stuff instead)
    test_teardown(item)
def test_setup(item):
    # Delegate per-test setup to plugin_base with the full test identity.
    plugin_base.before_test(item, item.parent.module.__name__,
                            item.parent.cls, item.name)

def test_teardown(item):
    plugin_base.after_test(item)

def class_setup(item):
    plugin_base.start_test_class(item.cls)

def class_teardown(item):
    plugin_base.stop_test_class(item.cls)
| mit |
simonwydooghe/ansible | lib/ansible/modules/network/cnos/cnos_reload.py | 52 | 3430 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to reload Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_reload
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Perform switch restart on devices running Lenovo CNOS
description:
- This module allows you to restart the switch using the current startup
configuration. The module is usually invoked after the running
configuration has been saved over the startup configuration.
This module uses SSH to manage network device configuration.
The results of the operation can be viewed in results directory.
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_reload. These are
written in the main.yml file of the tasks directory.
---
- name: Test Reload
cnos_reload:
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_reload_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: str
sample: "Device is Reloading. Please wait..."
'''
import sys
import time
import socket
import array
import json
import time  # NOTE(review): duplicate of the import above; kept as-is.
import re
try:
    from ansible.module_utils.network.cnos import cnos
    HAS_LIB = True
except Exception:
    # NOTE(review): HAS_LIB is never consulted below, so an import failure
    # surfaces later as a NameError on `cnos` -- confirm intended.
    HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Entry point: issue `reload` on the switch and record the transcript.

    A device-response timeout is treated as success because the switch
    stops answering while it reboots.
    """
    module = AnsibleModule(
        argument_spec=dict(
            outputfile=dict(required=True),
            host=dict(required=False),
            username=dict(required=False),
            password=dict(required=False, no_log=True),
            enablePassword=dict(required=False, no_log=True),
            deviceType=dict(required=True),),
        supports_check_mode=False)
    command = 'reload'
    outputfile = module.params['outputfile']
    output = ''
    # The reload command asks for confirmation; answer 'y' automatically.
    cmd = [{'command': command, 'prompt': 'reboot system? (y/n): ',
            'answer': 'y'}]
    output = output + str(cnos.run_cnos_commands(module, cmd))
    # Save it into the file.  Fix: a context manager guarantees the handle
    # is closed even if the write fails (the original leaked the handle on
    # error and shadowed the Python 2 builtin `file`).
    with open(outputfile, "a") as transcript:
        transcript.write(output)

    errorMsg = cnos.checkOutputForError(output)
    # NOTE(review): this tests whether errorMsg is a *substring* of the
    # timeout phrase (so an empty errorMsg also matches).  Preserved as-is;
    # confirm the containment direction against checkOutputForError's
    # contract before changing it.
    if(errorMsg in "Device Response Timed out"):
        module.exit_json(changed=True,
                         msg="Device is Reloading. Please wait...")
    else:
        module.fail_json(msg=errorMsg)
# Module entry point when executed by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
pbrod/numpy | numpy/core/tests/test_getlimits.py | 17 | 4297 | """ Test functions for limits module.
"""
import numpy as np
from numpy.core import finfo, iinfo
from numpy import half, single, double, longdouble
from numpy.testing import assert_equal, assert_, assert_raises
from numpy.core.getlimits import _discovered_machar, _float_ma
##################################################
class TestPythonFloat:
    def test_singleton(self):
        # finfo instances are cached per type, so repeated lookups must
        # yield the very same object.
        first, second = finfo(float), finfo(float)
        assert_equal(id(first), id(second))
class TestHalf:
    def test_singleton(self):
        # Two lookups for np.half must hit the same cached finfo object.
        first, second = finfo(half), finfo(half)
        assert_equal(id(first), id(second))
class TestSingle:
    def test_singleton(self):
        # Two lookups for np.single must hit the same cached finfo object.
        first, second = finfo(single), finfo(single)
        assert_equal(id(first), id(second))
class TestDouble:
    def test_singleton(self):
        # Two lookups for np.double must hit the same cached finfo object.
        first, second = finfo(double), finfo(double)
        assert_equal(id(first), id(second))
class TestLongdouble:
    def test_singleton(self):
        # Two lookups for np.longdouble must hit the same cached finfo
        # object.
        first, second = finfo(longdouble), finfo(longdouble)
        assert_equal(id(first), id(second))
class TestFinfo:
    def test_basic(self):
        # dtype codes and scalar types must agree attribute-for-attribute.
        dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'],
                       [np.float16, np.float32, np.float64, np.complex64,
                        np.complex128]))
        for dt1, dt2 in dts:
            # NOTE(review): 'machar' was deprecated in NumPy 1.22 and later
            # removed, so this attribute list is version-sensitive; confirm
            # against the NumPy version under test.
            for attr in ('bits', 'eps', 'epsneg', 'iexp', 'machar', 'machep',
                         'max', 'maxexp', 'min', 'minexp', 'negep', 'nexp',
                         'nmant', 'precision', 'resolution', 'tiny'):
                assert_equal(getattr(finfo(dt1), attr),
                             getattr(finfo(dt2), attr), attr)
        # Integer dtypes are rejected.
        assert_raises(ValueError, finfo, 'i4')
class TestIinfo:
    def test_basic(self):
        # dtype codes and scalar types must agree attribute-for-attribute.
        dts = list(zip(['i1', 'i2', 'i4', 'i8',
                        'u1', 'u2', 'u4', 'u8'],
                       [np.int8, np.int16, np.int32, np.int64,
                        np.uint8, np.uint16, np.uint32, np.uint64]))
        for dt1, dt2 in dts:
            for attr in ('bits', 'min', 'max'):
                assert_equal(getattr(iinfo(dt1), attr),
                             getattr(iinfo(dt2), attr), attr)
        # Float dtypes are rejected.
        assert_raises(ValueError, iinfo, 'f4')

    def test_unsigned_max(self):
        # Fix: np.sctypes['uint'] was removed in NumPy 2.0; enumerate the
        # unsigned types explicitly instead.
        types = [np.uint8, np.uint16, np.uint32, np.uint64]
        for T in types:
            # NOTE(review): T(-1) relies on wraparound conversion of a
            # negative Python int, which NumPy 2 rejects -- kept as-is to
            # preserve the original assertion; revisit for NumPy 2.
            assert_equal(iinfo(T).max, T(-1))
class TestRepr:
    def test_iinfo_repr(self):
        # repr encodes min/max/dtype in a fixed, readable layout.
        actual = repr(np.iinfo(np.int16))
        assert_equal(actual, "iinfo(min=-32768, max=32767, dtype=int16)")

    def test_finfo_repr(self):
        actual = repr(np.finfo(np.float32))
        assert_equal(actual,
                     "finfo(resolution=1e-06, min=-3.4028235e+38," +
                     " max=3.4028235e+38, dtype=float32)")
def test_instances():
    # Smoke test: info objects can be built from plain Python scalars.
    iinfo(10)
    finfo(3.0)
def assert_ma_equal(discovered, ma_like):
    # Check MachAr-like objects same as calculated MachAr instances
    for key, value in discovered.__dict__.items():
        expected = getattr(ma_like, key)
        assert_equal(value, expected)
        if hasattr(value, 'shape'):
            # Array-valued attributes must also agree in shape and dtype.
            assert_equal(value.shape, expected.shape)
            assert_equal(value.dtype, expected.dtype)
def test_known_types():
    # Test we are correctly compiling parameters for known types
    for ftype, ma_like in ((np.float16, _float_ma[16]),
                           (np.float32, _float_ma[32]),
                           (np.float64, _float_ma[64])):
        assert_ma_equal(_discovered_machar(ftype), ma_like)
    # Suppress warning for broken discovery of double double on PPC
    with np.errstate(all='ignore'):
        ld_ma = _discovered_machar(np.longdouble)
    # NOTE(review): `bytes` shadows the builtin; kept byte-identical.
    bytes = np.dtype(np.longdouble).itemsize
    if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
        # 80-bit extended precision
        assert_ma_equal(ld_ma, _float_ma[80])
    elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
        # IEE 754 128-bit
        assert_ma_equal(ld_ma, _float_ma[128])
def test_plausible_finfo():
    """Assert that finfo returns plausible parameters for inexact types.

    Fix: np.sctypes was removed in NumPy 2.0, so the float and complex
    scalar types are enumerated explicitly (the same set that
    np.sctypes['float'] + np.sctypes['complex'] yielded on common
    platforms).
    """
    inexact_types = [np.float16, np.float32, np.float64, np.longdouble,
                     np.complex64, np.complex128, np.clongdouble]
    for ftype in inexact_types:
        info = np.finfo(ftype)
        assert_(info.nmant > 1)
        assert_(info.minexp < -1)
        assert_(info.maxexp > 1)
| bsd-3-clause |
Kilmannan/HippieStation13 | tools/dmm2tgm/Source/dmm2tgm.py | 149 | 3726 |
import sys
# .dmm format converter, by RemieRichards
# Version 2.0
# Converts the internal structure of a .dmm file to a syntax
# that git can better handle conflicts-wise, it's also fairly human readable!
# Processes Boxstation (tgstation.2.1.3) almost instantly
def convert_map(map_file):
    """Convert a .dmm map file in place to the git-friendlier layout.

    A marker comment is written as the first line so converted maps are
    never reconverted.  Mirrors the original command-line behaviour by
    terminating the process (sys.exit) when done, or immediately when the
    map was already converted.
    """
    # CHECK FOR PREVIOUS CONVERSION
    with open(map_file, "r") as conversion_candidate:
        header = conversion_candidate.readline()
        if header.find("//MAP CONVERTED BY dmm2tgm.py THIS HEADER COMMENT PREVENTS RECONVERSION, DO NOT REMOVE") != -1:
            sys.exit()

    # ACTUAL CONVERSION (fix: "r" instead of "r+" -- the file is only read)
    with open(map_file, "r") as unconverted_map:
        characters = unconverted_map.read()

    converted_map = ""
    # State machine: insert newlines/indentation around BYOND syntax
    # characters, but only when they occur outside '...' / "..." literals.
    in_object_block = False        # ()
    in_variable_block = False      # {}
    in_quote_block = False         # ''
    in_double_quote_block = False  # ""
    for char in characters:
        if not in_quote_block:
            if not in_double_quote_block:
                if not in_variable_block:
                    if char == "(":
                        in_object_block = True
                        char = char + "\n"
                    if char == ")":
                        in_object_block = False
                    if char == ",":
                        char = char + "\n"
                if char == "{":
                    in_variable_block = True
                    if in_object_block:
                        char = char + "\n\t"
                if char == "}":
                    in_variable_block = False
                    if in_object_block:
                        char = "\n\t" + char
                if char == ";":
                    char = char + "\n\t"
            if char == "\"":
                if in_double_quote_block:
                    in_double_quote_block = False
                else:
                    in_double_quote_block = True
        if char == "'":
            if not in_double_quote_block:
                if in_quote_block:
                    in_quote_block = False
                else:
                    in_quote_block = True
        converted_map = converted_map + char

    # OVERWRITE MAP FILE WITH CONVERTED MAP STRING
    # Fix: mode "w" truncates the file.  The original "r+" left trailing
    # bytes of the old content behind whenever the converted text ended up
    # shorter than the original.
    with open(map_file, "w") as final_converted_map:
        final_converted_map.write("//MAP CONVERTED BY dmm2tgm.py THIS HEADER COMMENT PREVENTS RECONVERSION, DO NOT REMOVE \n")
        final_converted_map.write(converted_map)

    sys.exit()
# Run like: dmm2tgm.py "folder/folder/a_map.dmm"
# Fix: guard the argv access; the bare `if sys.argv[1]:` raised IndexError
# when the script was invoked without an argument.
if len(sys.argv) > 1 and sys.argv[1]:
    convert_map(sys.argv[1])
| agpl-3.0 |
shipci/sympy | sympy/polys/tests/test_monomials.py | 83 | 3266 | """Tests for tools and arithmetics for monomials of distributed polynomials. """
from sympy.polys.monomials import (
itermonomials, monomial_count,
monomial_mul, monomial_div,
monomial_gcd, monomial_lcm,
monomial_max, monomial_min,
monomial_divides,
Monomial,
)
from sympy.polys.polyerrors import ExactQuotientFailed
from sympy.abc import a, b, c, x, y, z
from sympy.core import S
from sympy.utilities.pytest import raises
def test_monomials():
    """itermonomials yields every monomial up to the given total degree."""
    one = S(1)
    assert itermonomials([], 0) == {one}
    assert itermonomials([], 1) == {one}
    assert itermonomials([], 2) == {one}
    assert itermonomials([], 3) == {one}

    assert itermonomials([x], 0) == {one}
    assert itermonomials([x], 1) == {one, x}
    assert itermonomials([x], 2) == {one, x, x**2}
    assert itermonomials([x], 3) == {one, x, x**2, x**3}

    assert itermonomials([x, y], 0) == {one}
    assert itermonomials([x, y], 1) == {one, x, y}
    assert itermonomials([x, y], 2) == {one, x, y, x**2, y**2, x*y}

    assert itermonomials([x, y], 3) == \
        {one, x, y, x**2, x**3, y**2, y**3, x*y, x*y**2, y*x**2}
def test_monomial_count():
    # C(n + d, d) monomials of degree <= d in n variables: C(4, 2) = 6.
    assert monomial_count(2, 2) == 6
    assert monomial_count(2, 3) == 10

def test_monomial_mul():
    # Exponent vectors add component-wise.
    assert monomial_mul((3, 4, 1), (1, 2, 0)) == (4, 6, 1)

def test_monomial_div():
    # Exponent vectors subtract component-wise.
    assert monomial_div((3, 4, 1), (1, 2, 0)) == (2, 2, 1)

def test_monomial_gcd():
    # gcd takes the component-wise minimum of the exponents.
    assert monomial_gcd((3, 4, 1), (1, 2, 0)) == (1, 2, 0)

def test_monomial_lcm():
    # lcm takes the component-wise maximum of the exponents.
    assert monomial_lcm((3, 4, 1), (1, 2, 0)) == (3, 4, 1)

def test_monomial_max():
    assert monomial_max((3, 4, 5), (0, 5, 1), (6, 3, 9)) == (6, 5, 9)

def test_monomial_min():
    assert monomial_min((3, 4, 5), (0, 5, 1), (6, 3, 9)) == (0, 3, 1)

def test_monomial_divides():
    # Divides iff every exponent of the first is <= the second's.
    assert monomial_divides((1, 2, 3), (4, 5, 6)) is True
    assert monomial_divides((1, 2, 3), (0, 5, 6)) is False
def test_Monomial():
    # Two monomials over (x, y, z): m = x**3*y**4*z, n = x*y**2.
    m = Monomial((3, 4, 1), (x, y, z))
    n = Monomial((1, 2, 0), (x, y, z))

    # Conversion to expressions, with default and substituted generators.
    assert m.as_expr() == x**3*y**4*z
    assert n.as_expr() == x**1*y**2

    assert m.as_expr(a, b, c) == a**3*b**4*c
    assert n.as_expr(a, b, c) == a**1*b**2

    assert m.exponents == (3, 4, 1)
    assert m.gens == (x, y, z)

    assert n.exponents == (1, 2, 0)
    assert n.gens == (x, y, z)

    # Equality compares against plain exponent tuples as well.
    assert m == (3, 4, 1)
    assert n != (3, 4, 1)
    assert m != (1, 2, 0)
    assert n == (1, 2, 0)

    # Indexing and slicing behave like the underlying exponent tuple.
    assert m[0] == m[-3] == 3
    assert m[1] == m[-2] == 4
    assert m[2] == m[-1] == 1

    assert n[0] == n[-3] == 1
    assert n[1] == n[-2] == 2
    assert n[2] == n[-1] == 0

    assert m[:2] == (3, 4)
    assert n[:2] == (1, 2)

    # Arithmetic accepts both Monomial operands and raw tuples.
    assert m*n == Monomial((4, 6, 1))
    assert m/n == Monomial((2, 2, 1))

    assert m*(1, 2, 0) == Monomial((4, 6, 1))
    assert m/(1, 2, 0) == Monomial((2, 2, 1))

    assert m.gcd(n) == Monomial((1, 2, 0))
    assert m.lcm(n) == Monomial((3, 4, 1))

    assert m.gcd((1, 2, 0)) == Monomial((1, 2, 0))
    assert m.lcm((1, 2, 0)) == Monomial((3, 4, 1))

    assert m**0 == Monomial((0, 0, 0))
    assert m**1 == m
    assert m**2 == Monomial((6, 8, 2))
    assert m**3 == Monomial((9, 12, 3))

    # Division by a non-divisor must fail loudly.
    raises(ExactQuotientFailed, lambda: m/Monomial((5, 2, 0)))
| bsd-3-clause |
drpngx/tensorflow | tensorflow/contrib/autograph/converters/name_scopes_test.py | 6 | 4456 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for for_canonicalization module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.autograph.converters import name_scopes
from tensorflow.contrib.autograph.core import converter_testing
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class FunctionNameScopeTransformer(converter_testing.TestCase):
  """Tests for the name_scopes converter: transformed functions should
  create their ops under a name scope derived from the function name."""

  def test_basic(self):

    def test_fn(l):
      """This should stay here."""
      a = 5
      l += a
      return l

    node = self.parse_and_analyze(test_fn, {})
    node = name_scopes.transform(node, self.ctx)

    with self.compiled(node, ops.name_scope) as result:
      result_op = result.test_fn(constant_op.constant(1))
      # Ops created inside the converted function carry its name scope.
      self.assertIn('test_fn/', result_op.op.name)
      # The docstring must survive the transformation.
      self.assertEqual('This should stay here.', result.test_fn.__doc__)

  def test_long_docstring(self):

    def test_fn(l):
      """Multi-line docstring.

      Args:
        l: A thing.

      Returns:
        l
      """
      return l

    node = self.parse_and_analyze(test_fn, {})
    node = name_scopes.transform(node, self.ctx)

    with self.compiled(node, ops.name_scope) as result:
      # Multi-line docstrings survive as well.
      self.assertIn('Multi-line', result.test_fn.__doc__)
      self.assertIn('Returns:', result.test_fn.__doc__)

  def test_nested_functions(self):

    def test_fn(l):

      def inner_fn(i):
        return i ** 2

      l += 4
      return inner_fn(l)

    node = self.parse_and_analyze(test_fn, {})
    node = name_scopes.transform(node, self.ctx)

    with self.compiled(node, ops.name_scope) as result:
      result_op = result.test_fn(constant_op.constant(1))
      first_result_input_name = result_op.op.inputs[0].name
      second_result_input_name = result_op.op.inputs[1].name
      # Outer ops get only the outer scope; inner-function ops get the
      # nested 'test_fn/inner_fn/' scope.
      self.assertIn('test_fn/', first_result_input_name)
      self.assertNotIn('inner_fn', first_result_input_name)
      self.assertIn('test_fn/inner_fn/', second_result_input_name)

  def test_method(self):

    class TestClass(object):

      def test_fn(self, l):

        def inner_fn(i):
          return i ** 2

        l += 4
        return inner_fn(l)

    # Note that 'TestClass' was needed in the namespace here.
    node = self.parse_and_analyze(
        TestClass, {'TestClass': TestClass}, owner_type=TestClass)
    node = name_scopes.transform(node, self.ctx)

    with self.compiled(node, ops.name_scope) as result:
      result_op = result.TestClass().test_fn(constant_op.constant(1))
      first_result_input_name = result_op.op.inputs[0].name
      second_result_input_name = result_op.op.inputs[1].name
      # Method scopes are prefixed with the owning class name.
      self.assertIn('TestClass/test_fn/', first_result_input_name)
      self.assertNotIn('inner_fn', first_result_input_name)
      self.assertIn('TestClass/test_fn/inner_fn/', second_result_input_name)

  def test_operator(self):

    class TestClass(object):

      def __call__(self, l):

        def inner_fn(i):
          return i ** 2

        l += 4
        return inner_fn(l)

    # Note that 'TestClass' was needed in the namespace here.
    node = self.parse_and_analyze(
        TestClass.__call__, {'TestClass': TestClass}, owner_type=TestClass)
    node = name_scopes.transform(node, self.ctx)

    with self.compiled(node, ops.name_scope) as result:
      result_op = result.__call__(TestClass(), constant_op.constant(1))
      first_result_input_name = result_op.op.inputs[0].name
      second_result_input_name = result_op.op.inputs[1].name
      # Per the assertions below, '__call__' shows up as 'call__' in the
      # generated scope names.
      self.assertIn('call__/', first_result_input_name)
      self.assertNotIn('inner_fn', first_result_input_name)
      self.assertIn('call__/inner_fn/', second_result_input_name)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
thomazs/geraldo | site/newsite/site-geraldo/django/views/decorators/vary.py | 40 | 1174 | try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.3, 2.4 fallback.
from django.utils.cache import patch_vary_headers
def vary_on_headers(*headers):
    """
    A view decorator that adds the specified headers to the Vary header of the
    response. Usage:
        @vary_on_headers('Cookie', 'Accept-language')
        def index(request):
            ...
    Note that the header names are not case-sensitive.
    """
    def decorator(view_func):
        def wrapped_view(*args, **kwargs):
            # Run the view first, then record the Vary headers on its response.
            response = view_func(*args, **kwargs)
            patch_vary_headers(response, headers)
            return response
        return wraps(view_func)(wrapped_view)
    return decorator
def vary_on_cookie(func):
    """
    A view decorator that adds "Cookie" to the Vary header of a response. This
    indicates that a page's contents depends on cookies. Usage:
        @vary_on_cookie
        def index(request):
            ...
    """
    def wrapped_view(*args, **kwargs):
        # Delegate to the view, then mark the response as cookie-dependent.
        response = func(*args, **kwargs)
        patch_vary_headers(response, ('Cookie',))
        return response
    return wraps(func)(wrapped_view)
| lgpl-3.0 |
suiyuan2009/tensorflow | third_party/llvm/expand_cmake_vars.py | 168 | 2679 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Expands CMake variables in a text file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
# Patterns for the three CMake constructs handled below: plain
# "#cmakedefine VAR [value]", "#cmakedefine01 VAR", and "${VAR}" expansion.
_CMAKE_DEFINE_REGEX = re.compile(r"\s*#cmakedefine\s+([A-Za-z_0-9]*)(\s.*)?$")
_CMAKE_DEFINE01_REGEX = re.compile(r"\s*#cmakedefine01\s+([A-Za-z_0-9]*)")
_CMAKE_VAR_REGEX = re.compile(r"\${([A-Za-z_0-9]*)}")
def _parse_args(argv):
"""Parses arguments with the form KEY=VALUE into a dictionary."""
result = {}
for arg in argv:
k, v = arg.split("=")
result[k] = v
return result
def _expand_variables(input_str, cmake_vars):
  """Expands ${VARIABLE}s in 'input_str', using dictionary 'cmake_vars'.

  Args:
    input_str: the string containing ${VARIABLE} expressions to expand.
    cmake_vars: a dictionary mapping variable names to their values.

  Returns:
    The expanded string; variables missing from 'cmake_vars' expand to "".
  """
  def substitute(m):
    # Unknown variables become the empty string, mirroring CMake.
    return cmake_vars.get(m.group(1), "")
  return _CMAKE_VAR_REGEX.sub(substitute, input_str)
def _expand_cmakedefines(line, cmake_vars):
  """Expands #cmakedefine declarations, using a dictionary 'cmake_vars'."""
  # Plain "#cmakedefine NAME [value]" lines.
  define = _CMAKE_DEFINE_REGEX.match(line)
  if define:
    name = define.group(1)
    suffix = define.group(2) or ""
    if name not in cmake_vars:
      return "/* #undef {} */\n".format(name)
    return "#define {}{}\n".format(name, _expand_variables(suffix, cmake_vars))
  # "#cmakedefine01 NAME" lines become a 0/1 define.
  define01 = _CMAKE_DEFINE01_REGEX.match(line)
  if define01:
    name = define01.group(1)
    return "#define {} {}\n".format(name, cmake_vars.get(name, "0"))
  # Any other line just gets ${VAR} substitution.
  return _expand_variables(line, cmake_vars)
def main():
  """Reads stdin line by line, expanding CMake constructs using the
  KEY=VALUE pairs supplied on the command line, and writes to stdout."""
  substitutions = _parse_args(sys.argv[1:])
  for input_line in sys.stdin:
    sys.stdout.write(_expand_cmakedefines(input_line, substitutions))
# Script entry point: filter stdin -> stdout using argv substitutions.
if __name__ == "__main__":
  main()
| apache-2.0 |
morefreeze/scrapy_projects | duokan/duokan/spiders/list.py | 1 | 2084 | # -*- coding: utf-8 -*-
import scrapy
import logging
import glob
import csv
import http.cookiejar
import os
from duokan.spiders.base import BaseSpider, FileSaverMixin
logger = logging.getLogger('duokan')
class ListSpider(BaseSpider, FileSaverMixin):
    """Crawls duokan book info pages listed in duokan.csv, skipping books whose
    url/<uuid>/_done marker already exists, and saves downloaded pages to disk."""
    name = 'list'
    def __init__(self):
        cj = http.cookiejar.MozillaCookieJar()
        # dump cookie with cookies.txt and save as a file
        cj.load('morefreeze_all.cookie')
        # Keep only cookies scoped to duokan.com; sent with every request.
        self.cookie = {k.name: k.value for k in cj if k.domain.endswith('duokan.com')}
        # Maps book_id -> number of pages still to download (see parse_page).
        self.left_page = {}
    def start_requests(self):
        # check _done file to detect whether book is finish
        done_dirs = {os.path.basename(os.path.dirname(dir)) for dir in glob.iglob('url/*/_done')}
        with open('duokan.csv', 'r') as f:
            r = csv.DictReader(f)
            for row in r:
                if row['uuid'] not in done_dirs:
                    yield scrapy.Request('http://www.duokan.com/reader/book_info/%s/medium' % (row['uuid']),
                                         cookies=self.cookie,
                                         callback=self.parse_book_info)
    def parse_book_info(self, response):
        super().parse_book_info(response)
        # NOTE(review): 'book_info' is not defined in this scope -- this line
        # raises NameError as written. Presumably the base-class call above was
        # meant to return it; confirm against BaseSpider.parse_book_info.
        self.left_page[book_info['book_id']] = len(book_info['pages'])
    def parse_page(self, response):
        super().parse_page(response)
        # NOTE(review): 'req' is undefined here -- likely response.request was
        # intended (as in save_page below); verify before relying on this.
        self.left_page[req.meta['book_id']] -= 1
    def save_page(self, response):
        req = response.request
        if response.status != 200:
            logger.warning('no page iss, book_id[%s] page_id[%s]' % (req.meta['book_id'], req.meta['page_id']))
            return
        # Pages are stored under data/<book_id>/<page_id> as raw bytes.
        dir = os.path.join('data', req.meta['book_id'])
        with open(os.path.join(dir, req.meta['page_id']), 'wb') as f:
            f.write(response.body)
    # def closed(self, reason):
    #     for book_id in self.left_page:
    #         if self.left_page[book_id] == 0:
    #             url_dir = os.path.join('url', book_id)
    #             with open(os.path(url_dir, '_done'), 'w') as f:
    #                 pass
| mit |
olivierdalang/QGIS | python/plugins/processing/modeler/ModelerParametersDialog.py | 8 | 24575 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ModelerParametersDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import webbrowser
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import (QDialog, QDialogButtonBox, QLabel, QLineEdit,
QFrame, QPushButton, QSizePolicy, QVBoxLayout,
QHBoxLayout, QWidget, QTabWidget, QTextEdit)
from qgis.PyQt.QtGui import QColor
from qgis.core import (Qgis,
QgsProject,
QgsProcessingParameterDefinition,
QgsProcessingModelOutput,
QgsProcessingModelChildAlgorithm,
QgsProcessingModelChildParameterSource,
QgsProcessingOutputDefinition)
from qgis.gui import (QgsGui,
QgsMessageBar,
QgsScrollArea,
QgsFilterLineEdit,
QgsHelp,
QgsProcessingContextGenerator,
QgsProcessingModelerParameterWidget,
QgsProcessingParameterWidgetContext,
QgsPanelWidget,
QgsPanelWidgetStack,
QgsColorButton,
QgsModelChildDependenciesWidget)
from qgis.utils import iface
from processing.gui.wrappers import WidgetWrapperFactory
from processing.gui.wrappers import InvalidParameterValue
from processing.tools.dataobjects import createContext
from processing.gui.wrappers import WidgetWrapper
class ModelerParametersDialog(QDialog):
    """Dialog used to configure one child algorithm of a processing model.

    Hosts a ModelerParametersWidget together with an Ok/Cancel/Help button
    box; on accept, the configured QgsProcessingModelChildAlgorithm is built
    via createAlgorithm().
    """
    def __init__(self, alg, model, algName=None, configuration=None):
        super().__init__()
        self.setObjectName('ModelerParametersDialog')
        self.setModal(True)
        if iface is not None:
            self.setStyleSheet(iface.mainWindow().styleSheet())
        self._alg = alg  # The algorithm to define in this dialog. It is an instance of QgsProcessingAlgorithm
        self.model = model  # The model this algorithm is going to be added to. It is an instance of QgsProcessingModelAlgorithm
        self.childId = algName  # The name of the algorithm in the model, in case we are editing it and not defining it for the first time
        self.configuration = configuration
        self.context = createContext()
        self.setWindowTitle(self._alg.displayName())
        self.widget = ModelerParametersWidget(alg, model, algName, configuration, context=self.context, dialog=self)
        QgsGui.enableAutoGeometryRestore(self)
        self.buttonBox = QDialogButtonBox()
        self.buttonBox.setOrientation(Qt.Horizontal)
        self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel | QDialogButtonBox.Ok | QDialogButtonBox.Help)
        self.buttonBox.accepted.connect(self.okPressed)
        self.buttonBox.rejected.connect(self.reject)
        self.buttonBox.helpRequested.connect(self.openHelp)
        mainLayout = QVBoxLayout()
        mainLayout.addWidget(self.widget, 1)
        mainLayout.addWidget(self.buttonBox)
        self.setLayout(mainLayout)
    def setComments(self, text):
        """Sets the comment text shown in the Comments tab."""
        self.widget.setComments(text)
    def comments(self):
        """Returns the comment text from the Comments tab."""
        return self.widget.comments()
    def setCommentColor(self, color):
        """Sets the color used to render the child algorithm's comment."""
        self.widget.setCommentColor(color)
    def commentColor(self):
        """Returns the color chosen for the child algorithm's comment."""
        return self.widget.commentColor()
    def switchToCommentTab(self):
        """Brings the Comments tab to the front and selects its text."""
        self.widget.switchToCommentTab()
    def getAvailableValuesOfType(self, paramType, outTypes=[], dataTypes=[]):
        """Returns the model sources (inputs/child outputs) compatible with the
        given parameter/output types. Type arguments may be single classes or
        lists of classes. (The mutable defaults are safe here: the arguments
        are only rebound, never mutated.)"""
        # upgrade paramType to list
        if paramType is None:
            paramType = []
        elif not isinstance(paramType, (tuple, list)):
            paramType = [paramType]
        if outTypes is None:
            outTypes = []
        elif not isinstance(outTypes, (tuple, list)):
            outTypes = [outTypes]
        return self.model.availableSourcesForChild(self.childId, [p.typeName() for p in paramType if
                                                                  issubclass(p, QgsProcessingParameterDefinition)],
                                                   [o.typeName() for o in outTypes if
                                                    issubclass(o, QgsProcessingOutputDefinition)], dataTypes)
    def resolveValueDescription(self, value):
        """Returns a human-readable description for a child parameter source;
        non-source values are returned unchanged."""
        if isinstance(value, QgsProcessingModelChildParameterSource):
            if value.source() == QgsProcessingModelChildParameterSource.StaticValue:
                return value.staticValue()
            elif value.source() == QgsProcessingModelChildParameterSource.ModelParameter:
                return self.model.parameterDefinition(value.parameterName()).description()
            elif value.source() == QgsProcessingModelChildParameterSource.ChildOutput:
                alg = self.model.childAlgorithm(value.outputChildId())
                output_name = alg.algorithm().outputDefinition(value.outputName()).description()
                # see if this output has been named by the model designer -- if so, we use that friendly name
                for name, output in alg.modelOutputs().items():
                    if output.childOutputName() == value.outputName():
                        output_name = name
                        break
                return self.tr("'{0}' from algorithm '{1}'").format(output_name, alg.description())
        return value
    def setPreviousValues(self):
        """Loads the child algorithm's previously configured values into the widget."""
        self.widget.setPreviousValues()
    def createAlgorithm(self):
        """Builds and returns the configured child algorithm (or None)."""
        return self.widget.createAlgorithm()
    def okPressed(self):
        if self.createAlgorithm() is not None:
            self.accept()
    def openHelp(self):
        """Opens the algorithm's help page in the default web browser."""
        algHelp = self.widget.algorithm().helpUrl()
        if not algHelp:
            # Bug fix: this class defines no algorithm() method, so the
            # original fallback's self.algorithm() calls raised AttributeError.
            # Use the hosted widget's algorithm() accessor throughout.
            algHelp = QgsHelp.helpUrl("processing_algs/{}/{}.html#{}".format(
                self.widget.algorithm().provider().helpId(), self.widget.algorithm().groupId(),
                "{}{}".format(self.widget.algorithm().provider().helpId(),
                              self.widget.algorithm().name()))).toString()
        if algHelp not in [None, ""]:
            webbrowser.open(algHelp)
class ModelerParametersPanelWidget(QgsPanelWidget):
    """Panel that builds one widget/wrapper per parameter and destination of a
    child algorithm, restores previously configured values, and converts the
    widget state back into a QgsProcessingModelChildAlgorithm."""
    def __init__(self, alg, model, algName=None, configuration=None, dialog=None, context=None):
        super().__init__()
        self._alg = alg # The algorithm to define in this dialog. It is an instance of QgsProcessingAlgorithm
        self.model = model # The model this algorithm is going to be added to. It is an instance of QgsProcessingModelAlgorithm
        self.childId = algName # The name of the algorithm in the model, in case we are editing it and not defining it for the first time
        self.configuration = configuration
        self.context = context
        self.dialog = dialog
        # Maps parameter name -> label widget, so advanced labels can be toggled.
        self.widget_labels = {}
        # Adapter exposing this panel's processing context to parameter widgets.
        class ContextGenerator(QgsProcessingContextGenerator):
            def __init__(self, context):
                super().__init__()
                self.processing_context = context
            def processingContext(self):
                return self.processing_context
        self.context_generator = ContextGenerator(self.context)
        self.setupUi()
        self.params = None
    def algorithm(self):
        """Returns the QgsProcessingAlgorithm being configured."""
        return self._alg
    def setupUi(self):
        """Creates the description box, per-parameter widgets, destination
        widgets and the dependencies panel, then restores previous values."""
        self.showAdvanced = False
        # Maps parameter/output name -> its widget wrapper.
        self.wrappers = {}
        self.algorithmItem = None
        self.mainLayout = QVBoxLayout()
        self.mainLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout = QVBoxLayout()
        self.bar = QgsMessageBar()
        self.bar.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
        self.verticalLayout.addWidget(self.bar)
        hLayout = QHBoxLayout()
        hLayout.setContentsMargins(0, 0, 0, 0)
        descriptionLabel = QLabel(self.tr("Description"))
        self.descriptionBox = QLineEdit()
        self.descriptionBox.setText(self._alg.displayName())
        hLayout.addWidget(descriptionLabel)
        hLayout.addWidget(self.descriptionBox)
        self.verticalLayout.addLayout(hLayout)
        line = QFrame()
        line.setFrameShape(QFrame.HLine)
        line.setFrameShadow(QFrame.Sunken)
        self.verticalLayout.addWidget(line)
        widget_context = QgsProcessingParameterWidgetContext()
        widget_context.setProject(QgsProject.instance())
        if iface is not None:
            widget_context.setMapCanvas(iface.mapCanvas())
            widget_context.setActiveLayer(iface.activeLayer())
        widget_context.setModel(self.model)
        widget_context.setModelChildAlgorithmId(self.childId)
        # Optional algorithm-specific configuration widget (e.g. for scripts).
        self.algorithmItem = QgsGui.instance().processingGuiRegistry().algorithmConfigurationWidget(self._alg)
        if self.algorithmItem:
            self.algorithmItem.setWidgetContext(widget_context)
            self.algorithmItem.registerProcessingContextGenerator(self.context_generator)
            if self.configuration:
                self.algorithmItem.setConfiguration(self.configuration)
            self.verticalLayout.addWidget(self.algorithmItem)
        # Add the "Show advanced parameters" toggle only if any parameter is advanced.
        for param in self._alg.parameterDefinitions():
            if param.flags() & QgsProcessingParameterDefinition.FlagAdvanced:
                self.advancedButton = QPushButton()
                self.advancedButton.setText(self.tr('Show advanced parameters'))
                self.advancedButton.clicked.connect(
                    self.showAdvancedParametersClicked)
                advancedButtonHLayout = QHBoxLayout()
                advancedButtonHLayout.addWidget(self.advancedButton)
                advancedButtonHLayout.addStretch()
                self.verticalLayout.addLayout(advancedButtonHLayout)
                break
        # One wrapper/widget per non-destination, non-hidden parameter.
        for param in self._alg.parameterDefinitions():
            if param.isDestination() or param.flags() & QgsProcessingParameterDefinition.FlagHidden:
                continue
            wrapper = WidgetWrapperFactory.create_wrapper(param, self.dialog)
            self.wrappers[param.name()] = wrapper
            wrapper.setWidgetContext(widget_context)
            wrapper.registerProcessingContextGenerator(self.context_generator)
            # New-style wrappers ARE the widget; legacy wrappers expose .widget.
            if issubclass(wrapper.__class__, QgsProcessingModelerParameterWidget):
                widget = wrapper
            else:
                widget = wrapper.widget
            if widget is not None:
                if issubclass(wrapper.__class__, QgsProcessingModelerParameterWidget):
                    label = wrapper.createLabel()
                else:
                    tooltip = param.description()
                    widget.setToolTip(tooltip)
                    label = wrapper.label
                self.widget_labels[param.name()] = label
                if param.flags() & QgsProcessingParameterDefinition.FlagAdvanced:
                    label.setVisible(self.showAdvanced)
                    widget.setVisible(self.showAdvanced)
                self.verticalLayout.addWidget(label)
                self.verticalLayout.addWidget(widget)
        # Destination parameters get modeler-specific output widgets.
        for output in self._alg.destinationParameterDefinitions():
            if output.flags() & QgsProcessingParameterDefinition.FlagHidden:
                continue
            widget = QgsGui.processingGuiRegistry().createModelerParameterWidget(self.model,
                                                                                 self.childId,
                                                                                 output,
                                                                                 self.context)
            widget.setDialog(self.dialog)
            widget.setWidgetContext(widget_context)
            widget.registerProcessingContextGenerator(self.context_generator)
            self.wrappers[output.name()] = widget
            item = QgsFilterLineEdit()
            if hasattr(item, 'setPlaceholderText'):
                item.setPlaceholderText(self.tr('[Enter name if this is a final result]'))
            label = widget.createLabel()
            if label is not None:
                self.verticalLayout.addWidget(label)
            self.verticalLayout.addWidget(widget)
        label = QLabel(' ')
        self.verticalLayout.addWidget(label)
        label = QLabel(self.tr('Dependencies'))
        self.dependencies_panel = QgsModelChildDependenciesWidget(self, self.model, self.childId)
        self.verticalLayout.addWidget(label)
        self.verticalLayout.addWidget(self.dependencies_panel)
        self.verticalLayout.addStretch(1000)
        self.setPreviousValues()
        self.verticalLayout2 = QVBoxLayout()
        self.verticalLayout2.setSpacing(2)
        self.verticalLayout2.setMargin(0)
        self.paramPanel = QWidget()
        self.paramPanel.setLayout(self.verticalLayout)
        self.scrollArea = QgsScrollArea()
        self.scrollArea.setWidget(self.paramPanel)
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setFrameStyle(QFrame.NoFrame)
        self.verticalLayout2.addWidget(self.scrollArea)
        w = QWidget()
        w.setLayout(self.verticalLayout2)
        self.mainLayout.addWidget(w)
        self.setLayout(self.mainLayout)
    def showAdvancedParametersClicked(self):
        """Toggles visibility of all advanced-parameter widgets and labels."""
        self.showAdvanced = not self.showAdvanced
        if self.showAdvanced:
            self.advancedButton.setText(self.tr('Hide advanced parameters'))
        else:
            self.advancedButton.setText(self.tr('Show advanced parameters'))
        for param in self._alg.parameterDefinitions():
            if param.flags() & QgsProcessingParameterDefinition.FlagAdvanced:
                wrapper = self.wrappers[param.name()]
                if issubclass(wrapper.__class__, QgsProcessingModelerParameterWidget):
                    wrapper.setVisible(self.showAdvanced)
                else:
                    wrapper.widget.setVisible(self.showAdvanced)
                self.widget_labels[param.name()].setVisible(self.showAdvanced)
    def setPreviousValues(self):
        """When editing an existing child algorithm, pushes its stored
        description, parameter sources, output links and dependencies into
        the corresponding widgets."""
        if self.childId is not None:
            alg = self.model.childAlgorithm(self.childId)
            self.descriptionBox.setText(alg.description())
            for param in alg.algorithm().parameterDefinitions():
                if param.isDestination() or param.flags() & QgsProcessingParameterDefinition.FlagHidden:
                    continue
                value = None
                if param.name() in alg.parameterSources():
                    value = alg.parameterSources()[param.name()]
                    # Unwrap single-element source lists; treat empty as unset.
                    if isinstance(value, list) and len(value) == 1:
                        value = value[0]
                    elif isinstance(value, list) and len(value) == 0:
                        value = None
                wrapper = self.wrappers[param.name()]
                if issubclass(wrapper.__class__, QgsProcessingModelerParameterWidget):
                    if value is None:
                        value = QgsProcessingModelChildParameterSource.fromStaticValue(param.defaultValue())
                    wrapper.setWidgetValue(value)
                else:
                    if value is None:
                        value = param.defaultValue()
                    if isinstance(value,
                                  QgsProcessingModelChildParameterSource) and value.source() == QgsProcessingModelChildParameterSource.StaticValue:
                        value = value.staticValue()
                    wrapper.setValue(value)
            for output in self.algorithm().destinationParameterDefinitions():
                if output.flags() & QgsProcessingParameterDefinition.FlagHidden:
                    continue
                model_output_name = None
                for name, out in alg.modelOutputs().items():
                    if out.childId() == self.childId and out.childOutputName() == output.name():
                        # this destination parameter is linked to a model output
                        model_output_name = out.name()
                        break
                value = None
                if model_output_name is None and output.name() in alg.parameterSources():
                    value = alg.parameterSources()[output.name()]
                    if isinstance(value, list) and len(value) == 1:
                        value = value[0]
                    elif isinstance(value, list) and len(value) == 0:
                        value = None
                wrapper = self.wrappers[output.name()]
                if model_output_name is not None:
                    wrapper.setToModelOutput(model_output_name)
                elif value is not None or output.defaultValue() is not None:
                    if value is None:
                        value = QgsProcessingModelChildParameterSource.fromStaticValue(output.defaultValue())
                    wrapper.setWidgetValue(value)
            self.dependencies_panel.setValue(alg.dependencies())
    def createAlgorithm(self):
        """Collects the widget values into a new QgsProcessingModelChildAlgorithm
        (parameter sources, model outputs and dependencies) and returns it."""
        alg = QgsProcessingModelChildAlgorithm(self._alg.id())
        if not self.childId:
            alg.generateChildId(self.model)
        else:
            alg.setChildId(self.childId)
        alg.setDescription(self.descriptionBox.text())
        if self.algorithmItem:
            alg.setConfiguration(self.algorithmItem.configuration())
            # Re-create the algorithm so its parameters reflect the configuration.
            self._alg = alg.algorithm().create(self.algorithmItem.configuration())
        for param in self._alg.parameterDefinitions():
            if param.isDestination() or param.flags() & QgsProcessingParameterDefinition.FlagHidden:
                continue
            try:
                wrapper = self.wrappers[param.name()]
                if issubclass(wrapper.__class__, WidgetWrapper):
                    val = wrapper.value()
                elif issubclass(wrapper.__class__, QgsProcessingModelerParameterWidget):
                    val = wrapper.value()
                else:
                    val = wrapper.parameterValue()
            except InvalidParameterValue:
                val = None
            # Normalize to a list of child parameter sources.
            if isinstance(val, QgsProcessingModelChildParameterSource):
                val = [val]
            elif not (isinstance(val, list) and all(
                    [isinstance(subval, QgsProcessingModelChildParameterSource) for subval in val])):
                val = [QgsProcessingModelChildParameterSource.fromStaticValue(val)]
            valid = True
            for subval in val:
                if (isinstance(subval, QgsProcessingModelChildParameterSource)
                    and subval.source() == QgsProcessingModelChildParameterSource.StaticValue
                    and not param.checkValueIsAcceptable(subval.staticValue())) \
                        or (subval is None and not param.flags() & QgsProcessingParameterDefinition.FlagOptional):
                    valid = False
                    break
            if valid:
                alg.addParameterSources(param.name(), val)
        outputs = {}
        for output in self._alg.destinationParameterDefinitions():
            if not output.flags() & QgsProcessingParameterDefinition.FlagHidden:
                wrapper = self.wrappers[output.name()]
                if wrapper.isModelOutput():
                    name = wrapper.modelOutputName()
                    if name:
                        # this destination parameter is exposed as a final model output
                        model_output = QgsProcessingModelOutput(name, name)
                        model_output.setChildId(alg.childId())
                        model_output.setChildOutputName(output.name())
                        outputs[name] = model_output
                else:
                    val = wrapper.value()
                    if isinstance(val, QgsProcessingModelChildParameterSource):
                        val = [val]
                    alg.addParameterSources(output.name(), val)
            if output.flags() & QgsProcessingParameterDefinition.FlagIsModelOutput:
                # Mandatory model outputs are always created, even when hidden.
                if output.name() not in outputs:
                    model_output = QgsProcessingModelOutput(output.name(), output.name())
                    model_output.setChildId(alg.childId())
                    model_output.setChildOutputName(output.name())
                    outputs[output.name()] = model_output
        alg.setModelOutputs(outputs)
        alg.setDependencies(self.dependencies_panel.value())
        return alg
class ModelerParametersWidget(QWidget):
    """Widget combining the parameters panel ("Properties" tab) with a
    "Comments" tab holding the child algorithm's comment text and color."""
    def __init__(self, alg, model, algName=None, configuration=None, dialog=None, context=None):
        super().__init__()
        self._alg = alg # The algorithm to define in this dialog. It is an instance of QgsProcessingAlgorithm
        self.model = model # The model this algorithm is going to be added to. It is an instance of QgsProcessingModelAlgorithm
        self.childId = algName # The name of the algorithm in the model, in case we are editing it and not defining it for the first time
        self.configuration = configuration
        self.context = context
        self.dialog = dialog
        self.widget = ModelerParametersPanelWidget(alg, model, algName, configuration, dialog, context)
        # Adapter exposing this widget's processing context to parameter widgets.
        class ContextGenerator(QgsProcessingContextGenerator):
            def __init__(self, context):
                super().__init__()
                self.processing_context = context
            def processingContext(self):
                return self.processing_context
        self.context_generator = ContextGenerator(self.context)
        self.setupUi()
        self.params = None
    def algorithm(self):
        """Returns the QgsProcessingAlgorithm being configured."""
        return self._alg
    def switchToCommentTab(self):
        """Activates the Comments tab and selects the comment text for editing."""
        self.tab.setCurrentIndex(1)
        self.commentEdit.setFocus()
        self.commentEdit.selectAll()
    def setupUi(self):
        """Builds the tab widget: parameters panel plus comment editor/color."""
        self.mainLayout = QVBoxLayout()
        self.mainLayout.setContentsMargins(0, 0, 0, 0)
        self.tab = QTabWidget()
        self.mainLayout.addWidget(self.tab)
        self.param_widget = QgsPanelWidgetStack()
        self.widget.setDockMode(True)
        self.param_widget.setMainPanel(self.widget)
        self.tab.addTab(self.param_widget, self.tr('Properties'))
        self.commentLayout = QVBoxLayout()
        self.commentEdit = QTextEdit()
        self.commentEdit.setAcceptRichText(False)
        self.commentLayout.addWidget(self.commentEdit, 1)
        hl = QHBoxLayout()
        hl.setContentsMargins(0, 0, 0, 0)
        hl.addWidget(QLabel(self.tr('Color')))
        self.comment_color_button = QgsColorButton()
        self.comment_color_button.setAllowOpacity(True)
        self.comment_color_button.setWindowTitle(self.tr('Comment Color'))
        self.comment_color_button.setShowNull(True, self.tr('Default'))
        hl.addWidget(self.comment_color_button)
        self.commentLayout.addLayout(hl)
        w2 = QWidget()
        w2.setLayout(self.commentLayout)
        self.tab.addTab(w2, self.tr('Comments'))
        self.setLayout(self.mainLayout)
    def setComments(self, text):
        """Sets the comment text (plain text only)."""
        self.commentEdit.setPlainText(text)
    def comments(self):
        """Returns the comment text."""
        return self.commentEdit.toPlainText()
    def setCommentColor(self, color):
        """Sets the comment color; an invalid color selects the "Default" (null) state."""
        if color.isValid():
            self.comment_color_button.setColor(color)
        else:
            self.comment_color_button.setToNull()
    def commentColor(self):
        """Returns the chosen comment color, or an invalid QColor for "Default"."""
        return self.comment_color_button.color() if not self.comment_color_button.isNull() else QColor()
    def setPreviousValues(self):
        """Forwards to the parameters panel to restore stored values."""
        self.widget.setPreviousValues()
    def createAlgorithm(self):
        """Builds the child algorithm from the panel and attaches the comment."""
        alg = self.widget.createAlgorithm()
        if alg:
            alg.comment().setDescription(self.comments())
            alg.comment().setColor(self.commentColor())
        return alg
| gpl-2.0 |
backmari/moose | python/peacock/tests/execute_tab/TerminalTextEdit/test_TerminalTextEdit.py | 2 | 2186 | #!/usr/bin/env python
from peacock.Execute.TerminalTextEdit import TerminalTextEdit
from PyQt5 import QtTest
from PyQt5.QtWidgets import QMenu, QFileDialog
from mock import patch
import tempfile
from peacock.utils import Testing
class MockEvent(object):
    """Minimal stand-in for a QContextMenuEvent; only globalPos() is consulted."""
    def globalPos(self):
        # The real event returns a QPoint; None suffices because QMenu.exec_
        # is mocked in the tests below.
        return None
class Tests(Testing.PeacockTester):
    """Unit tests for TerminalTextEdit (read-only terminal output widget)."""
    def testClear(self):
        t = TerminalTextEdit()
        QtTest.QTest.keyClicks(t, "Test input")
        # TerminalTextEdit is read only
        self.assertEqual(str(t.toPlainText()), "")
        t.setPlainText("Test input")
        self.assertEqual(str(t.toPlainText()), "Test input")
        t.clear()
        self.assertEqual(str(t.toPlainText()), "")
    @patch.object(QFileDialog, "getSaveFileName")
    def testSave(self, mock_file):
        t = TerminalTextEdit()
        # Saving to an unwritable path must not raise.
        mock_file.return_value = "/no_exist/no_exist", None
        t.setPlainText("Test input")
        t.save()
        with tempfile.NamedTemporaryFile() as f:
            mock_file.return_value = f.name, None
            t.save()
            with open(f.name, "r") as f:
                data = f.read()
                self.assertEqual(data, "Test input")
    def menuExec(self, point):
        # Side-effect callback standing in for QMenu.exec_: returns whichever
        # menu action the current test selected via self.do_save_action.
        if self.do_save_action:
            return "Save"
        else:
            return "Clear"
    @patch.object(QMenu, 'exec_')
    @patch.object(QMenu, 'addAction')
    @patch.object(TerminalTextEdit, "save")
    def testContextMenu(self, mock_save, mock_add, mock_exec):
        t = TerminalTextEdit()
        self.action_count = 0
        self.do_save_action = True
        mock_add.side_effect = ["Save", "Clear"]
        mock_exec.side_effect = self.menuExec
        t.setPlainText("Test input")
        self.assertEqual(mock_save.call_count, 0)
        t.contextMenuEvent(MockEvent())
        # "Save" chosen: text untouched, save() invoked once.
        self.assertEqual(t.toPlainText(), "Test input")
        self.assertEqual(mock_save.call_count, 1)
        self.do_save_action = False
        mock_add.side_effect = ["Save", "Clear"]
        t.contextMenuEvent(MockEvent())
        # "Clear" chosen: text wiped, save() not called again.
        self.assertEqual(t.toPlainText(), "")
        self.assertEqual(mock_save.call_count, 1)
# Run via Peacock's test harness rather than unittest.main().
if __name__ == '__main__':
    Testing.run_tests()
| lgpl-2.1 |
nelsongoh/tembotsu | libs/httplib2/test/functional/test_proxies.py | 305 | 2965 | import unittest
import errno
import os
import signal
import subprocess
import tempfile
import nose
import httplib2
from httplib2 import socks
from httplib2.test import miniserver
tinyproxy_cfg = """
User "%(user)s"
Port %(port)s
Listen 127.0.0.1
PidFile "%(pidfile)s"
LogFile "%(logfile)s"
MaxClients 2
StartServers 1
LogLevel Info
"""
class FunctionalProxyHttpTest(unittest.TestCase):
    """End-to-end test: proxy an httplib2 request through a local tinyproxy.

    NOTE: this module uses Python 2 syntax ('except OSError, e', print
    statements) and will not run under Python 3.
    """
    def setUp(self):
        if not socks:
            raise nose.SkipTest('socks module unavailable')
        if not subprocess:
            raise nose.SkipTest('subprocess module unavailable')
        # start a short-lived miniserver so we can get a likely port
        # for the proxy
        self.httpd, self.proxyport = miniserver.start_server(
            miniserver.ThisDirHandler)
        self.httpd.shutdown()
        self.httpd, self.port = miniserver.start_server(
            miniserver.ThisDirHandler)
        self.pidfile = tempfile.mktemp()
        self.logfile = tempfile.mktemp()
        fd, self.conffile = tempfile.mkstemp()
        f = os.fdopen(fd, 'w')
        # Fill in the tinyproxy config template and launch the proxy daemon.
        our_cfg = tinyproxy_cfg % {'user': os.getlogin(),
                                   'pidfile': self.pidfile,
                                   'port': self.proxyport,
                                   'logfile': self.logfile}
        f.write(our_cfg)
        f.close()
        try:
            # TODO use subprocess.check_call when 2.4 is dropped
            ret = subprocess.call(['tinyproxy', '-c', self.conffile])
            self.assertEqual(0, ret)
        except OSError, e:
            if e.errno == errno.ENOENT:
                raise nose.SkipTest('tinyproxy not available')
            raise
    def tearDown(self):
        self.httpd.shutdown()
        try:
            # tinyproxy daemonizes; terminate it via the pid it recorded.
            pid = int(open(self.pidfile).read())
            os.kill(pid, signal.SIGTERM)
        except OSError, e:
            if e.errno == errno.ESRCH:
                print '\n\n\nTinyProxy Failed to start, log follows:'
                print open(self.logfile).read()
                print 'end tinyproxy log\n\n\n'
                raise
        map(os.unlink, (self.pidfile,
                        self.logfile,
                        self.conffile))
    def testSimpleProxy(self):
        proxy_info = httplib2.ProxyInfo(socks.PROXY_TYPE_HTTP,
                                        'localhost', self.proxyport)
        client = httplib2.Http(proxy_info=proxy_info)
        src = 'miniserver.py'
        response, body = client.request('http://localhost:%d/%s' %
                                        (self.port, src))
        self.assertEqual(response.status, 200)
        self.assertEqual(body, open(os.path.join(miniserver.HERE, src)).read())
        # The proxy's log must show that the request was actually relayed.
        lf = open(self.logfile).read()
        expect = ('Established connection to host "127.0.0.1" '
                  'using file descriptor')
        self.assertTrue(expect in lf,
                        'tinyproxy did not proxy a request for miniserver')
| apache-2.0 |
atlashealth/ansible | v1/ansible/runner/action_plugins/assert.py | 138 | 2123 | # Copyright 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible
from ansible import utils, errors
from ansible.runner.return_data import ReturnData
class ActionModule(object):
    ''' Fail with custom message '''
    TRANSFERS_FILES = False
    def __init__(self, runner):
        self.runner = runner
    def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
        """Evaluate every conditional listed under 'that'.

        Returns a ReturnData whose result has failed=True (and the optional
        custom 'msg') for the first assertion that does not hold, or a success
        message once all assertions pass. Raises AnsibleError when no 'that'
        argument was supplied.
        """
        # note: the fail module does not need to pay attention to check mode
        # it always runs.
        args = {}
        if complex_args:
            args.update(complex_args)
        args.update(utils.parse_kv(module_args))
        # Optional custom failure message (idiomatic dict.get replaces the
        # previous explicit membership test).
        msg = args.get('msg')
        if 'that' not in args:
            raise errors.AnsibleError('conditional required in "that" string')
        # Allow a single conditional as a bare string.
        if not isinstance(args['that'], list):
            args['that'] = [ args['that'] ]
        for that in args['that']:
            test_result = utils.check_conditional(that, self.runner.basedir, inject, fail_on_undefined=True)
            if not test_result:
                result = dict(
                    failed = True,
                    evaluated_to = test_result,
                    assertion = that,
                )
                if msg:
                    result['msg'] = msg
                return ReturnData(conn=conn, result=result)
        return ReturnData(conn=conn, result=dict(msg='all assertions passed'))
| gpl-3.0 |
DaveRichmond-/vigra | vigranumpy/lib/arraytypes.py | 2 | 81560 | #######################################################################
#
# Copyright 2009-2011 by Ullrich Koethe
#
# This file is part of the VIGRA computer vision library.
# The VIGRA Website is
# http://hci.iwr.uni-heidelberg.de/vigra/
# Please direct questions, bug reports, and contributions to
# ullrich.koethe@iwr.uni-heidelberg.de or
# vigra@informatik.uni-hamburg.de
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#######################################################################
import sys
import copy
import numpy
import ufunc
import collections
import vigranumpycore
from vigranumpycore import AxisType, AxisInfo, AxisTags
def _preserve_doc(f):
    '''Decorator: prepend the docstring of the like-named numpy.ndarray
    attribute to f's own docstring (used when overriding ndarray methods).'''
    # getattr() instead of the original eval(): no string execution, and a
    # clean AttributeError if the name does not exist on numpy.ndarray
    npy_doc = getattr(numpy.ndarray, f.__name__).__doc__
    f.__doc__ = ("" if npy_doc is None else npy_doc) + \
                ("" if f.__doc__ is None else "\n" + f.__doc__)
    return f
# a decorator to finalize the return value of a
# dimension-reducing function (e.g. array.max())
def _finalize_reduce_result(f):
    '''Decorator for dimension-reducing methods (e.g. max(), sum()): resolve a
    string 'axis' key to its index and drop the reduced axis from the result's
    axistags.'''
    def new_f(self, axis=None, out=None):
        # the axis may be given by its key (e.g. 'x') instead of its index
        if type(axis) == str:
            axis = self.axistags.index(axis)
        res = f(self, axis, out)
        if out is None:
            if axis is not None:
                # one axis was reduced away => remove the corresponding tag
                res.axistags = self._copy_axistags()
                del res.axistags[axis]
            else:
                # this 'else' is necessary because numpy 1.6.0 gives
                #     type(res) == type(self)
                # instead of the desired
                #     type(res) == self.dtype
                # when res is scalar and self is a subclass of ndarray
                # (this is probably a bug in numpy, since it works correctly
                # when self is a plain ndarray)
                res = res.dtype.type(res)
        return res
    new_f.__doc__ = f.__doc__
    return new_f
def _numpyarray_overloaded_function(f, self, axis=None, dtype=None, out=None):
    '''Run a numpy reduction 'f' (e.g. numpy.sum) on a VigraArray: resolve a
    string axis key, apply 'f' to a plain ndarray view, and re-attach axistags
    (minus the reduced axis) to the result.'''
    if type(axis) == str:
        axis = self.axistags.index(axis)
    if axis is None:
        # full reduction => scalar result, no axistags to maintain
        return f(self.transposeToOrder('C').view(numpy.ndarray), dtype=dtype, out=out)
    else:
        res = f(self.view(numpy.ndarray), axis, dtype, out)
        if out is None:
            # restore VigraArray type and drop the tag of the reduced axis
            res = res.view(VigraArray)
            res.axistags = self._copy_axistags()
            del res.axistags[axis]
        return res
class classproperty(object):
    '''Descriptor that exposes a callable as a read-only property of the
    *class* itself. An optional second callable handles access through
    instances; otherwise instance access falls back to the class callable.'''
    def __init__(self, class_method, instance_method=None):
        self.__class_method = class_method
        self.__instance_method = instance_method
    def __get__(self, instance, cls):
        if instance is None or self.__instance_method is None:
            return self.__class_method(cls)
        return self.__instance_method(instance)
def newaxis(axisinfo=None):
    '''
    Create a new singleton axis via the indexing operator. This works similar to
    `numpy.newaxis`, but allows to provide an AxisInfo object for the new axis.
    For example::
        >>> s = vigra.ScalarImage((width, height))
        >>> s.axistags      # no channel axis
        x y
        >>> t = s[..., numpy.newaxis]
        >>> t.axistags      # with unknown axis type
        x y ?
        >>> t = s[..., vigra.newaxis(vigra.AxisInfo.c)]
        >>> t.axistags      # with channel axis
        x y c
    '''
    # None sentinel instead of the original 'axisinfo=AxisInfo()' default:
    # a mutable default is created once at import time and would be shared
    # (and potentially mutated) by every caller that omits the argument
    if axisinfo is None:
        axisinfo = AxisInfo()
    return axisinfo
def taggedView(array, axistags):
    '''
    Create a view to the given array with type :class:`~vigra.VigraArray` and the
    given axistags. This is essentially a shorthand for::
        >>> view = array.view(vigra.VigraArray)
        >>> view.axistags = copy.copy(axistags)
    if axistags is an instance of AxisTags. Otherwise, the function first attempts
    to convert the input to that type by calling VigraArray.defaultAxistags()
    '''
    if isinstance(axistags, AxisTags):
        tags = copy.copy(axistags)
    else:
        tags = VigraArray.defaultAxistags(axistags)
    if len(tags) != array.ndim:
        raise RuntimeError('vigra.taggedView(): array.ndim must match len(axistags).')
    view = array.view(VigraArray)
    view.axistags = tags
    return view
def dropChannelAxis(array):
    '''
    Return the view created by ``array.``:meth:`~vigra.VigraArray.dropChannelAxis` if
    the given array supports that function, or return ``array`` unchanged otherwise.
    '''
    # 'except Exception' instead of the original bare 'except:' so that
    # KeyboardInterrupt and SystemExit are not silently swallowed by this
    # best-effort fallback
    try:
        return array.dropChannelAxis()
    except Exception:
        return array
# FIXME: This is a workaround for the disabled C++ function for the same purpose.
# Enable the C++ version when boost 1.41 is available on all relevant platforms.
def _AxisTags_fromJSON(json_rep):
    '''
    Construct a new AxisTags object from the given JSON representation.
    This is mainly used to reconstruct arrays from HDF5 datasets with
    a suitable axistags attribute (see :func:`~vigra.impex.readHDF5`).
    '''
    import ast
    # ast.literal_eval() only accepts literal expressions; the original eval()
    # would execute arbitrary code embedded in a malicious HDF5 attribute.
    # AxisTags.toJSON() emits a plain dict/list/number/string literal, so the
    # accepted inputs are unchanged.
    tag_dict = ast.literal_eval(json_rep)
    tag_list = []
    for tags in tag_dict['axes']:
        tags['typeFlags'] = AxisType(tags['typeFlags'])
        tag_list.append(AxisInfo(**tags))
    return AxisTags(tag_list)
def _AxisTags__reduce__(self):
    '''
    Enable pickling of AxisTags (delegates to the JSON representation).
    '''
    return _AxisTags_fromJSON, (self.toJSON(),)
# monkey-patch the C++-exported AxisTags class: register the pickling hook
# and expose fromJSON() as a static factory method
AxisTags.__reduce__ = _AxisTags__reduce__
AxisTags.fromJSON = staticmethod(_AxisTags_fromJSON)
AxisTags.fromJSON.__doc__ = _AxisTags_fromJSON.__doc__
# How to construct a VigraArray
#
# case 1: from shape and order or axistags
# conventions: - shape has explicit channel axis
# - 'A' order defaults to 'V' order
# - order implies axistags and vice versa, you cannot provide both
# * look up the array type. If it is a plain ndarray, skip axistags
# * construct array according to order, optionally init with a constant
# * create and assign normalized axistags, if not explicitly given
# * optionally remove a singleton channel dimension (while we know where it is)
# * permute the array by the inverse normalization
# * assign axistags, if explicitly given (check compatibility)
#
# case 2: from another array
# * if taget order is 'A' or source and target order are equal, copy as is (including axistags)
# * otherwise, normalize the shape according to target order and
# remember the normalizing permutation
# * construct array in normalized order
# * permute the array by the inverse normalization
# * copy original data and axistags
# direct alias of the C++ factory; called as (cls, shape, dtype, axistags, init)
_constructArrayFromAxistags = vigranumpycore.constructArrayFromAxistags
def _constructArrayFromOrder(cls, shape, dtype, order, init):
    '''Allocate a 'cls' array of the given shape whose strides and axistags
    follow the requested order ('C', 'F', 'V', or 'A').'''
    axistags = VigraArray.defaultAxistags(len(shape), order)
    return _constructArrayFromAxistags(cls, shape, dtype, axistags, init)
def _constructArrayFromArray(cls, obj, dtype, order, init, axistags):
    '''Allocate a new 'cls' array shaped like 'obj' in the given order
    ('A' replicates obj's stride ordering), optionally copy obj's values,
    and attach 'axistags' (or drop stale ones) when cls supports them.'''
    if order is None:
        order = 'A'
    if order == 'A':
        # we cannot use ndarray.copy('A') here, because this only preserves 'C' and 'F'
        # order, whereas any other order is silently transformed into 'C'
        # we must also make sure that a singleton channel index has the smallest stride
        # (otherwise, strides in the copy may not exactly match those in obj)
        strides = list(obj.strides)
        try:
            channelIndex = obj.channelIndex
            if channelIndex < obj.ndim and obj.shape[channelIndex] == 1:
                strides[channelIndex] = 0
        except:
            # plain ndarrays have no 'channelIndex' attribute -- ignore
            pass
        # replicate obj's stride ordering: allocate in 'F' order on the
        # stride-sorted shape, then permute back to obj's axis order
        permutation = list(numpy.array(strides).argsort())
        norm_shape = tuple(numpy.array(obj.shape)[permutation])
        inverse_permutation = list(numpy.array(permutation).argsort())
        array = numpy.ndarray.__new__(cls, norm_shape, dtype, order='F')
        array = array.transpose(inverse_permutation)
    else:
        array = _constructArrayFromOrder(cls, obj.shape, dtype, order, False)
    if init:
        array[...] = obj
    if cls is not numpy.ndarray:
        if axistags is not None:
            array.axistags = axistags
        elif hasattr(array, 'axistags'):
            # no tags supplied: remove tags inherited via __array_finalize__
            del array.axistags
    return array
def _constructArrayFromPickle(_arraypickle, _permutation, _axistags):
    '''Rebuild a VigraArray from the tuple produced by VigraArray.__reduce__():
    the ndarray pickle triple, the stride-restoring permutation, and the
    axistags as a JSON string.'''
    rebuild, rebuild_args = _arraypickle[0], _arraypickle[1]
    array = rebuild(*rebuild_args)
    array.__setstate__(_arraypickle[2])
    array = array.transpose(_permutation)
    array.axistags = AxisTags.fromJSON(_axistags)
    return array
def _constructArrayFromZMQSocket(socket, flags=0, copy=True, track=False):
    '''Receive an array sent via VigraArray.sendSocket(): read the metadata
    frame, the axistags frame, and the raw data frame, then restore the
    original axis permutation.'''
    metadata = socket.recv_json(flags=flags)
    axistags = AxisTags.fromJSON(socket.recv(flags=flags))
    # NOTE: 'buffer' is Python-2 only; wraps the received frame without copying
    data = buffer(socket.recv(flags=flags, copy=copy, track=track))
    array = numpy.frombuffer(data, dtype=metadata['dtype']).reshape(metadata['shape'])
    array = taggedView(array.transpose(metadata['permutation']), axistags)
    return array
##################################################################
class VigraArray(numpy.ndarray):
'''
This class extends numpy.ndarray with the concept of **axistags**
which encode the semantics of the array's axes. VigraArray overrides all
numpy.ndarray methods in order to handle axistags in a sensible way.
In particular, operations acting on two arrays simultaneously (e.g.
addition) will first transpose the arguments such that their axis
ordering matches.
Constructor:
.. method:: VigraArray(obj, dtype=numpy.float32, order=None, init=True, value=None, axistags=None)
:param obj: an array or shape object (see below)
:param dtype: desired element type
:param order: desired memory layout (see below)
:param init: True: initialize the image with zeros; False: do not
initialize the image
:type init: boolean
:param value: initialize the image with this value (overrides init)
:type value: convertible to dtype
:param axistags: the AxisTags object of the new array. The length of
axistags must match the array's shape. It axistags=None,
obj.axistags is used if it exists. Otherwise, a new
axistags object is created by a call to
:meth:`~vigra.VigraArray.defaultAxistags`.
**obj** may be one of the following
* If obj is a numpy.ndarray or a subtype, a copy of obj with the given
dtype, order and resulting class VigraArray is created. If obj.axistags
exists, the new array will have these axistags as well, unless new
axistags are explicitly passed to the constructor.
* If obj is a sequence, it is interpreted as a shape.
* Otherwise, or if shape and axistags are incompatible, an exception
is raised.
**order** can be 'C' (C order), 'F' (Fortran order), 'V' (VIGRA
order), 'A' (any), or None. This parameter controls the order of strides
and axistags (unless axistags are explicit passed into the constructor).
See the :ref:`order definitions <array-order-parameter>` for details. If
'order=None', the order is determined by :attr:`VigraArray.defaultOrder`.
'''
###############################################################
# #
# a number of helper functions related to axistags #
# #
###############################################################
# a number of helper functions related to axistags
# IMPORTANT: do not remove or rename this function, it is called from C++
    @classproperty
    def defaultOrder(cls):
        '''
        Get the default axis ordering, currently 'V' (VIGRA order).
        This is a classproperty: read it as 'VigraArray.defaultOrder'
        (no call needed).
        '''
        return 'V'
# IMPORTANT: do not remove or rename this function, it is called from C++
@staticmethod
def defaultAxistags(tagSpec, order=None, noChannels=False):
'''
Get default axistags for the given specification 'tagSpec'. TagSpec can be the
number of dimensions of the array (``array.ndim``, must be <= 5) or a string
containing a sequence of axis keys (only the default keys 'x', 'y', 'z', 't',
and 'c' are currently supported). The 'order' parameter determines the axis
ordering, see the :ref:`order definitions <array-order-parameter>` for details.
If 'noChannels' is True, there will be no channel axis. Examples::
>>> vigra.VigraArray.defaultAxistags(3)
x y c
>>> vigra.VigraArray.defaultAxistags(4)
x y z c
>>> vigra.VigraArray.defaultAxistags(5)
x y z t c
>>> vigra.VigraArray.defaultAxistags(3, order='C')
y x c
>>> vigra.VigraArray.defaultAxistags(2, noChannels=True)
x y
>>> vigra.VigraArray.defaultAxistags(3, noChannels=True)
x y z
>>> vigra.VigraArray.defaultAxistags(4, noChannels=True)
x y z t
>>> vigra.VigraArray.defaultAxistags('xty')
x t y
>>> vigra.VigraArray.defaultAxistags('xty', order='V')
x y t
'''
if type(tagSpec) == str:
taglist = [eval('AxisInfo.' + k) for k in tagSpec]
else:
start = 1 if noChannels else 0
end = start + tagSpec
taglist = [AxisInfo.c, AxisInfo.x, AxisInfo.y, AxisInfo.z, AxisInfo.t][start:end]
if order is None or order == 'A':
order = VigraArray.defaultOrder
tags = AxisTags(taglist)
if order is not None:
tags.transpose(tags.permutationToOrder(order))
return tags
# IMPORTANT: do not remove or rename this function, it is called from C++
    @staticmethod
    def _copyValuesImpl(target, source):
        '''Copy 'source' into 'target' after squeezing both and aligning them
        to numpy order; raise RuntimeError when their axistags are incompatible.'''
        # best effort: plain ndarrays lack transposeToNumpyOrder(), so they
        # are used as-is when the calls fail
        try:
            target = target.squeeze()
            target = target.transposeToNumpyOrder()
        except:
            pass
        try:
            source = source.squeeze()
            source = source.transposeToNumpyOrder()
        except:
            pass
        try:
            compatible = source.axistags.compatible(target.axistags)
        except:
            # at least one side has no axistags => assume compatibility
            compatible = True
        if not compatible:
            raise RuntimeError("VigraArray._copyValuesImpl(): incompatible axistags")
        target[...] = source
# IMPORTANT: do not remove or rename this function, it is called from C++
    @staticmethod
    def _empty_axistags(ndim):
        '''Create an axistags object with 'ndim' non-informative entries.
        That is, all axisinfo objects are '?'.
        '''
        return AxisTags(ndim)
def _copy_axistags(self):
'''Create a copy of 'self.axistags'. If the array doesn't have axistags, _empty_axistags()
will be returned.
'''
return copy.copy(getattr(self, 'axistags', self._empty_axistags(self.ndim)))
def _transform_axistags(self, index):
if hasattr(self, 'axistags'):
return self.axistags.transform(index, self.ndim)
else:
return self._empty_axistags(self.ndim)
    def _transpose_axistags(self, *permutation):
        '''Create a copy of self.axistags with transposed entries.
        The permutation may be given either as separate arguments or as a
        single sequence argument (mirroring ndarray.transpose()).
        '''
        if hasattr(self, 'axistags'):
            res = copy.copy(self.axistags)
            try:
                # was the permutation passed as one sequence argument?
                len(permutation[0])
                res.transpose(permutation[0])
            except:
                # no: it was given as individual scalar arguments
                res.transpose(permutation)
            return res
        else:
            return self._empty_axistags(self.ndim)
###############################################################
# #
# standard array functions #
# #
###############################################################
    def __new__(cls, obj, dtype=numpy.float32, order=None, init=True, value=None, axistags=None):
        # see the class docstring for the meaning of all parameters
        if value is not None:
            init = False   # 'value' overrides zero-initialization
        if isinstance(obj, numpy.ndarray):
            # case 1: copy-construct from an existing array
            if axistags is None:
                if hasattr(obj, 'axistags'):
                    axistags = copy.copy(obj.axistags)
                else:
                    raise RuntimeError("VigraArray(): axistags must be given when constructing from plain array.")
            elif obj.ndim != len(axistags):
                raise RuntimeError("VigraArray(): axistags have wrong length.")
            if order is None:
                # no explicit order => allocate strides according to the axistags
                res = _constructArrayFromAxistags(cls, obj.shape, dtype, axistags, init)
                if init:
                    res[...] = obj
            else:
                res = _constructArrayFromArray(cls, obj, dtype, order, init, axistags)
        else:
            # case 2: 'obj' is a shape sequence
            if axistags is None:
                if order is None:
                    order = VigraArray.defaultOrder
            elif len(axistags) != len(obj):
                raise RuntimeError("VigraArray(): axistags have wrong length.")
            if order is None:
                res = _constructArrayFromAxistags(cls, obj, dtype, axistags, init)
            else:
                res = _constructArrayFromOrder(cls, obj, dtype, order, init)
            if cls is not numpy.ndarray and axistags is not None:
                res.axistags = axistags
        if value is not None:
            res.fill(value)
        return res
    __array_priority__ = 15.0   # prefer VigraArray's overloads over plain ndarray in mixed ufunc calls
    def __array_finalize__(self, obj):
        # propagate axistags to views and results derived from 'obj'
        if hasattr(obj, 'axistags'):
            self.axistags = obj.axistags
    def __copy__(self, order='A'):
        '''Shallow copy (used by copy.copy()); keeps the stride order by
        default ('A') and duplicates the axistags so the copy can be
        modified independently.'''
        result = numpy.ndarray.__copy__(self, order)
        result.axistags = result._copy_axistags()
        return result
    @_preserve_doc
    def __deepcopy__(self, memo):
        # numpy.ndarray.__deepcopy__ always creates C-order arrays =>
        # transpose self accordingly, and transpose back after the copy
        result = numpy.ndarray.__deepcopy__(self.transposeToNumpyOrder(), memo)
        result = result.transpose(self.permutationFromNumpyOrder())
        # register the result before deep-copying __dict__ (handles cycles)
        memo[id(self)] = result
        result.__dict__ = copy.deepcopy(self.__dict__, memo)
        return result
    def __repr__(self):
        '''Detailed representation: class name, shape, axistags, dtype, data.'''
        return "%s(shape=%s, axistags=%s, dtype=%s, data=\n%s)" % \
          (self.__class__.__name__, str(self.shape), repr(self.axistags), str(self.dtype), str(self))
    def __str__(self):
        '''Print the data transposed to VIGRA order when axistags are present.'''
        try:
            # align the printout with the conventional x/y layout; plain
            # arrays without axistags are printed in their given order
            self = self.transposeToVigraOrder().transpose()
        except:
            pass
        return str(self.view(numpy.ndarray))
    def __reduce__(self):
        '''
        Enable pickling of a VigraArray, including axistags. The stride ordering
        will be preserved in the unpickled array. Note that user-defined attributes
        will not be saved and restored.
        '''
        # since the stride ordering is not necessarily preserved by ndarray's pickling
        # functions, we need to normalize stride ordering, and permute to the original
        # ordering upon reconstruction (the axistags travel as their JSON string)
        pickled = numpy.ndarray.__reduce__(self.transposeToNumpyOrder())
        return _constructArrayFromPickle, (pickled, self.permutationFromNumpyOrder(), self.axistags.toJSON())
    @staticmethod
    def receiveSocket(socket, flags=0, copy=True, track=False):
        '''
        Reconstruct an array that has been transferred via a ZMQ socket by a call to
        VigraArray.sendSocket(). This only works when the 'zmq' module is available.
        The meaning of the arguments is described in zmq.Socket.recv().
        '''
        # counterpart of sendSocket(): expects metadata, axistags, data frames
        return _constructArrayFromZMQSocket(socket, flags, copy, track)
###############################################################
# #
# array I/O and display #
# #
###############################################################
def writeImage(self, filename, dtype = '', compression = '', mode='w'):
'''Write an image to a file.
Consult :func:`vigra.impex.writeImage` for detailed documentation'''
import vigra.impex
ndim = self.ndim
if self.channelIndex < ndim:
ndim -= 1
if ndim != 2:
raise RuntimeError("VigraArray.writeImage(): array must have 2 non-channel axes.")
vigra.impex.writeImage(self, filename, dtype, compression, mode)
def writeSlices(self, filename_base, filename_ext, dtype = '', compression = ''):
'''Write a volume to a sequence of files.
Consult :func:`vigra.impex.writeVolume` for detailed documentation.
'''
import vigra.impex
ndim = self.ndim
if self.channelIndex < ndim:
ndim -= 1
if ndim != 3:
raise RuntimeError("VigraArray.writeSlices(): array must have 3 non-channel axes.")
vigra.impex.writeVolume(self, filename_base, filename_ext, dtype, compression)
    def writeHDF5(self, filenameOurGroup, pathInFile):
        '''Write the array to a HDF5 file.
        This is just a shortcut for :func:`vigra.impex.writeHDF5`
        NOTE(review): the parameter name 'filenameOurGroup' is presumably a
        typo for 'filenameOrGroup', but is kept for keyword-call compatibility.
        '''
        import vigra.impex
        vigra.impex.writeHDF5(self, filenameOurGroup, pathInFile)
    def sendSocket(self, socket, flags=0, copy=True, track=False):
        '''
        Send array and metadata over a ZMQ socket. Only works if the 'zmq' module is available.
        The meaning of the arguments is described in zmq.Socket.send().
        '''
        import zmq
        # transmit in normalized (numpy) order and include the permutation,
        # so the receiver can restore the original axis ordering
        transposed = self.transposeToNumpyOrder().view(numpy.ndarray)
        metadata = dict(
                      dtype = str(transposed.dtype),
                      shape = transposed.shape,
                      permutation = self.permutationFromNumpyOrder()
                   )
        socket.send_json(metadata, flags|zmq.SNDMORE)
        socket.send(self.axistags.toJSON(), flags|zmq.SNDMORE)
        return socket.send(transposed, flags, copy=copy, track=track)
    def imshow(self):
        '''
        Shorthand for 'vigra.imshow(self)'.
        '''
        # local import -- presumably to avoid a circular import at load time
        import vigra
        return vigra.imshow(self)
    def show(self, normalize=True):
        '''
        Display this image in a vigra.pyqt.ImageWindow.
        The channels are intepreted as follows: 1 channel = gray image,
        2 channels = gray + alpha, 3 channels = RGB, 4 channels = RGB + alpha.
        The parameter `normalize` can be used to normalize an image's
        value range to 0..255:
        `normalize` = (nmin, nmax):
            scale & clip image values from nmin..nmax to 0..255
        `normalize` = nmax:
            lets nmin default to zero, i.e. scale & clip the range 0..nmax
            to 0..255
        `normalize` = True: (default)
            scale the image's actual range min()..max() to 0..255
        `normalize` = False:
            don't scale the image's values
        '''
        from pyqt.imagewindow import showImage
        # count the non-channel dimensions; only 2D images can be shown
        ndim = self.ndim
        channelIndex = self.channelIndex
        if channelIndex < ndim:
            if self.channels > 4:
                raise RuntimeError("VigraArray.show(): array can have at most 4 channels.")
            ndim -= 1
        if ndim != 2:
            raise RuntimeError("VigraArray.show(): array must have 2 non-channel axes.")
        return showImage(self.transposeToVigraOrder(), normalize)
    def qimage(self, normalize=True):
        '''
        Convert this image to a Qt QImage (mainly for display purposes).
        The present image must have 1, 2, 3, or 4 channels, and the resulting
        QImage will have QImage.Format_Indexed8 iff there was only one
        channel and QImage.Format_[A]RGB32 otherwise (with the last of
        2/4 channels being used as alpha channel).
        The parameter `normalize` can be used to normalize an image's
        value range to 0..255:
        `normalize` = (nmin, nmax):
            scale & clip image values from nmin..nmax to 0..255
        `normalize` = nmax:
            lets nmin default to zero, i.e. scale & clip the range 0..nmax
            to 0..255
        `normalize` = True: (default)
            scale the image's actual range min()..max() to 0..255
        `normalize` = False:
            don't scale the image's values
        '''
        # qimage2ndarray is optional; fall back to an informative error message
        try:
            import qimage2ndarray
        except Exception, e:
            from vigra import _fallbackModule
            _fallbackModule('qimage2ndarray',
            '''
            %s
            If qimage2ndarray is missing on your system, download it from
            http://pypi.python.org/pypi/qimage2ndarray/.''' % str(e))
            import qimage2ndarray
        # only 2D images (plus an optional channel axis) can be converted
        ndim = self.ndim
        if self.channelIndex < ndim:
            ndim -= 1
        if ndim != 2:
            raise RuntimeError("VigraArray.qimage(): array must have 2 non-channel axes.")
        yxImage = self.transposeToNumpyOrder()
        if self.channels == 1:
            q = qimage2ndarray.gray2qimage(yxImage.dropChannelAxis(), normalize)
        else:
            q = qimage2ndarray.array2qimage(yxImage, normalize)
        return q
    def asRGB(self, normalize=True):
        '''
        Expand a scalar array (i.e. an array with a single channel) into an RGB array with
        three identical color channels. This is useful when you want to paste color
        annotations (e.g. user labels) into the array.
        The parameter `normalize` can be used to normalize the array's
        value range to 0..255:
        `normalize` = (nmin, nmax):
            scale & clip array values from nmin..nmax to 0..255
        `normalize` = True: (default)
            scale the array's actual range min()..max() to 0..255
        `normalize` = False:
            don't scale the array's values
        '''
        if self.channels != 1:
            raise RuntimeError("VigraArray.asRGB(): array must have a single channel.")
        img = self.dropChannelAxis()
        shape = img.shape + (3,)
        axistags = copy.copy(img.axistags)
        axistags.append(AxisInfo.c)
        res = VigraArray(shape, axistags=axistags)
        if normalize:
            try:
                # was 'normalize' given as an explicit (nmin, nmax) pair?
                m, M = normalize
                clip = True
            except:
                # no: use the actual value range (no clipping needed then)
                m, M = img.min(), img.max()
                clip = False
            if m == M:
                # constant image: leave the zero-initialized result as-is
                return res
            f = 255.0 / (M - m)
            img = f * (img - m)
            if clip:
                img = numpy.minimum(255.0, numpy.maximum(0.0, img))
        res[...,0] = img
        res[...,1] = img
        res[...,2] = img
        return res
###############################################################
# #
# new functionality enabled by axistags #
# #
###############################################################
    def copyValues(self, other):
        '''
        Copy the values of an array to another one. This is similar to::
            self[...] = other
        but will first transpose both arrays so that axistags are aligned. If
        there is no valid alignment, RuntimeError will be raised.
        '''
        # delegates to the static helper (also called from C++)
        self._copyValuesImpl(self, other)
# IMPORTANT: do not remove or rename this property, it is called from C++
    @property
    def channelIndex(self):
        '''
        The index of the channel axis according to the axistags.
        For example, when axistags are 'x y c', the channel index is 2.
        If the axistags contain no channel axis, self.ndim is returned.
        '''
        # thin wrapper: the lookup is implemented by the C++ AxisTags class
        return self.axistags.channelIndex
    # IMPORTANT: do not remove or rename this property, it is called from C++
    @property
    def innerNonchannelIndex(self):
        '''
        The index of the innermost non-channel axis according to the axistags.
        The innermost axis is determined by the AxisInfo sorting rules (see
        the :ref:`order definitions <array-order-parameter>` for details).
        For example, when axistags are 'x y c', the innerNonchannelIndex is 0.
        '''
        # thin wrapper: the lookup is implemented by the C++ AxisTags class
        return self.axistags.innerNonchannelIndex
@property
def channels(self):
'''
The number of channels in this array (shape of the 'c' axis).
If the axistags contain no channel axis, the number of channels is implicitly 1.
'''
i = self.channelIndex
if i < self.ndim:
return self.shape[i]
else:
return 1
@property
def width(self):
'''
The width of the array (shape of the 'x' axis).
If the axistags contain no 'x' axis, RuntimeError will be raised.
'''
i = self.axistags.index('x')
if i < self.ndim:
return self.shape[i]
else:
raise RuntimeError("VigraArray.width(): axistag 'x' does not exist.")
@property
def height(self):
'''
The height of the array (shape of the 'y' axis).
If the axistags contain no 'y' axis, RuntimeError will be raised.
'''
i = self.axistags.index('y')
if i < self.ndim:
return self.shape[i]
else:
raise RuntimeError("VigraArray.height(): axistag 'y' does not exist.")
@property
def depth(self):
'''
The depth of the array (shape of the 'z' axis).
If the axistags contain no 'z' axis, RuntimeError will be raised.
'''
i = self.axistags.index('z')
if i < self.ndim:
return self.shape[i]
else:
raise RuntimeError("VigraArray.depth(): axistag 'z' does not exist.")
@property
def duration(self):
'''
The number of time steps in the array (shape of the 't' axis).
If the axistags contain no 't' axis, RuntimeError will be raised.
'''
i = self.axistags.index('t')
if i < self.ndim:
return self.shape[i]
else:
raise RuntimeError("VigraArray.duration(): axistag 't' does not exist.")
    @property
    def spatialDimensions(self):
        '''
        The number of spatial axes in the array.
        That is, the number of entries in the axistags where the flag 'AxisType.Space'
        is set.
        '''
        # counted by the C++ AxisTags implementation
        return self.axistags.axisTypeCount(AxisType.Space)
    def iterImpl(self, type):
        '''Generator backing channelIter()/spaceIter()/timeIter(): yield the
        sub-array for every coordinate along all axes whose AxisInfo matches
        the AxisType flag 'type'. If no axis matches, yield the whole array
        once.
        NOTE(review): the parameter shadows the builtin 'type'; kept for
        interface compatibility.
        '''
        axes = [k for k in xrange(self.ndim) if self.axistags[k].isType(type)]
        if axes:
            # iterate the matching axes in reverse AxisInfo sort order
            axes.sort(key=lambda x: self.axistags[x], reverse=True)
            slices = [slice(None)]*self.ndim
            for point in numpy.ndindex(*(self.shape[k] for k in axes)):
                for j in xrange(len(point)):
                    slices[axes[j]] = point[j]
                yield self[tuple(slices)]
        else:
            yield self
    def channelIter(self):
        '''
        Create an iterator over the channels of the array.
        In each iteration, you get the array corresponding to a single channel.
        If the axistags contain no channel axis, there is only one iteration
        which yields the entire array. Example::
            >>> rgb = vigra.RGBImage((200, 100))
            >>> rgb.axistags
            x y c
            >>> red, green, blue = rgb.channelIter()
            >>> red.axistags
            x y
            >>> red.shape
            (200, 100)
        '''
        # delegates to iterImpl() with the Channels axis type
        return self.iterImpl(AxisType.Channels)
    def spaceIter(self):
        '''
        Create an iterator over all the spatial coordinates in the array.
        In each iteration, you get the value corresponding to a single
        coordinate location. If the axistags contain no spatial axes,
        there is only one iteration which yields the entire array. Example::
            >>> s = vigra.ScalarImage((2,2))
            >>> s.ravel()[...] = range(4)
            >>> for p in s.spaceIter():
            ....     print p
            0.0
            1.0
            2.0
            3.0
        '''
        # delegates to iterImpl() with the Space axis type
        return self.iterImpl(AxisType.Space)
    def timeIter(self):
        '''
        Create an iterator over the time points of the array.
        In each iteration, you get the array corresponding to a single time point.
        If the axistags contain no time axis, there is only one iteration
        which yields the entire array. Example::
            >>> from vigra import *
            >>> axistags = AxisTags(AxisInfo.t, AxisInfo.x, AxisInfo.y)
            >>> timesteps, width, height = 2, 200, 100
            >>> image_sequence = Image((timesteps, width, height), axistags=axistags)
            >>> step1, step2 = image_sequence.timeIter()
        '''
        # delegates to iterImpl() with the Time axis type
        return self.iterImpl(AxisType.Time)
def sliceIter(self, key='z'):
'''
Create an iterator over a single spatial axis of the array.
In each iteration, you get the array corresponding to one coordinate
along the axis given by 'key'. For example, to iterate along the z-axis
to get all x-y-slices in turn, you write::
>>> volume = vigra.Volume((width, height, depth))
>>> for slice in volume.sliceIter('z'):
... processSlice(slice)
'''
i = self.axistags.index(key)
if i < self.ndim:
if not self.axistags[i].isSpatial():
raise RuntimeError("VigraArray.sliceIter(): %s is not a spatial axis." % key)
for k in xrange(self.shape[i]):
yield self.bindAxis(i, k)
else:
yield self
def bindAxis(self, which, index=0):
'''
Bind the axis identified by 'which' to the given 'index'.
This is similar to::
array[:, index, ...]
but you do not need to know the position of the axis when you use the
axis key (according to axistags). For example, to get the green channel
of an RGBImage, you write::
>>> rgb = vigra.RGBImage((200, 100))
>>> green = rgb.bindAxis('c', 1)
This gives the correct result irrespective of the axis ordering.
'''
if type(which) == str:
which = self.axistags.index(which)
return self[(slice(None),)*which + (index,) + (slice(None),)*(self.ndim-which-1)]
def dropChannelAxis(self, ignoreMultiChannel=False):
'''
Drop the channel axis when it is a singleton.
This function is for easy transformation of an array shaped
(width, height, 1) into (width, height). A RuntimeError
is raised when there is more than one channel, unless ignoreMultiChannel=True,
in which case 'self' is returned.
'''
ci = self.channelIndex
if ci == self.ndim:
return self
if self.shape[ci] != 1:
if ignoreMultiChannel:
return self
raise RuntimeError("dropChannelAxis(): only allowed when there is a single channel.")
return self.bindAxis(ci, 0)
def insertChannelAxis(self, order=None):
'''
Insert a singleton channel axis.
This function is for easy transformation of an array shaped
(width, height) into (width, height, 1). The 'order' parameter
determines the position of the new axis: when order is 'F', it
will become the first axis, otherwise it will become the last
one. A RuntimeError is raised when the array already contains a
channel axis.
'''
ci = self.channelIndex
if ci != self.ndim:
return self
if order == 'F':
res = self[numpy.newaxis,...]
res.axistags[0] = AxisInfo.c
else:
res = self[..., numpy.newaxis]
res.axistags[-1] = AxisInfo.c
return res
def withAxes(self, *axiskeys):
'''
Create a view containing the desired axis keys in the given
order. When the array contains an axis not listed, the axis
will be dropped if it is a singfleton (otherwise, an exception
is raised). If a requested key is not present in this array,
a singleton axis will be inserted at that position, if the
missing key is among the known standard keys (otherwise, an
exception is raised). The function fails if this array contains
axes of unknown type (key '?'). If 'self' is already suitable,
it is simply retured without generating a new view.
Usage::
>>> a = vigra.ScalarVolume((200, 100))
>>> a.axistags
x y
>>> a.shape
(200, 100)
>>> b = a.withAxes('y', 'x', 'c')
>>> b.axistags
y x c
>>> b.shape
(100, 200, 1)
'''
if repr(self.axistags) == ' '.join(axiskeys):
return self
axisinfo = []
slicing = [0]*self.ndim
for key in axiskeys:
index = self.axistags.index(key)
if index < self.ndim:
axisinfo.append(self.axistags[index])
slicing[index] = slice(None)
else:
axisinfo.append(eval('AxisInfo.%s' % key))
slicing.append(axisinfo[-1])
for k in xrange(self.ndim):
if self.axistags[k].isType(AxisType.UnknownAxisType):
raise RuntimeError("VigraArray.ensureAxes(): array must not contain axes of unknown type (key '?').")
if slicing[k] == 0 and self.shape[k] != 1:
raise RuntimeError("VigraArray.ensureAxes(): cannot drop non-singleton axis '%s'." % self.axistags[k].key)
permutation = AxisTags(axisinfo).permutationFromNumpyOrder()
return self[slicing].transposeToNumpyOrder().transpose(permutation)
def view5D(self, order='C'):
'''
Create a 5-dimensional view containing the standard tags
'x', 'y', 'z', 't', 'c' in the desired 'order' (which can be
'C', 'F', and 'V' with the usual meaning). If 'self' has an
axis key that is not among the five admissible keys, an
exception is raised. Axes missing in 'self' are added as
singleton axes with the appropriate tags.
'''
stdTags = ['x', 'y', 'z', 't', 'c']
for tag in self.axistags:
try:
del stdTags[stdTags.index(tag.key)]
except:
raise RuntimeError("VigraArray.view5D(): array contains unsuitable axis key '%s'." % tag.key)
index = [Ellipsis] + [newaxis(eval('AxisInfo.' + k)) for k in stdTags]
return self[index].transposeToOrder(order)
    def permutationToOrder(self, order):
        '''Create the permutation that would transpose this array into
        an array view with the given order (where order can be 'A',
        'C', 'F', 'V' with the usual meaning).
        '''
        # the actual computation lives in the C++ AxisTags class
        return list(self.axistags.permutationToOrder(order))
    def permutationToNormalOrder(self, types=AxisType.AllAxes):
        '''Create the permutation that would transpose this array to
        normal order (that is, from the current axis order into
        ascending order, e.g. 'x y c' into 'c x y').
        If 'types' is not 'AxisType.AllAxes', only the axes with the
        desired types are considered.
        '''
        # the actual computation lives in the C++ AxisTags class
        return list(self.axistags.permutationToNormalOrder(types))
    def permutationFromNormalOrder(self):
        '''Create the permutation that would transpose an array that is
        in normal (ascending) order into the axis order of this array.
        (e.g. 'c x y' into 'x y c').
        '''
        # all permutation* wrappers delegate to the C++ AxisTags class
        return list(self.axistags.permutationFromNormalOrder())
    def permutationToNumpyOrder(self):
        '''Create the permutation that would transpose this array to
        numpy order (that is, from the current axis order into
        descending order, e.g. 'x y c' into 'y x c').
        '''
        return list(self.axistags.permutationToNumpyOrder())
    def permutationFromNumpyOrder(self):
        '''Create the permutation that would transpose an array that is
        in numpy (descending) order into the axis order of this array.
        (e.g. 'y x c' into 'x y c').
        '''
        return list(self.axistags.permutationFromNumpyOrder())
    def permutationToVigraOrder(self):
        '''Create the permutation that would transpose this array to
        VIGRA order (that is, from the current axis order into
        ascending spatial order, but with the channel axis at the
        last position, e.g. 'c x y' into 'x y c').
        '''
        return list(self.axistags.permutationToVigraOrder())
    def permutationFromVigraOrder(self):
        '''Create the permutation that would transpose an array that is
        in VIGRA order (ascending spatial order, but with the channel
        axis at the last position) into the axis order of this array.
        (e.g. 'x y c' into 'c x y').
        '''
        return list(self.axistags.permutationFromVigraOrder())
def transposeToOrder(self, order):
'''
Get a transposed view onto this array according to the given 'order'.
Possible orders are:
'A':
return the array unchanged
'C':
transpose to descending axis order (e.g. 'z y x c')
'F':
transpose to ascending axis order (e.g. 'c x y z')
'V':
transpose to VIGRA order, i.e. ascending spatial axes, but
the channel axis is last (e.g. 'x y z c')
'''
if order == 'A':
return self
permutation = self.permutationToOrder(order)
return self.transpose(permutation)
def transposeToDefaultOrder(self):
'''Equivalent to self.transposeToOrder(VigraArray.defaultOrder).
'''
return self.transposeToOrder(VigraArray.defaultOrder)
def transposeToNormalOrder(self):
'''Equivalent to self.transposeToOrder('F').
'''
return self.transposeToOrder('F')
def transposeToVigraOrder(self):
'''Equivalent to self.transposeToOrder('V').
'''
return self.transposeToOrder('V')
def transposeToNumpyOrder(self):
'''Equivalent to self.transposeToOrder('C').
'''
return self.transposeToOrder('C')
@property
def T(self):
'''
Equivalent to self.transpose()
'''
return self.transpose()
    def __getitem__(self, index):
        '''
        ``array.__getitem__(index)`` implements the indexing operator ``array[index]``.
        In addition to the usual numpy.ndarray indexing functionality, this function
        also updates the axistags of the result array. There are three cases:
          * getitem creates a scalar value => no axistags are required
          * getitem creates an array view => axistags are transferred from the
            corresponding axes of the base array
          * getitem creates a copy of an array (fancy indexing) => all axistags are '?'
        If the index contains 'numpy.newaxis', a new singleton axis is inserted at the
        appropriate position, whose axisinfo is set to '?' (unknown). If the index contains
        'vigra.newaxis(axisinfo)', the singleton axis will get the given axisinfo.
        '''
        try:
            res = numpy.ndarray.__getitem__(self, index)
        except:
            # The index may contain AxisInfo objects (created by vigra.newaxis()),
            # which plain ndarray indexing rejects.  Retry with every AxisInfo
            # replaced by None (i.e. numpy.newaxis); the axistag transfer below
            # still sees the original index.  Non-iterable indices cannot
            # contain AxisInfo, so their failure is genuine: re-raise.
            if not isinstance(index, collections.Iterable):
                raise
            res = numpy.ndarray.__getitem__(self,
                           map(lambda x: None if isinstance(x, AxisInfo) else x, index))
        if res is not self and hasattr(res, 'axistags'):
            if res.base is self or res.base is self.base:
                # result is a view of this array => derive axistags from the index
                res.axistags = res._transform_axistags(index)
            else:
                # fancy indexing produced a copy => all axistags become unknown
                res.axistags = res._empty_axistags(res.ndim)
        return res
def subarray(self, p1, p2=None):
'''
Construct a subarray view from a pair of points. The first point denotes the start
of the subarray (inclusive), the second its end (exclusive). For example,
a.subarray((1,2,3), (4,5,6)) # equivalent to a[1:4, 2:5, 3:6]
The given points must have the same dimension, otherwise an IndexError is raised.
If only one point is given, it refers to the subarray's end, and the start is set
to the point (0, 0, ...) with appropriate dimension, for example
a.subarray((4,5,6)) # equivalent to a[:4, :5, :6]
The function transforms the given point pair into a tuple of slices and calls
self.__getitem__() in it. If the points have lower dimension than the array, an
Ellipsis ('...') is implicitly appended to the slicing, so that missing axes
are left unaltered.
'''
if p2 is not None:
if len(p1) != len(p2):
raise IndexError('VigraArray.subarray(): points must have the same dimension.')
return self.__getitem__(tuple(map(lambda x,y: slice(x.__int__(), y.__int__()), p1, p2)))
else:
return self.__getitem__(tuple(map(lambda x: slice(x.__int__()), p1)))
###############################################################
# #
# re-implement ndarray methods to handle axistags #
# #
###############################################################
@_finalize_reduce_result
@_preserve_doc
def all(self, axis=None, out=None):
'''
The 'axis' parameter can be an int (axis position) or string (axis key).
'''
if type(axis) == str:
axis = self.axistags.index(axis)
return numpy.ndarray.all(self, axis, out)
@_finalize_reduce_result
@_preserve_doc
def any(self, axis=None, out=None):
'''
The 'axis' parameter can be an int (axis position) or string (axis key).
'''
if type(axis) == str:
axis = self.axistags.index(axis)
return numpy.ndarray.any(self, axis, out)
@_finalize_reduce_result
@_preserve_doc
def argmax(self, axis=None, out=None):
'''
The 'axis' parameter can be an int (axis position) or string (axis key).
'''
if type(axis) == str:
axis = self.axistags.index(axis)
return numpy.ndarray.argmax(self, axis, out)
@_finalize_reduce_result
@_preserve_doc
def argmin(self, axis=None, out=None):
'''
The 'axis' parameter can be an int (axis position) or string (axis key).
'''
if type(axis) == str:
axis = self.axistags.index(axis)
return numpy.ndarray.argmin(self, axis, out)
@_preserve_doc
def copy(self, order='A'):
return self.__class__(self, dtype=self.dtype, order=order)
@_preserve_doc
def cumprod(self, axis=None, dtype=None, out=None):
'''
The 'axis' parameter can be an int (axis position) or string (axis key).
'''
if type(axis) == str:
axis = self.axistags.index(axis)
res = numpy.ndarray.cumprod(self, axis, dtype, out)
if axis is None and out is None:
res.axistags = res._empty_axistags(res.ndim)
return res
@_preserve_doc
def cumsum(self, axis=None, dtype=None, out=None):
'''
The 'axis' parameter can be an int (axis position) or string (axis key).
'''
if type(axis) == str:
axis = self.axistags.index(axis)
res = numpy.ndarray.cumsum(self, axis, dtype, out)
if axis is None and out is None:
res.axistags = res._empty_axistags(res.ndim)
return res
@property
def flat(self):
'''
The array is always transposed to 'C' order before flattening.
'''
return self.transposeToNumpyOrder().view(numpy.ndarray).flat
@_preserve_doc
def flatten(self, order='C'):
'''
The array is always transposed to 'C' order before flattening.
'''
res = self.transposeToNumpyOrder().view(numpy.ndarray).flatten(order)
return taggedView(res, self._empty_axistags(1))
@_finalize_reduce_result
@_preserve_doc
def max(self, axis=None, out=None):
'''
The 'axis' parameter can be an int (axis position) or string (axis key).
'''
return numpy.ndarray.max(self, axis, out)
    @_preserve_doc
    def mean(self, axis=None, dtype=None, out=None):
        '''
        The 'axis' parameter can be an int (axis position) or string (axis key).
        '''
        # NOTE(review): string->index conversion and axistags handling are
        # presumably performed inside _numpyarray_overloaded_function -- confirm.
        return _numpyarray_overloaded_function(numpy.ndarray.mean, self, axis, dtype, out)
@_finalize_reduce_result
@_preserve_doc
def min(self, axis=None, out=None):
'''
The 'axis' parameter can be an int (axis position) or string (axis key).
'''
return numpy.ndarray.min(self, axis, out)
    @_preserve_doc
    def nonzero(self):
        res = numpy.ndarray.nonzero(self)
        # Attach a one-axis AxisTags to each index array, describing the axis
        # it indexes into.
        # NOTE(review): assumes the arrays in 'res' accept an 'axistags'
        # attribute (i.e. are VigraArray instances) -- confirm; plain
        # ndarrays would raise AttributeError here.
        for k in xrange(len(res)):
            res[k].axistags = AxisTags(AxisInfo(self.axistags[k]))
        return res
    @property
    def order(self):
        '''Memory order of this array: 'C' (C-contiguous), 'F'
           (Fortran-contiguous), 'V' (channel axis last with unit stride and
           the remaining strides non-negative and ascending), or 'A' if none
           of the above applies.
        '''
        if self.flags.c_contiguous:
            return 'C'
        elif self.flags.f_contiguous:
            return 'F'
        elif self.channelIndex == self.ndim-1 and self.itemsize == self.strides[-1] and \
             reduce(lambda x, y: y if y >= x and x >= 0 else -1, self.strides[:-1], 0) >= 0:
            # the reduce() yields -1 as soon as the non-channel strides are
            # negative or decreasing, i.e. it checks for ascending spatial order
            return 'V'
        return 'A'
    @_preserve_doc
    def prod(self, axis=None, dtype=None, out=None):
        '''
        The 'axis' parameter can be an int (axis position) or string (axis key).
        '''
        # NOTE(review): string->index conversion and axistags handling are
        # presumably performed inside _numpyarray_overloaded_function -- confirm.
        return _numpyarray_overloaded_function(numpy.ndarray.prod, self, axis, dtype, out)
    @_preserve_doc
    def ptp(self, axis=None, out=None):
        '''
        The 'axis' parameter can be an int (axis position) or string (axis key).
        '''
        if type(axis) == str:
            axis = self.axistags.index(axis)
        if axis is None:
            # global peak-to-peak over the flattened array; transpose to 'C'
            # order first so that the flattening order is well defined
            return self.transposeToOrder('C').view(numpy.ndarray).ptp(out=out)
        else:
            res = self.view(numpy.ndarray).ptp(axis, out)
            if out is None:
                # re-attach this array's axistags, minus the reduced axis
                res = res.view(VigraArray)
                res.axistags = self._copy_axistags()
                del res.axistags[axis]
            return res
@_preserve_doc
def ravel(self, order='C'):
'''
The array is always transposed to 'C' order before flattening.
'''
res = self.transposeToNumpyOrder().view(numpy.ndarray).ravel(order)
return taggedView(res, self._empty_axistags(1))
@_preserve_doc
def repeat(self, repeats, axis=None):
'''
The 'axis' parameter can be an int (axis position) or string (axis key).
'''
if type(axis) == str:
axis = self.axistags.index(axis)
if axis is None:
return numpy.ndarray.repeat(self.ravel(), repeats)
else:
return numpy.ndarray.repeat(self, repeats, axis)
@_preserve_doc
def reshape(self, shape, order='C', axistags=None):
'''
An additional keyword argument 'axistags' can be used to determine
the result's axistags. If not given, all axes of the result will
have type 'unknown'.
'''
if axistags is not None and len(shape) != len(axistags):
raise RuntimeError("VigraArray.reshape(): size mismatch between shape and axistags.")
res = numpy.ndarray.reshape(self, shape, order=order)
if axistags is not None:
res.axistags = copy.copy(axistags)
else:
res.axistags = res._empty_axistags(res.ndim)
return res
    @_preserve_doc
    def resize(self, new_shape, refcheck=True, order=False, axistags=None):
        '''
        An additional keyword argument 'axistags' can be used to determine
        the self's axistags after the resize. If not given, all axes will have
        type 'unknown'.
        '''
        # ndarray.resize() internally checks for refcount <= 2
        # We need to increase the threshold because we have two
        # additional references ('self' and the argument to 'sys.getrefcount')
        if sys.getrefcount(self) <= 4:
            refcheck = False
        if axistags is not None and len(new_shape) != len(axistags):
            raise RuntimeError("VigraArray.resize(): size mismatch between shape and axistags.")
        # NOTE(review): the 'order' parameter is accepted but never forwarded
        # to numpy.ndarray.resize -- confirm whether this is intentional.
        numpy.ndarray.resize(self, new_shape, refcheck=refcheck)
        if axistags is not None:
            self.axistags = copy.copy(axistags)
        else:
            # resizing invalidates the old axis meanings
            self.axistags = self._empty_axistags(self.ndim)
    @_preserve_doc
    def squeeze(self):
        res = numpy.ndarray.squeeze(self)
        if self.ndim != res.ndim:
            # drop the axistags of every removed singleton axis; iterate
            # backwards so earlier deletions don't shift later indices
            res.axistags = res._copy_axistags()
            for k in xrange(self.ndim-1, -1, -1):
                if self.shape[k] == 1:
                    del res.axistags[k]
        return res
    @_preserve_doc
    def std(self, axis=None, dtype=None, out=None, ddof=0):
        '''
        The 'axis' parameter can be an int (axis position) or string (axis key).
        '''
        # NOTE(review): 'ddof' is accepted but never forwarded, so a non-zero
        # ddof is silently ignored -- looks like a bug; confirm the signature
        # of _numpyarray_overloaded_function before fixing.
        return _numpyarray_overloaded_function(numpy.ndarray.std, self, axis, dtype, out)
    @_preserve_doc
    def sum(self, axis=None, dtype=None, out=None):
        '''
        The 'axis' parameter can be an int (axis position) or string (axis key).
        '''
        # NOTE(review): string->index conversion and axistags handling are
        # presumably performed inside _numpyarray_overloaded_function -- confirm.
        return _numpyarray_overloaded_function(numpy.ndarray.sum, self, axis, dtype, out)
    @_preserve_doc
    def swapaxes(self, i, j, keepTags=False):
        '''
        Parameters 'i' and 'j' can also be ints (axis positions) or strings (axis keys).
        If 'keepTags' is False, axistags are swapped like the axes, otherwise they remain
        unchanged such that the swapped axes acquire a new meaning.
        '''
        if type(i) == str:
            i = self.axistags.index(i)
        if type(j) == str:
            j = self.axistags.index(j)
        res = numpy.ndarray.swapaxes(self, i, j)
        res.axistags = res._copy_axistags()
        if not keepTags:
            try:
                # prefer the AxisTags-native swap when available
                res.axistags.swapaxes(i, j)
            except:
                # fall back to a plain Python element swap
                res.axistags[i], res.axistags[j] = res.axistags[j], res.axistags[i]
        return res
@_preserve_doc
def take(self, indices, axis=None, out=None, mode='raise'):
'''
The array is always transposed to 'C' order before flattening.
The 'axis' parameter can be an int (axis position) or string (axis key).
'''
if type(axis) == str:
axis = self.axistags.index(axis)
if axis is None:
return numpy.ndarray.take(self.ravel(), indices, axis, out, mode)
else:
return numpy.ndarray.take(self, indices, axis, out, mode)
    @_preserve_doc
    def transpose(self, *axes, **keepTags):
        '''
        An additional keyword parameter 'keepTags' can be provided (it has to be passed as an explicit
        keyword parameter). If it is True, the axistags will remain unchanged such that the transposed
        axes acquire a new meaning.
        '''
        # 'keepTags' is collected via **kwargs so the positional *axes
        # signature stays compatible with numpy.ndarray.transpose
        keepTags = keepTags.get('keepTags', False)
        res = numpy.ndarray.transpose(self, *axes)
        if not keepTags:
            # permute the axistags along with the axes
            res.axistags = res._transpose_axistags(*axes)
        return res
    @_preserve_doc
    def var(self, axis=None, dtype=None, out=None, ddof=0):
        '''
        The 'axis' parameter can be an int (axis position) or string (axis key).
        '''
        # NOTE(review): 'ddof' is accepted but never forwarded, so a non-zero
        # ddof is silently ignored -- looks like a bug; confirm the signature
        # of _numpyarray_overloaded_function before fixing.
        return _numpyarray_overloaded_function(numpy.ndarray.var, self, axis, dtype, out)
###############################################################
# #
# reimplement the numerical operators to make #
# sure that array order is preserved #
# #
###############################################################
    def __abs__(self):
        # elementwise absolute value via vigra's ufunc (preserves axistags/order)
        return ufunc.absolute(self)
    def __add__(self, other):
        # elementwise addition via vigra's ufunc (preserves axistags/order)
        return ufunc.add(self, other)
    def __and__(self, other):
        # elementwise bitwise AND via vigra's ufunc
        return ufunc.bitwise_and(self, other)
    def __div__(self, other):
        # Python 2 classic division via vigra's ufunc
        return ufunc.divide(self, other)
    def __divmod__(self, other):
        # (quotient, remainder) pair via vigra's ufuncs
        return ufunc.floor_divide(self, other), ufunc.remainder(self, other)
    def __eq__(self, other):
        # elementwise equality (returns an array, not a bool)
        return ufunc.equal(self, other)
    def __floordiv__(self, other):
        # elementwise floor division via vigra's ufunc
        return ufunc.floor_divide(self, other)
    def __ge__(self, other):
        # elementwise >= comparison via vigra's ufunc
        return ufunc.greater_equal(self, other)
    def __gt__(self, other):
        # elementwise > comparison via vigra's ufunc
        return ufunc.greater(self, other)
    def __invert__(self):
        # elementwise bitwise inversion via vigra's ufunc
        return ufunc.invert(self)
    def __le__(self, other):
        # elementwise <= comparison via vigra's ufunc
        return ufunc.less_equal(self, other)
    def __lshift__(self, other):
        # elementwise left shift via vigra's ufunc
        return ufunc.left_shift(self, other)
    def __lt__(self, other):
        # elementwise < comparison via vigra's ufunc
        return ufunc.less(self, other)
    def __mod__(self, other):
        # elementwise modulo via vigra's ufunc
        return ufunc.remainder(self, other)
    def __mul__(self, other):
        # elementwise multiplication via vigra's ufunc
        return ufunc.multiply(self, other)
    def __ne__(self, other):
        # elementwise inequality (returns an array, not a bool)
        return ufunc.not_equal(self, other)
    def __neg__(self):
        # elementwise negation via vigra's ufunc
        return ufunc.negative(self)
    def __or__(self, other):
        # elementwise bitwise OR via vigra's ufunc
        return ufunc.bitwise_or(self, other)
    def __pos__(self):
        # unary plus is the identity; no copy is made
        return self
    def __pow__(self, other):
        # elementwise exponentiation via vigra's ufunc
        return ufunc.power(self, other)
def __radd__(self, other):
return ufunc.add(other, self)
def __radd__(self, other):
return ufunc.add(other, self)
    def __rand__(self, other):
        # reflected bitwise AND via vigra's ufunc
        return ufunc.bitwise_and(other, self)
    def __rdiv__(self, other):
        # reflected Python 2 classic division via vigra's ufunc
        return ufunc.divide(other, self)
    def __rdivmod__(self, other):
        # reflected (quotient, remainder) pair via vigra's ufuncs
        return ufunc.floor_divide(other, self), ufunc.remainder(other, self)
    def __rfloordiv__(self, other):
        # reflected floor division via vigra's ufunc
        return ufunc.floor_divide(other, self)
def __rlshift__(self, other):
return ufunc.left_shoft(other, self)
    def __rmod__(self, other):
        # reflected modulo via vigra's ufunc
        return ufunc.remainder(other, self)
    def __rmul__(self, other):
        # reflected multiplication via vigra's ufunc
        return ufunc.multiply(other, self)
    def __ror__(self, other):
        # reflected bitwise OR via vigra's ufunc
        return ufunc.bitwise_or(other, self)
    def __rpow__(self, other):
        # reflected exponentiation via vigra's ufunc
        return ufunc.power(other, self)
    def __rrshift__(self, other):
        # reflected right shift via vigra's ufunc
        return ufunc.right_shift(other, self)
    def __rshift__(self, other):
        # elementwise right shift via vigra's ufunc
        return ufunc.right_shift(self, other)
    def __rsub__(self, other):
        # reflected subtraction via vigra's ufunc
        return ufunc.subtract(other, self)
    def __rtruediv__(self, other):
        # reflected true division via vigra's ufunc
        return ufunc.true_divide(other, self)
    def __rxor__(self, other):
        # reflected bitwise XOR via vigra's ufunc
        return ufunc.bitwise_xor(other, self)
    def __sub__(self, other):
        # elementwise subtraction via vigra's ufunc
        return ufunc.subtract(self, other)
    def __truediv__(self, other):
        # elementwise true division via vigra's ufunc
        return ufunc.true_divide(self, other)
    def __xor__(self, other):
        # elementwise bitwise XOR via vigra's ufunc
        return ufunc.bitwise_xor(self, other)
##################################################################
# channelCount == None: array must not have channels
# channelCount == 0: array can have arbitrary number of channels (including None)
def _adjustShape(shape, order, spatialDimensions, channelCount, axistags, name):
    # Validate a shape tuple for a factory function (Image(), Volume(), ...)
    # and return the possibly augmented (shape, axistags) pair.
    # channelCount semantics: None = no channel axis allowed,
    # 0 = any number of channels, >0 = exactly that many.
    if order is None:
        order = VigraArray.defaultOrder
    if len(shape) == spatialDimensions:
        # no channel entry given: insert one unless channels are forbidden
        if channelCount is not None and channelCount == 0:
            channelCount = 1
        if channelCount:
            # channel dimension is first in 'F' order, last otherwise
            if order == 'F':
                shape = (channelCount,) + shape
            else:
                shape = shape + (channelCount,)
    else:
        # shape must already contain exactly one channel entry
        if channelCount is None or len(shape) != spatialDimensions + 1:
            raise RuntimeError("%s: input shape has wrong length." % name)
        if channelCount > 0:
            if order == 'F':
                if shape[0] != channelCount:
                    raise RuntimeError("%s: input shape has wrong number of channels." % name)
            else:
                if shape[-1] != channelCount:
                    raise RuntimeError("%s: input shape has wrong number of channels." % name)
    if axistags is None:
        axistags = VigraArray.defaultAxistags(spatialDimensions+1, order)
        # drop the channel tag again when the caller wants no channel axis
        if len(shape) == spatialDimensions:
            axistags.dropChannelAxis()
    if len(shape) != len(axistags):
        raise RuntimeError("%s: size mismatch between shape and axistags." % name)
    return shape, axistags
def _adjustArray(array, order, spatialDimensions, channelCount, axistags, name):
    # Validate an existing array for a factory function (Image(), Volume(), ...)
    # and return the possibly augmented (array, axistags) pair.
    # channelCount semantics: None = no channel axis allowed,
    # 0 = any number of channels, >0 = exactly that many.
    if order is None:
        order = VigraArray.defaultOrder
    if array.ndim == spatialDimensions:
        # array has no channel axis
        if channelCount is not None and channelCount > 1:
            raise RuntimeError("%s: input array has too few dimensions." % name)
        if hasattr(array, 'axistags'):
            # channelIndex == ndim means "no channel axis present"
            if array.channelIndex != array.ndim:
                raise RuntimeError("%s: input array has too few non-channel axes." % name)
        if channelCount:
            # insert a singleton channel axis ('F': first, otherwise last)
            if hasattr(array, 'axistags'):
                array = array.insertChannelAxis(order)
            elif order == 'F':
                array = array[numpy.newaxis,...]
            else:
                array = array[...,numpy.newaxis]
    else:
        # array must already carry a channel axis
        if channelCount is None or array.ndim != spatialDimensions+1:
            raise RuntimeError("%s: input array has wrong number of dimensions." % name)
        if hasattr(array, 'axistags'):
            channelIndex = array.channelIndex
            if channelIndex == array.ndim:
                raise RuntimeError("%s: input array has no channel axis." % name)
            if channelCount > 0 and array.shape[channelIndex] != channelCount:
                raise RuntimeError("%s: input array has wrong number of channels." % name)
    if axistags is None:
        if hasattr(array, 'axistags'):
            axistags = copy.copy(array.axistags)
        else:
            axistags = VigraArray.defaultAxistags(spatialDimensions+1, order)
            # drop the channel tag when no channel axis was added above
            if array.ndim == spatialDimensions:
                axistags.dropChannelAxis()
    if array.ndim != len(axistags):
        raise RuntimeError("%s: axistags have wrong number of axes." % name)
    return array, axistags
def _adjustInput(obj, order, spatialDimensions, channelCount, axistags, name):
    # Dispatch: real arrays are validated in place, everything else is
    # interpreted as a shape sequence.
    handler = _adjustArray if isinstance(obj, numpy.ndarray) else _adjustShape
    return handler(obj, order, spatialDimensions, channelCount, axistags, name)
#################################################################
def Image(obj, dtype=numpy.float32, order=None,
          init=True, value=None, axistags=None):
    '''
    Factory function for a :class:`~vigra.VigraArray` representing an image (i.e. an array with
    two spatial axes 'x' and 'y' and optionally a channel axis 'c').
    Parameters are interpreted as in the VigraArray constructor, but an exception
    will be raised if the shape or axistags are not image-like.
    '''
    obj, axistags = _adjustInput(obj, order, 2, 0, axistags, "vigra.Image()")
    return VigraArray(obj, dtype, None, init, value, axistags)
def ScalarImage(obj, dtype=numpy.float32, order=None,
                init=True, value=None, axistags=None):
    '''
    Factory function for a :class:`~vigra.VigraArray` representing a single-band image (i.e. an
    array with two spatial axes 'x' and 'y' and no channel axis).
    Parameters are interpreted as in the VigraArray constructor, but an exception
    will be raised if the shape or axistags are unsuitable for a single-band image.
    '''
    obj, axistags = _adjustInput(obj, order, 2, None, axistags, "vigra.ScalarImage()")
    return VigraArray(obj, dtype, None, init, value, axistags)
def Vector2Image(obj, dtype=numpy.float32, order=None,
                 init=True, value=None, axistags=None):
    '''
    Factory function for a :class:`~vigra.VigraArray` representing a 2-band image (i.e. an
    array with two spatial axes 'x' and 'y' and channel axis 'c' with 2 channels).
    Parameters are interpreted as in the VigraArray constructor, but an exception
    will be raised if the shape or axistags are unsuitable for a 2-band image.
    '''
    obj, axistags = _adjustInput(obj, order, 2, 2, axistags, "vigra.Vector2Image()")
    return VigraArray(obj, dtype, None, init, value, axistags)
def Vector3Image(obj, dtype=numpy.float32, order=None,
                 init=True, value=None, axistags=None):
    '''
    Factory function for a :class:`~vigra.VigraArray` representing a 3-band image (i.e. an
    array with two spatial axes 'x' and 'y' and channel axis 'c' with 3 channels).
    Parameters are interpreted as in the VigraArray constructor, but an exception
    will be raised if the shape or axistags are unsuitable for a 3-band image.
    '''
    obj, axistags = _adjustInput(obj, order, 2, 3, axistags, "vigra.Vector3Image()")
    return VigraArray(obj, dtype, None, init, value, axistags)
def Vector4Image(obj, dtype=numpy.float32, order=None,
                 init=True, value=None, axistags=None):
    '''
    Factory function for a :class:`~vigra.VigraArray` representing a 4-band image (i.e. an
    array with two spatial axes 'x' and 'y' and channel axis 'c' with 4 channels).
    Parameters are interpreted as in the VigraArray constructor, but an exception
    will be raised if the shape or axistags are unsuitable for a 4-band image.
    '''
    obj, axistags = _adjustInput(obj, order, 2, 4, axistags, "vigra.Vector4Image()")
    return VigraArray(obj, dtype, None, init, value, axistags)
def RGBImage(obj, dtype=numpy.float32, order=None,
             init=True, value=None, axistags=None):
    '''
    Factory function for a :class:`~vigra.VigraArray` representing a RGB image (i.e. an
    array with two spatial axes 'x' and 'y' and channel axis 'c' with 3 channels).
    Parameters are interpreted as in the VigraArray constructor, but an exception
    will be raised if the shape or axistags are unsuitable for an RGB image.
    '''
    obj, axistags = _adjustInput(obj, order, 2, 3, axistags, "vigra.RGBImage()")
    res = VigraArray(obj, dtype, None, init, value, axistags)
    # mark the channel axis so consumers know the band semantics
    res.axistags['c'].description = 'RGB'
    return res
#################################################################
def Volume(obj, dtype=numpy.float32, order=None,
           init=True, value=None, axistags=None):
    '''
    Factory function for a :class:`~vigra.VigraArray` representing a volume (i.e. an array with
    three spatial axes 'x', 'y' and 'z' and optionally a channel axis 'c').
    Parameters are interpreted as in the VigraArray constructor, but an exception
    will be raised if the shape or axistags are not volume-like.
    '''
    obj, axistags = _adjustInput(obj, order, 3, 0, axistags, "vigra.Volume()")
    return VigraArray(obj, dtype, None, init, value, axistags)
def ScalarVolume(obj, dtype=numpy.float32, order=None,
                 init=True, value=None, axistags=None):
    '''
    Factory function for a :class:`~vigra.VigraArray` representing a single-band volume (i.e. an
    array with three spatial axes 'x', 'y' and 'z' and no channel axis).
    Parameters are interpreted as in the VigraArray constructor, but an exception
    will be raised if the shape or axistags are unsuitable for a single-band volume.
    '''
    obj, axistags = _adjustInput(obj, order, 3, None, axistags, "vigra.ScalarVolume()")
    return VigraArray(obj, dtype, None, init, value, axistags)
def Vector2Volume(obj, dtype=numpy.float32, order=None,
                  init=True, value=None, axistags=None):
    '''
    Factory function for a :class:`~vigra.VigraArray` representing a 2-band volume (i.e. an
    array with three spatial axes 'x', 'y' and 'z' and channel axis 'c' with 2 channels).
    Parameters are interpreted as in the VigraArray constructor, but an exception
    will be raised if the shape or axistags are unsuitable for a 2-band volume.
    '''
    obj, axistags = _adjustInput(obj, order, 3, 2, axistags, "vigra.Vector2Volume()")
    return VigraArray(obj, dtype, None, init, value, axistags)
def Vector3Volume(obj, dtype=numpy.float32, order=None,
                  init=True, value=None, axistags=None):
    '''
    Factory function for a :class:`~vigra.VigraArray` representing a 3-band volume (i.e. an
    array with three spatial axes 'x', 'y' and 'z' and channel axis 'c' with 3 channels).
    Parameters are interpreted as in the VigraArray constructor, but an exception
    will be raised if the shape or axistags are unsuitable for a 3-band volume.
    '''
    obj, axistags = _adjustInput(obj, order, 3, 3, axistags, "vigra.Vector3Volume()")
    return VigraArray(obj, dtype, None, init, value, axistags)
def Vector4Volume(obj, dtype=numpy.float32, order=None,
                  init=True, value=None, axistags=None):
    '''
    Factory function for a :class:`~vigra.VigraArray` representing a 4-band volume (i.e. an
    array with three spatial axes 'x', 'y' and 'z' and channel axis 'c' with 4 channels).
    Parameters are interpreted as in the VigraArray constructor, but an exception
    will be raised if the shape or axistags are unsuitable for a 4-band volume.
    '''
    obj, axistags = _adjustInput(obj, order, 3, 4, axistags, "vigra.Vector4Volume()")
    return VigraArray(obj, dtype, None, init, value, axistags)
def Vector6Volume(obj, dtype=numpy.float32, order=None,
                  init=True, value=None, axistags=None):
    '''
    Factory function for a :class:`~vigra.VigraArray` representing a 6-band volume (i.e. an
    array with three spatial axes 'x', 'y' and 'z' and channel axis 'c' with 6 channels).
    Parameters are interpreted as in the VigraArray constructor, but an exception
    will be raised if the shape or axistags are unsuitable for a 6-band volume.
    '''
    obj, axistags = _adjustInput(obj, order, 3, 6, axistags, "vigra.Vector6Volume()")
    return VigraArray(obj, dtype, None, init, value, axistags)
def RGBVolume(obj, dtype=numpy.float32, order=None,
              init=True, value=None, axistags=None):
    '''
    Factory function for a :class:`~vigra.VigraArray` representing an RGB volume (i.e. an
    array with three spatial axes 'x', 'y' and 'z' and channel axis 'c' with 3 channels).
    Parameters are interpreted as in the VigraArray constructor, but an exception
    will be raised if the shape or axistags are unsuitable for an RGB volume.
    '''
    obj, axistags = _adjustInput(obj, order, 3, 3, axistags, "vigra.RGBVolume()")
    res = VigraArray(obj, dtype, None, init, value, axistags)
    # mark the channel axis so consumers know the band semantics
    res.axistags['c'].description = 'RGB'
    return res
#################################################################
class ImagePyramid(list):
def __init__(self, image, copyImageToLevel = 0, lowestLevel = 0, highestLevel = 0):
''' Create a new pyramid.
The new pyramid levels range from 'lowestLevel' to 'highestLevel' (inclusive),
and the given 'image' is copied to 'copyImageToLevel'. The images at other
levels are filled with zeros and sized so that the shape is reduced by half
when going up (to higher levels), and doubled when going down.
This class can handle multi-channel images, but only when image.channelIndex
exists and returns image.ndim-1 (i.e. the image must have axistags, and the
channel axis must correspond to the last index, as in C- or V-order).
'''
if lowestLevel > copyImageToLevel or highestLevel < copyImageToLevel:
raise ValueError('ImagePyramid(): copyImageToLevel must be between lowestLevel and highestLevel (inclusive)')
list.__init__(self, [image.__class__(image, dtype=image.dtype)])
self._lowestLevel = copyImageToLevel
self._highestLevel = copyImageToLevel
self.createLevel(lowestLevel)
self.createLevel(highestLevel)
@property
def lowestLevel(self):
'''The pyramids lowest level.
'''
return self._lowestLevel
@property
def highestLevel(self):
'''The pyramids highest level (inclusive).
'''
return self._highestLevel
def __getitem__(self, level):
'''Get the image at 'level'.
Raises IndexError when the level does not exist.
'''
if level < self.lowestLevel or level > self.highestLevel:
raise IndexError("ImagePyramid[level]: level out of range.")
return list.__getitem__(self, level - self.lowestLevel)
def __setitem__(self, level, image):
'''Copy the data of the given 'image' to the image at 'level'.
Raises IndexError when the level does not exist.
'''
self[level][...] = image[...]
def expandImpl(self, src, dest, centerValue):
import filters
ss, ds = src.shape, dest.shape
s = [ss[k] if 2*ss[k] == ds[k] else -1 for k in range(len(ss))]
smooth1 = filters.explictKernel(-1, 1, numpy.array([0.5 - centerValue, 2.0*centerValue, 0.5 - centerValue]))
smooth2 = filters.explictKernel(-1, 0, numpy.array([0.5, 0.5]));
filters.convolve(src, (smooth1, smooth1), out=dest[::2,::2])
filters.convolve(src[:,:s[1]], (smooth1, smooth2), out=dest[::2,1::2])
filters.convolve(src[:s[0],:], (smooth2, smooth1), out=dest[1::2,::2])
filters.convolve(src[:s[0],:s[1]], (smooth2, smooth2), out=dest[1::2,1::2])
def reduce(self, srcLevel, destLevel, centerValue = 0.42):
'''Reduce the image at 'srcLevel' to 'destLevel', using the Burt smoothing filter
with the given 'centerValue'. srcLevel must be smaller than destLevel.
For more details, see pyramidReduceBurtFilter_ in the C++ documentation.
'''
# FIXME: This should be implemented in C++
# FIXME: This should be implemented for arbitrary dimensions
import filters
if srcLevel > destLevel:
raise RuntimeError("ImagePyramid::reduce(): srcLevel <= destLevel required.")
if srcLevel < self.lowestLevel or srcLevel > self.highestLevel:
raise RuntimeError("ImagePyramid::reduce(): srcLevel does not exist.")
self.createLevel(destLevel)
smooth = filters.burtFilterKernel(0.25 - 0.5*centerValue)
for k in range(srcLevel, destLevel):
i = filters.convolve(self[k], smooth)
self[k+1] = i[::2,::2]
def expand(self, srcLevel, destLevel, centerValue = 0.42):
'''Expand the image at 'srcLevel' to 'destLevel', using the Burt smoothing filter
with the given 'centerValue'. srcLevel must be larger than destLevel.
For more details, see pyramidExpandBurtFilter_ in the C++ documentation.
'''
# FIXME: This should be implemented in C++
# FIXME: This should be implemented for arbitrary dimensions
if srcLevel < destLevel:
raise RuntimeError("ImagePyramid::expand(): srcLevel >= destLevel required.")
if srcLevel < self.lowestLevel or srcLevel > self.highestLevel:
raise RuntimeError("ImagePyramid::expand(): srcLevel does not exist.")
self.createLevel(destLevel)
for k in range(srcLevel, destLevel, -1):
self.expandImpl(self[k], self[k-1], centerValue)
def reduceLaplacian(self, srcLevel, destLevel, centerValue = 0.42):
'''Reduce the image at 'srcLevel' to 'destLevel', using the Burt smoothing filter
with the given 'centerValue', and compute Laplacian images for the levels
srcLevel ... destLevel-1. srcLevel must be smaller than destLevel.
For more details, see pyramidReduceBurtLaplacian_ in the C++ documentation.
'''
# FIXME: This should be implemented in C++
# FIXME: This should be implemented for arbitrary dimensions
import filters
if srcLevel > destLevel:
raise RuntimeError("ImagePyramid::reduceLaplacian(): srcLevel <= destLevel required.")
if srcLevel < self.lowestLevel or srcLevel > self.highestLevel:
raise RuntimeError("ImagePyramid::reduceLaplacian(): srcLevel does not exist.")
self.createLevel(destLevel)
smooth = filters.burtFilterKernel(0.25 - 0.5*centerValue)
for k in range(srcLevel, destLevel):
i = filters.convolve(self[k], smooth)
self[k+1] = i[::2,::2]
self.expandImpl(self[k+1], i, centerValue)
self[k] = i - self[k]
def expandLaplacian(self, srcLevel, destLevel, centerValue = 0.42):
'''Expand the image at 'srcLevel' to 'destLevel', using the Burt smoothing filter
with the given 'centerValue', and reconstruct the images for the levels
srcLevel-1 ... destLevel from their Laplacian images. srcLevel must be larger than destLevel.
For more details, see pyramidExpandBurtLaplacian_ in the C++ documentation.
'''
# FIXME: This should be implemented in C++
# FIXME: This should be implemented for arbitrary dimensions
import filters
if srcLevel < destLevel:
raise RuntimeError("ImagePyramid::expandLaplacian(): srcLevel >= destLevel required.")
if srcLevel < self.lowestLevel or srcLevel > self.highestLevel:
raise RuntimeError("ImagePyramid::expandLaplacian(): srcLevel does not exist.")
self.createLevel(destLevel)
smooth1 = filters.explictKernel(-1, 1, numpy.array([0.5 - centerValue, 2.0*centerValue, 0.5 - centerValue]))
smooth2 = filters.explictKernel(-1, 0, numpy.array([0.5, 0.5]));
for k in range(srcLevel, destLevel, -1):
i = self[k-1].__class__(self[k-1].shape, dtype = self[k-1].dtype)
self.expandImpl(self[k], i, centerValue)
self[k-1] = i - self[k-1]
def createLevel(self, level):
    ''' Make sure that 'level' exists.  If 'level' is outside the current range of levels,
    empty images of the appropriate shape are inserted into the pyramid.
    '''
    if level > self.highestLevel:
        # Extend upwards: each new level has roughly half the size of the one
        # below it (int((k + 1) / 2) matches the [::2] subsampling used when
        # reducing), except along the channel axis, which keeps its extent.
        for i in range(self.highestLevel, level):
            image = list.__getitem__(self, -1)
            newShape = [int((k + 1) / 2) for k in image.shape]
            channelIndex = getattr(image, 'channelIndex', image.ndim)
            if channelIndex < image.ndim:
                newShape[channelIndex] = image.shape[channelIndex]
            self.append(image.__class__(newShape, dtype=image.dtype))
        self._highestLevel = level
    elif level < self.lowestLevel:
        # Extend downwards: each new level roughly doubles the size (2*k-1,
        # the inverse of the reduction above).
        # BUG FIX: fetch the current lowest image *inside* the loop so that
        # consecutive new levels grow progressively; the original fetched it
        # once before the loop, so inserting more than one level gave every
        # inserted image the same shape.
        for i in range(self.lowestLevel, level, -1):
            image = list.__getitem__(self, 0)
            newShape = [2*k-1 for k in image.shape]
            channelIndex = getattr(image, 'channelIndex', image.ndim)
            if channelIndex < image.ndim:
                newShape[channelIndex] = image.shape[channelIndex]
            self.insert(0, image.__class__(newShape, dtype=image.dtype))
        self._lowestLevel = level
| mit |
GdZ/scriptfile | software/googleAppEngine/google/appengine/tools/dev_appserver_main.py | 2 | 25100 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Runs a development application server for an application.
%(script)s [options] <application root>
Application root must be the path to the application to run in this server.
Must contain a valid app.yaml or app.yml file.
Options:
--address=ADDRESS, -a ADDRESS
Address to which this server should bind. (Default
%(address)s).
--clear_datastore, -c Clear the Datastore on startup. (Default false)
--debug, -d Use debug logging. (Default false)
--help, -h View this helpful message.
--port=PORT, -p PORT Port for the server to run on. (Default %(port)s)
--allow_skipped_files Allow access to files matched by app.yaml's
skipped_files (default False)
--auth_domain Authorization domain that this app runs in.
(Default gmail.com)
--backends Run the dev_appserver with backends support
(multiprocess mode).
--blobstore_path=DIR Path to directory to use for storing Blobstore
file stub data.
--clear_prospective_search Clear the Prospective Search subscription index
(Default false).
--clear_search_indexes Clear the Full Text Search indexes (Default false).
--datastore_path=DS_FILE Path to file to use for storing Datastore file
stub data.
(Default %(datastore_path)s)
--debug_imports Enables debug logging for module imports, showing
search paths used for finding modules and any
errors encountered during the import process.
--default_partition Default partition to use in the APPLICATION_ID.
(Default dev)
--disable_static_caching Never allow the browser to cache static files.
(Default enable if expiration set in app.yaml)
--disable_task_running When supplied, tasks will not be automatically
run after submission and must be run manually
in the local admin console.
--enable_sendmail Enable sendmail when SMTP not configured.
(Default false)
--high_replication Use the high replication datastore consistency
model. (Default false).
--history_path=PATH Path to use for storing Datastore history.
(Default %(history_path)s)
--multiprocess_min_port When running in multiprocess mode, specifies the
lowest port value to use when choosing ports. If
set to 0, select random ports.
(Default 9000)
--mysql_host=HOSTNAME MySQL database host.
Used by the Cloud SQL (rdbms) stub.
(Default '%(mysql_host)s')
--mysql_port=PORT MySQL port to connect to.
Used by the Cloud SQL (rdbms) stub.
(Default %(mysql_port)s)
--mysql_user=USER MySQL user to connect as.
Used by the Cloud SQL (rdbms) stub.
(Default %(mysql_user)s)
--mysql_password=PASSWORD MySQL password to use.
Used by the Cloud SQL (rdbms) stub.
(Default '%(mysql_password)s')
--mysql_socket=PATH MySQL Unix socket file path.
Used by the Cloud SQL (rdbms) stub.
(Default '%(mysql_socket)s')
--persist_logs Enables storage of all request and application
logs to enable later access. (Default false).
--require_indexes Disallows queries that require composite indexes
not defined in index.yaml.
--search_indexes_path=PATH Path to file to use for storing Full Text Search
indexes (Default %(search_indexes_path)s).
--show_mail_body Log the body of emails in mail stub.
(Default false)
--skip_sdk_update_check Skip checking for SDK updates. If false, fall back
to opt_in setting specified in .appcfg_nag
(Default false)
--smtp_host=HOSTNAME SMTP host to send test mail to. Leaving this
unset will disable SMTP mail sending.
(Default '%(smtp_host)s')
--smtp_port=PORT SMTP port to send test mail to.
(Default %(smtp_port)s)
--smtp_user=USER SMTP user to connect as. Stub will only attempt
to login if this field is non-empty.
(Default '%(smtp_user)s').
--smtp_password=PASSWORD Password for SMTP server.
(Default '%(smtp_password)s')
--task_retry_seconds How long to wait in seconds before retrying a
task after it fails during execution.
(Default '%(task_retry_seconds)s')
--use_sqlite Use the new, SQLite based datastore stub.
(Default false)
--port_sqlite_data Converts the data from the file based datastore
stub to the new SQLite stub, one time use only.
(Default false)
--[enable|disable]_console Enables/disables the interactive console.
(Default enabled if --address is unset,
disabled if --address is set)
"""
from google.appengine.tools import os_compat
import getopt
import logging
import os
import signal
import sys
import tempfile
import traceback
logging.basicConfig(
level=logging.INFO,
format='%(levelname)-8s %(asctime)s %(filename)s:%(lineno)s] %(message)s')
from google.appengine.api import yaml_errors
from google.appengine.dist import py_zipimport
from google.appengine.tools import appcfg
from google.appengine.tools import appengine_rpc
from google.appengine.tools import dev_appserver
from google.appengine.tools import dev_appserver_multiprocess as multiprocess
DEFAULT_ADMIN_CONSOLE_SERVER = 'appengine.google.com'

# Keys of the option dictionary built by ParseArguments().  Each ARG_*
# constant names one command-line flag (documented in the module docstring).
ARG_ADDRESS = 'address'
ARG_ADMIN_CONSOLE_HOST = 'admin_console_host'
ARG_ADMIN_CONSOLE_SERVER = 'admin_console_server'
ARG_ALLOW_SKIPPED_FILES = 'allow_skipped_files'
ARG_AUTH_DOMAIN = 'auth_domain'
ARG_BACKENDS = 'backends'
ARG_BLOBSTORE_PATH = 'blobstore_path'
ARG_CLEAR_DATASTORE = 'clear_datastore'
ARG_CLEAR_PROSPECTIVE_SEARCH = 'clear_prospective_search'
ARG_CLEAR_SEARCH_INDEX = 'clear_search_indexes'
ARG_DATASTORE_PATH = 'datastore_path'
ARG_DEBUG_IMPORTS = 'debug_imports'
ARG_DEFAULT_PARTITION = 'default_partition'
ARG_DISABLE_TASK_RUNNING = 'disable_task_running'
ARG_ENABLE_SENDMAIL = 'enable_sendmail'
ARG_HIGH_REPLICATION = 'high_replication'
ARG_HISTORY_PATH = 'history_path'
ARG_LOGIN_URL = 'login_url'
ARG_LOG_LEVEL = 'log_level'
# The multiprocess-mode flag names are owned by the multiprocess module.
ARG_MULTIPROCESS = multiprocess.ARG_MULTIPROCESS
ARG_MULTIPROCESS_API_PORT = multiprocess.ARG_MULTIPROCESS_API_PORT
ARG_MULTIPROCESS_API_SERVER = multiprocess.ARG_MULTIPROCESS_API_SERVER
ARG_MULTIPROCESS_APP_INSTANCE_ID = multiprocess.ARG_MULTIPROCESS_APP_INSTANCE_ID
ARG_MULTIPROCESS_BACKEND_ID = multiprocess.ARG_MULTIPROCESS_BACKEND_ID
ARG_MULTIPROCESS_BACKEND_INSTANCE_ID = multiprocess.ARG_MULTIPROCESS_BACKEND_INSTANCE_ID
ARG_MULTIPROCESS_FRONTEND_PORT = multiprocess.ARG_MULTIPROCESS_FRONTEND_PORT
ARG_MULTIPROCESS_MIN_PORT = multiprocess.ARG_MULTIPROCESS_MIN_PORT
ARG_MYSQL_HOST = 'mysql_host'
ARG_MYSQL_PASSWORD = 'mysql_password'
ARG_MYSQL_PORT = 'mysql_port'
ARG_MYSQL_SOCKET = 'mysql_socket'
ARG_MYSQL_USER = 'mysql_user'
ARG_PERSIST_LOGS = 'persist_logs'
ARG_PORT = 'port'
ARG_PROSPECTIVE_SEARCH_PATH = 'prospective_search_path'
ARG_REQUIRE_INDEXES = 'require_indexes'
ARG_SEARCH_INDEX_PATH = 'search_indexes_path'
ARG_SHOW_MAIL_BODY = 'show_mail_body'
ARG_SKIP_SDK_UPDATE_CHECK = 'skip_sdk_update_check'
ARG_SMTP_HOST = 'smtp_host'
ARG_SMTP_PASSWORD = 'smtp_password'
ARG_SMTP_PORT = 'smtp_port'
ARG_SMTP_USER = 'smtp_user'
ARG_STATIC_CACHING = 'static_caching'
ARG_TASK_RETRY_SECONDS = 'task_retry_seconds'
ARG_TRUSTED = 'trusted'
ARG_USE_SQLITE = 'use_sqlite'
ARG_PORT_SQLITE_DATA = 'port_sqlite_data'
ARG_CONSOLE = 'console'

# SDK root: four directory levels above google/appengine/tools/os_compat.py.
SDK_PATH = os.path.dirname(
    os.path.dirname(
        os.path.dirname(
            os.path.dirname(os_compat.__file__)
        )
    )
)

# Python version of the production runtime for the classic 'python' runtime;
# main() compares sys.version_info against this (or (2, 7) for 'python27').
PRODUCTION_VERSION = (2, 5)
# May be set to False (e.g. by tests) to silence the version-mismatch warning.
WARN_ABOUT_PYTHON_VERSION = True

# Default value for every option; ParseArguments() starts from a copy of this.
DEFAULT_ARGS = {
    ARG_ADDRESS: 'localhost',
    ARG_ADMIN_CONSOLE_HOST: None,
    ARG_ADMIN_CONSOLE_SERVER: DEFAULT_ADMIN_CONSOLE_SERVER,
    ARG_ALLOW_SKIPPED_FILES: False,
    ARG_AUTH_DOMAIN: 'gmail.com',
    ARG_BLOBSTORE_PATH: os.path.join(tempfile.gettempdir(),
                                     'dev_appserver.blobstore'),
    ARG_CLEAR_DATASTORE: False,
    ARG_CLEAR_PROSPECTIVE_SEARCH: False,
    ARG_CLEAR_SEARCH_INDEX: False,
    ARG_DATASTORE_PATH: os.path.join(tempfile.gettempdir(),
                                     'dev_appserver.datastore'),
    ARG_DEFAULT_PARTITION: 'dev',
    ARG_DISABLE_TASK_RUNNING: False,
    ARG_ENABLE_SENDMAIL: False,
    ARG_HIGH_REPLICATION: False,
    ARG_HISTORY_PATH: os.path.join(tempfile.gettempdir(),
                                   'dev_appserver.datastore.history'),
    ARG_LOGIN_URL: '/_ah/login',
    ARG_LOG_LEVEL: logging.INFO,
    ARG_MYSQL_HOST: 'localhost',
    ARG_MYSQL_PASSWORD: '',
    ARG_MYSQL_PORT: 3306,
    ARG_MYSQL_SOCKET: '',
    ARG_MYSQL_USER: '',
    ARG_PERSIST_LOGS: False,
    ARG_PORT: 8080,
    ARG_PROSPECTIVE_SEARCH_PATH: os.path.join(tempfile.gettempdir(),
                                              'dev_appserver.prospective_search'),
    ARG_REQUIRE_INDEXES: False,
    ARG_SEARCH_INDEX_PATH: os.path.join(tempfile.gettempdir(),
                                        'dev_appserver.searchindexes'),
    ARG_SHOW_MAIL_BODY: False,
    ARG_SKIP_SDK_UPDATE_CHECK: False,
    ARG_SMTP_HOST: '',
    ARG_SMTP_PASSWORD: '',
    ARG_SMTP_PORT: 25,
    ARG_SMTP_USER: '',
    ARG_STATIC_CACHING: True,
    ARG_TASK_RETRY_SECONDS: 30,
    ARG_TRUSTED: False,
    ARG_USE_SQLITE: False,
    ARG_PORT_SQLITE_DATA: False
}

# Short options accepted by getopt: -a <address>, -c, -d, -h, -p <port>.
OPTIONS = 'a:cdhp:'

# Long options accepted by getopt; a trailing '=' means the flag takes a value.
# NOTE(review): 'multiprocess' and 'multiprocess_api_server' have no '=' here,
# yet their handlers in ParseArguments() store `value` (always '') -- confirm
# against the multiprocess module whether that is intended.
LONG_OPTIONS = [
    'address=',
    'admin_console_host=',
    'admin_console_server=',
    'allow_skipped_files',
    'auth_domain=',
    'backends',
    'blobstore_path=',
    'clear_datastore',
    'clear_prospective_search',
    'clear_search_indexes',
    'datastore_path=',
    'debug',
    'debug_imports',
    'default_partition=',
    'disable_static_caching',
    'disable_task_running',
    'enable_sendmail',
    'help',
    'high_replication',
    'history_path=',
    'multiprocess',
    'multiprocess_api_port=',
    'multiprocess_api_server',
    'multiprocess_app_instance_id=',
    'multiprocess_backend_id=',
    'multiprocess_backend_instance_id=',
    'multiprocess_frontend_port=',
    'multiprocess_min_port=',
    'mysql_host=',
    'mysql_password=',
    'mysql_port=',
    'mysql_socket=',
    'mysql_user=',
    'persist_logs',
    'port=',
    'require_indexes',
    'search_indexes_path=',
    'show_mail_body',
    'skip_sdk_update_check',
    'smtp_host=',
    'smtp_password=',
    'smtp_port=',
    'smtp_user=',
    'task_retry_seconds=',
    'trusted',
    'use_sqlite',
    'port_sqlite_data',
    'enable_console',
    'disable_console',
]
def PrintUsageExit(code):
    """Render the module usage text and terminate the process.

    Args:
      code: Exit status passed to sys.exit() once the usage text is printed.
    """
    substitutions = DEFAULT_ARGS.copy()
    substitutions['script'] = os.path.basename(sys.argv[0])
    usage_text = sys.modules['__main__'].__doc__ % substitutions
    sys.stdout.write(usage_text + '\n')
    sys.stdout.flush()
    sys.exit(code)
def ParseArguments(argv):
"""Parses command-line arguments.
Args:
argv: Command-line arguments, including the executable name, used to
execute this application.
Returns:
Tuple (args, option_dict) where:
args: List of command-line arguments following the executable name.
option_dict: Dictionary of parsed flags that maps keys from DEFAULT_ARGS
to their values, which are either pulled from the defaults, or from
command-line flags.
"""
option_dict = DEFAULT_ARGS.copy()
try:
opts, args = getopt.gnu_getopt(argv[1:], OPTIONS, LONG_OPTIONS)
except getopt.GetoptError, e:
print >>sys.stderr, 'Error: %s' % e
PrintUsageExit(1)
for option, value in opts:
if option in ('-h', '--help'):
PrintUsageExit(0)
if option in ('-d', '--debug'):
option_dict[ARG_LOG_LEVEL] = logging.DEBUG
if option in ('-p', '--port'):
try:
option_dict[ARG_PORT] = int(value)
if not (65535 > option_dict[ARG_PORT] > 0):
raise ValueError
except ValueError:
print >>sys.stderr, 'Invalid value supplied for port'
PrintUsageExit(1)
def expand_path(s):
return os.path.abspath(os.path.expanduser(s))
if option in ('-a', '--address'):
option_dict[ARG_ADDRESS] = value
if option == '--blobstore_path':
option_dict[ARG_BLOBSTORE_PATH] = expand_path(value)
if option == '--datastore_path':
option_dict[ARG_DATASTORE_PATH] = expand_path(value)
if option == '--search_indexes_path':
option_dict[ARG_SEARCH_INDEX_PATH] = expand_path(value)
if option == '--prospective_search_path':
option_dict[ARG_PROSPECTIVE_SEARCH_PATH] = expand_path(value)
if option == '--skip_sdk_update_check':
option_dict[ARG_SKIP_SDK_UPDATE_CHECK] = True
if option == '--use_sqlite':
option_dict[ARG_USE_SQLITE] = True
if option == '--port_sqlite_data':
option_dict[ARG_PORT_SQLITE_DATA] = True
if option == '--high_replication':
option_dict[ARG_HIGH_REPLICATION] = True
if option == '--history_path':
option_dict[ARG_HISTORY_PATH] = expand_path(value)
if option in ('-c', '--clear_datastore'):
option_dict[ARG_CLEAR_DATASTORE] = True
if option == '--clear_prospective_search':
option_dict[ARG_CLEAR_PROSPECTIVE_SEARCH] = True
if option == '--clear_search_indexes':
option_dict[ARG_CLEAR_SEARCH_INDEX] = True
if option == '--require_indexes':
option_dict[ARG_REQUIRE_INDEXES] = True
if option == '--mysql_host':
option_dict[ARG_MYSQL_HOST] = value
if option == '--mysql_port':
option_dict[ARG_MYSQL_PORT] = _ParsePort(value, '--mysql_port')
if option == '--mysql_user':
option_dict[ARG_MYSQL_USER] = value
if option == '--mysql_password':
option_dict[ARG_MYSQL_PASSWORD] = value
if option == '--mysql_socket':
option_dict[ARG_MYSQL_SOCKET] = value
if option == '--smtp_host':
option_dict[ARG_SMTP_HOST] = value
if option == '--smtp_port':
option_dict[ARG_SMTP_PORT] = _ParsePort(value, '--smtp_port')
if option == '--smtp_user':
option_dict[ARG_SMTP_USER] = value
if option == '--smtp_password':
option_dict[ARG_SMTP_PASSWORD] = value
if option == '--enable_sendmail':
option_dict[ARG_ENABLE_SENDMAIL] = True
if option == '--show_mail_body':
option_dict[ARG_SHOW_MAIL_BODY] = True
if option == '--auth_domain':
option_dict['_DEFAULT_ENV_AUTH_DOMAIN'] = value
if option == '--debug_imports':
option_dict['_ENABLE_LOGGING'] = True
if option == '--admin_console_server':
option_dict[ARG_ADMIN_CONSOLE_SERVER] = value.strip()
if option == '--admin_console_host':
option_dict[ARG_ADMIN_CONSOLE_HOST] = value
if option == '--allow_skipped_files':
option_dict[ARG_ALLOW_SKIPPED_FILES] = True
if option == '--disable_static_caching':
option_dict[ARG_STATIC_CACHING] = False
if option == '--disable_task_running':
option_dict[ARG_DISABLE_TASK_RUNNING] = True
if option == '--task_retry_seconds':
try:
option_dict[ARG_TASK_RETRY_SECONDS] = int(value)
if option_dict[ARG_TASK_RETRY_SECONDS] < 0:
raise ValueError
except ValueError:
print >>sys.stderr, 'Invalid value supplied for task_retry_seconds'
PrintUsageExit(1)
if option == '--trusted':
option_dict[ARG_TRUSTED] = True
if option == '--persist_logs':
option_dict[ARG_PERSIST_LOGS] = True
if option == '--backends':
option_dict[ARG_BACKENDS] = value
if option == '--multiprocess':
option_dict[ARG_MULTIPROCESS] = value
if option == '--multiprocess_min_port':
option_dict[ARG_MULTIPROCESS_MIN_PORT] = value
if option == '--multiprocess_api_server':
option_dict[ARG_MULTIPROCESS_API_SERVER] = value
if option == '--multiprocess_api_port':
option_dict[ARG_MULTIPROCESS_API_PORT] = value
if option == '--multiprocess_app_instance_id':
option_dict[ARG_MULTIPROCESS_APP_INSTANCE_ID] = value
if option == '--multiprocess_backend_id':
option_dict[ARG_MULTIPROCESS_BACKEND_ID] = value
if option == '--multiprocess_backend_instance_id':
option_dict[ARG_MULTIPROCESS_BACKEND_INSTANCE_ID] = value
if option == '--multiprocess_frontend_port':
option_dict[ARG_MULTIPROCESS_FRONTEND_PORT] = value
if option == '--default_partition':
option_dict[ARG_DEFAULT_PARTITION] = value
if option == '--enable_console':
option_dict[ARG_CONSOLE] = True
if option == '--disable_console':
option_dict[ARG_CONSOLE] = False
option_dict.setdefault(ARG_CONSOLE,
option_dict[ARG_ADDRESS] == DEFAULT_ARGS[ARG_ADDRESS])
return args, option_dict
def _ParsePort(port, description):
"""Parses a port number from a string.
Args:
port: string
description: string to use in error messages.
Returns: integer between 0 and 65535
Raises:
ValueError if port is not a valid port number.
"""
try:
port = int(port)
if not (65535 > port > 0):
raise ValueError
return port
except ValueError:
print >>sys.stderr, 'Invalid value %s supplied for %s' % (port, description)
PrintUsageExit(1)
def MakeRpcServer(option_dict):
    """Build the HttpRpcServer used for the SDK update check.

    Args:
      option_dict: The dict of command line options.

    Returns:
      An authenticated appengine_rpc.HttpRpcServer pointed at the configured
      admin console server.
    """
    def _dummy_credentials():
        # The update check never logs in; placeholder credentials suffice.
        return ('unused_email', 'unused_password')

    rpc_server = appengine_rpc.HttpRpcServer(
        option_dict[ARG_ADMIN_CONSOLE_SERVER],
        _dummy_credentials,
        appcfg.GetUserAgent(),
        appcfg.GetSourceName(),
        host_override=option_dict[ARG_ADMIN_CONSOLE_HOST])
    rpc_server.authenticated = True
    return rpc_server
def SigTermHandler(signum, frame):
    """Signal handler installed for SIGTERM.

    Translates the TERM signal into a KeyboardInterrupt so the serving loop
    in main() performs the same graceful shutdown as a Ctrl-C.
    """
    raise KeyboardInterrupt
def main(argv):
    """Runs the development application server.

    Args:
      argv: Full command line, including the executable name (sys.argv).

    Returns:
      Process exit status: 0 on clean shutdown, 1 on any startup or
      serving error.
    """
    args, option_dict = ParseArguments(argv)

    # Exactly one positional argument is expected: the application root.
    if len(args) != 1:
        print >>sys.stderr, 'Invalid arguments'
        PrintUsageExit(1)

    root_path = args[0]

    # Apply flags that ParseArguments stashed under private keys as
    # module-level side effects, before any app code is loaded.
    if '_DEFAULT_ENV_AUTH_DOMAIN' in option_dict:
        auth_domain = option_dict['_DEFAULT_ENV_AUTH_DOMAIN']
        dev_appserver.DEFAULT_ENV['AUTH_DOMAIN'] = auth_domain
    if '_ENABLE_LOGGING' in option_dict:
        enable_logging = option_dict['_ENABLE_LOGGING']
        dev_appserver.HardenedModulesHook.ENABLE_LOGGING = enable_logging

    log_level = option_dict[ARG_LOG_LEVEL]

    option_dict['root_path'] = os.path.realpath(root_path)

    logging.getLogger().setLevel(log_level)

    default_partition = option_dict[ARG_DEFAULT_PARTITION]
    appinfo = None
    try:
        appinfo, _, _ = dev_appserver.LoadAppConfig(
            root_path, {}, default_partition=default_partition)
    except yaml_errors.EventListenerError, e:
        logging.error('Fatal error when loading application configuration:\n%s', e)
        return 1
    except dev_appserver.InvalidAppConfigError, e:
        logging.error('Application configuration file invalid:\n%s', e)
        return 1

    # Warn when the local interpreter version differs from the production
    # runtime (PRODUCTION_VERSION for 'python', (2, 7) for 'python27').
    version_tuple = tuple(sys.version_info[:2])
    expected_version = PRODUCTION_VERSION
    if appinfo.runtime == 'python27':
        expected_version = (2, 7)

    if ARG_MULTIPROCESS not in option_dict and WARN_ABOUT_PYTHON_VERSION:
        if version_tuple < expected_version:
            sys.stderr.write('Warning: You are using a Python runtime (%d.%d) that '
                             'is older than the production runtime environment '
                             '(%d.%d). Your application may be dependent on Python '
                             'behaviors that have changed and may not work correctly '
                             'when deployed to production.\n' % (
                                 version_tuple[0], version_tuple[1],
                                 expected_version[0], expected_version[1]))
        if version_tuple > expected_version:
            sys.stderr.write('Warning: You are using a Python runtime (%d.%d) that '
                             'is more recent than the production runtime environment '
                             '(%d.%d). Your application may use features that are '
                             'not available in the production environment and may '
                             'not work correctly when deployed to production.\n' % (
                                 version_tuple[0], version_tuple[1],
                                 expected_version[0], expected_version[1]))

    if appinfo.runtime == 'python':
        appcfg.MigratePython27Notice()

    # Initialize (possible) multiprocess/backends mode before reading options
    # it may have adjusted.
    multiprocess.Init(argv, option_dict, root_path, appinfo)
    dev_process = multiprocess.GlobalProcess()
    port = option_dict[ARG_PORT]
    login_url = option_dict[ARG_LOGIN_URL]
    address = option_dict[ARG_ADDRESS]
    allow_skipped_files = option_dict[ARG_ALLOW_SKIPPED_FILES]
    static_caching = option_dict[ARG_STATIC_CACHING]
    persist_logs = option_dict[ARG_PERSIST_LOGS]
    skip_sdk_update_check = option_dict[ARG_SKIP_SDK_UPDATE_CHECK]
    interactive_console = option_dict[ARG_CONSOLE]

    # SDK update check -- only in the top-level process, and only when an
    # admin console server is configured.
    if (option_dict[ARG_ADMIN_CONSOLE_SERVER] != '' and
        not dev_process.IsSubprocess()):
        server = MakeRpcServer(option_dict)
        if skip_sdk_update_check:
            logging.info('Skipping update check.')
        else:
            update_check = appcfg.UpdateCheck(server, appinfo)
            update_check.CheckSupportedVersion()
            if update_check.AllowedToCheckForUpdates():
                update_check.CheckForUpdates()

    if dev_process.IsSubprocess():
        logging.getLogger().setLevel(logging.WARNING)

    try:
        dev_appserver.SetupStubs(appinfo.application,
                                 _use_atexit_for_datastore_stub=True,
                                 **option_dict)
    except:
        # NOTE(review): bare except -- deliberately catches *everything* at
        # this top-level boundary so stub-setup failures are logged before
        # exiting; consider narrowing to 'except Exception'.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        logging.error(str(exc_type) + ': ' + str(exc_value))
        logging.debug(''.join(traceback.format_exception(
            exc_type, exc_value, exc_traceback)))
        return 1

    frontend_port = option_dict.get(ARG_MULTIPROCESS_FRONTEND_PORT, None)
    if frontend_port is not None:
        frontend_port = int(frontend_port)
    http_server = dev_appserver.CreateServer(
        root_path,
        login_url,
        port,
        sdk_dir=SDK_PATH,
        serve_address=address,
        allow_skipped_files=allow_skipped_files,
        static_caching=static_caching,
        default_partition=default_partition,
        persist_logs=persist_logs,
        frontend_port=frontend_port,
        interactive_console=interactive_console)

    # Route SIGTERM through KeyboardInterrupt for a graceful shutdown.
    signal.signal(signal.SIGTERM, SigTermHandler)

    dev_process.PrintStartMessage(appinfo.application, address, port)

    if dev_process.IsInstance():
        logging.getLogger().setLevel(logging.INFO)

    try:
        try:
            http_server.serve_forever()
        except KeyboardInterrupt:
            if not dev_process.IsSubprocess():
                logging.info('Server interrupted by user, terminating')
        except:
            # NOTE(review): bare except at the serving boundary; logs the full
            # traceback and terminates with an error status.
            exc_info = sys.exc_info()
            info_string = '\n'.join(traceback.format_exception(*exc_info))
            logging.error('Error encountered:\n%s\nNow terminating.', info_string)
            return 1
        finally:
            http_server.server_close()
    finally:
        # Retry multiprocess shutdown until it completes, even if the user
        # keeps pressing Ctrl-C, then tear down the API stubs.
        done = False
        while not done:
            try:
                multiprocess.Shutdown()
                done = True
            except KeyboardInterrupt:
                pass
        dev_appserver.TearDownStubs()

    return 0
# Script entry point: exit with main()'s status code.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| mit |
andfoy/margffoy-tuay-server | env/lib/python2.7/site-packages/reportlab-3.2.0-py2.7-linux-x86_64.egg/reportlab/pdfbase/_fontdata_widths_helveticaboldoblique.py | 112 | 3670 | widths = {'A': 722,
'AE': 1000,
'Aacute': 722,
'Acircumflex': 722,
'Adieresis': 722,
'Agrave': 722,
'Aring': 722,
'Atilde': 722,
'B': 722,
'C': 722,
'Ccedilla': 722,
'D': 722,
'E': 667,
'Eacute': 667,
'Ecircumflex': 667,
'Edieresis': 667,
'Egrave': 667,
'Eth': 722,
'Euro': 556,
'F': 611,
'G': 778,
'H': 722,
'I': 278,
'Iacute': 278,
'Icircumflex': 278,
'Idieresis': 278,
'Igrave': 278,
'J': 556,
'K': 722,
'L': 611,
'Lslash': 611,
'M': 833,
'N': 722,
'Ntilde': 722,
'O': 778,
'OE': 1000,
'Oacute': 778,
'Ocircumflex': 778,
'Odieresis': 778,
'Ograve': 778,
'Oslash': 778,
'Otilde': 778,
'P': 667,
'Q': 778,
'R': 722,
'S': 667,
'Scaron': 667,
'T': 611,
'Thorn': 667,
'U': 722,
'Uacute': 722,
'Ucircumflex': 722,
'Udieresis': 722,
'Ugrave': 722,
'V': 667,
'W': 944,
'X': 667,
'Y': 667,
'Yacute': 667,
'Ydieresis': 667,
'Z': 611,
'Zcaron': 611,
'a': 556,
'aacute': 556,
'acircumflex': 556,
'acute': 333,
'adieresis': 556,
'ae': 889,
'agrave': 556,
'ampersand': 722,
'aring': 556,
'asciicircum': 584,
'asciitilde': 584,
'asterisk': 389,
'at': 975,
'atilde': 556,
'b': 611,
'backslash': 278,
'bar': 280,
'braceleft': 389,
'braceright': 389,
'bracketleft': 333,
'bracketright': 333,
'breve': 333,
'brokenbar': 280,
'bullet': 350,
'c': 556,
'caron': 333,
'ccedilla': 556,
'cedilla': 333,
'cent': 556,
'circumflex': 333,
'colon': 333,
'comma': 278,
'copyright': 737,
'currency': 556,
'd': 611,
'dagger': 556,
'daggerdbl': 556,
'degree': 400,
'dieresis': 333,
'divide': 584,
'dollar': 556,
'dotaccent': 333,
'dotlessi': 278,
'e': 556,
'eacute': 556,
'ecircumflex': 556,
'edieresis': 556,
'egrave': 556,
'eight': 556,
'ellipsis': 1000,
'emdash': 1000,
'endash': 556,
'equal': 584,
'eth': 611,
'exclam': 333,
'exclamdown': 333,
'f': 333,
'fi': 611,
'five': 556,
'fl': 611,
'florin': 556,
'four': 556,
'fraction': 167,
'g': 611,
'germandbls': 611,
'grave': 333,
'greater': 584,
'guillemotleft': 556,
'guillemotright': 556,
'guilsinglleft': 333,
'guilsinglright': 333,
'h': 611,
'hungarumlaut': 333,
'hyphen': 333,
'i': 278,
'iacute': 278,
'icircumflex': 278,
'idieresis': 278,
'igrave': 278,
'j': 278,
'k': 556,
'l': 278,
'less': 584,
'logicalnot': 584,
'lslash': 278,
'm': 889,
'macron': 333,
'minus': 584,
'mu': 611,
'multiply': 584,
'n': 611,
'nine': 556,
'ntilde': 611,
'numbersign': 556,
'o': 611,
'oacute': 611,
'ocircumflex': 611,
'odieresis': 611,
'oe': 944,
'ogonek': 333,
'ograve': 611,
'one': 556,
'onehalf': 834,
'onequarter': 834,
'onesuperior': 333,
'ordfeminine': 370,
'ordmasculine': 365,
'oslash': 611,
'otilde': 611,
'p': 611,
'paragraph': 556,
'parenleft': 333,
'parenright': 333,
'percent': 889,
'period': 278,
'periodcentered': 278,
'perthousand': 1000,
'plus': 584,
'plusminus': 584,
'q': 611,
'question': 611,
'questiondown': 611,
'quotedbl': 474,
'quotedblbase': 500,
'quotedblleft': 500,
'quotedblright': 500,
'quoteleft': 278,
'quoteright': 278,
'quotesinglbase': 278,
'quotesingle': 238,
'r': 389,
'registered': 737,
'ring': 333,
's': 556,
'scaron': 556,
'section': 556,
'semicolon': 333,
'seven': 556,
'six': 556,
'slash': 278,
'space': 278,
'sterling': 556,
't': 333,
'thorn': 611,
'three': 556,
'threequarters': 834,
'threesuperior': 333,
'tilde': 333,
'trademark': 1000,
'two': 556,
'twosuperior': 333,
'u': 611,
'uacute': 611,
'ucircumflex': 611,
'udieresis': 611,
'ugrave': 611,
'underscore': 556,
'v': 556,
'w': 778,
'x': 556,
'y': 556,
'yacute': 556,
'ydieresis': 556,
'yen': 556,
'z': 500,
'zcaron': 500,
'zero': 556}
| gpl-2.0 |
zcchen/shadowsocks | shadowsocks/crypto/openssl.py | 1038 | 5414 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
__all__ = ['ciphers']

# Lazily initialized module state: 'libcrypto' holds the ctypes handle and
# 'loaded' records whether load_openssl() has run.  'buf_size' is the initial
# size of the shared output buffer 'buf' created by load_openssl() and grown
# on demand in OpenSSLCrypto.update().
libcrypto = None
loaded = False
buf_size = 2048
def load_openssl():
    """Locate libcrypto (OpenSSL), declare the ctypes signatures this module
    uses, allocate the shared output buffer, and set the 'loaded' flag so
    this initialization runs only once.

    Raises:
        Exception: if no usable libcrypto library can be found.
    """
    global loaded, libcrypto, buf
    # 'eay32' is the Windows (libeay32.dll) name for libcrypto.
    libcrypto = util.find_library(('crypto', 'eay32'),
                                  'EVP_get_cipherbyname',
                                  'libcrypto')
    if libcrypto is None:
        raise Exception('libcrypto(OpenSSL) not found')

    # Declare restype/argtypes so pointers survive on 64-bit platforms
    # (ctypes defaults to int returns, which would truncate them).
    libcrypto.EVP_get_cipherbyname.restype = c_void_p
    libcrypto.EVP_CIPHER_CTX_new.restype = c_void_p

    libcrypto.EVP_CipherInit_ex.argtypes = (c_void_p, c_void_p, c_char_p,
                                            c_char_p, c_char_p, c_int)

    libcrypto.EVP_CipherUpdate.argtypes = (c_void_p, c_void_p, c_void_p,
                                           c_char_p, c_int)

    libcrypto.EVP_CIPHER_CTX_cleanup.argtypes = (c_void_p,)
    libcrypto.EVP_CIPHER_CTX_free.argtypes = (c_void_p,)
    if hasattr(libcrypto, 'OpenSSL_add_all_ciphers'):
        libcrypto.OpenSSL_add_all_ciphers()

    buf = create_string_buffer(buf_size)
    loaded = True
def load_cipher(cipher_name):
    """Look up a cipher by its direct EVP_<name> constructor.

    Fallback for ciphers that EVP_get_cipherbyname cannot resolve.

    Args:
        cipher_name: cipher name as a byte string (the caller passes
            common.to_bytes(...), e.g. b'aes-128-cfb').

    Returns:
        The EVP_CIPHER pointer, or None if libcrypto lacks the symbol.
    """
    # Build e.g. b'aes-128-cfb' -> 'EVP_aes_128_cfb'.
    # BUG FIX: use byte literals so this works when cipher_name is bytes on
    # Python 3; the original concatenated str 'EVP_' with bytes, raising
    # TypeError there (on Python 2, b'...' literals are plain str, so
    # behavior is unchanged).
    func_name = b'EVP_' + cipher_name.replace(b'-', b'_')
    if bytes != str:
        func_name = str(func_name, 'utf-8')
    cipher = getattr(libcrypto, func_name, None)
    if cipher:
        cipher.restype = c_void_p
        return cipher()
    return None
class OpenSSLCrypto(object):
    """Stream-cipher wrapper around OpenSSL's EVP interface.

    One instance either encrypts (op=1) or decrypts (op=0) a continuous
    stream; update() may be called repeatedly with successive chunks.
    """

    def __init__(self, cipher_name, key, iv, op):
        self._ctx = None
        if not loaded:
            load_openssl()
        cipher_name = common.to_bytes(cipher_name)
        cipher = libcrypto.EVP_get_cipherbyname(cipher_name)
        if not cipher:
            # Fall back to the direct EVP_<name> constructor for ciphers not
            # registered with EVP_get_cipherbyname.
            cipher = load_cipher(cipher_name)
        if not cipher:
            raise Exception('cipher %s not found in libcrypto' % cipher_name)
        key_ptr = c_char_p(key)
        iv_ptr = c_char_p(iv)
        self._ctx = libcrypto.EVP_CIPHER_CTX_new()
        if not self._ctx:
            raise Exception('can not create cipher context')
        r = libcrypto.EVP_CipherInit_ex(self._ctx, cipher, None,
                                        key_ptr, iv_ptr, c_int(op))
        if not r:
            self.clean()
            raise Exception('can not initialize cipher context')

    def update(self, data):
        """Encrypt/decrypt one chunk of bytes; returns the transformed bytes."""
        global buf_size, buf
        # NOTE(review): the out-length parameter is a c_long, while the EVP
        # prototype takes int*; the declared argtype is c_void_p so ctypes
        # does not catch the mismatch.  Works on common little-endian LP64
        # platforms -- confirm before relying on it elsewhere.
        cipher_out_len = c_long(0)
        l = len(data)
        if buf_size < l:
            # Grow the shared module-level buffer; a stream cipher emits at
            # most len(data) bytes, so doubling leaves headroom.
            buf_size = l * 2
            buf = create_string_buffer(buf_size)
        libcrypto.EVP_CipherUpdate(self._ctx, byref(buf),
                                   byref(cipher_out_len), c_char_p(data), l)
        # buf is copied to a str object when we access buf.raw
        return buf.raw[:cipher_out_len.value]

    def __del__(self):
        self.clean()

    def clean(self):
        """Free the EVP context.  Safe to call more than once."""
        if self._ctx:
            libcrypto.EVP_CIPHER_CTX_cleanup(self._ctx)
            libcrypto.EVP_CIPHER_CTX_free(self._ctx)
            # BUG FIX: reset the handle so a second call (e.g. __del__ after
            # the __init__ failure path already called clean()) does not
            # double-free the context.
            self._ctx = None
# Supported method name -> (key length in bytes, IV length in bytes,
# implementation class).  Consumed by shadowsocks' cipher selection.
ciphers = {
    'aes-128-cfb': (16, 16, OpenSSLCrypto),
    'aes-192-cfb': (24, 16, OpenSSLCrypto),
    'aes-256-cfb': (32, 16, OpenSSLCrypto),
    'aes-128-ofb': (16, 16, OpenSSLCrypto),
    'aes-192-ofb': (24, 16, OpenSSLCrypto),
    'aes-256-ofb': (32, 16, OpenSSLCrypto),
    'aes-128-ctr': (16, 16, OpenSSLCrypto),
    'aes-192-ctr': (24, 16, OpenSSLCrypto),
    'aes-256-ctr': (32, 16, OpenSSLCrypto),
    'aes-128-cfb8': (16, 16, OpenSSLCrypto),
    'aes-192-cfb8': (24, 16, OpenSSLCrypto),
    'aes-256-cfb8': (32, 16, OpenSSLCrypto),
    'aes-128-cfb1': (16, 16, OpenSSLCrypto),
    'aes-192-cfb1': (24, 16, OpenSSLCrypto),
    'aes-256-cfb1': (32, 16, OpenSSLCrypto),
    'bf-cfb': (16, 8, OpenSSLCrypto),
    'camellia-128-cfb': (16, 16, OpenSSLCrypto),
    'camellia-192-cfb': (24, 16, OpenSSLCrypto),
    'camellia-256-cfb': (32, 16, OpenSSLCrypto),
    'cast5-cfb': (16, 8, OpenSSLCrypto),
    'des-cfb': (8, 8, OpenSSLCrypto),
    'idea-cfb': (16, 8, OpenSSLCrypto),
    'rc2-cfb': (16, 8, OpenSSLCrypto),
    'rc4': (16, 0, OpenSSLCrypto),
    'seed-cfb': (16, 16, OpenSSLCrypto),
}
def run_method(method):
    """Round-trip self-test: encrypt and decrypt with 'method' via util.run_cipher."""
    key = b'k' * 32
    iv = b'i' * 16
    encryptor = OpenSSLCrypto(method, key, iv, 1)
    decryptor = OpenSSLCrypto(method, key, iv, 0)
    util.run_cipher(encryptor, decryptor)
# Smoke tests: each performs an encrypt/decrypt round trip for one cipher
# configuration.  Runnable directly ('python openssl.py') or via a test runner.
def test_aes_128_cfb():
    run_method('aes-128-cfb')

def test_aes_256_cfb():
    run_method('aes-256-cfb')

def test_aes_128_cfb8():
    run_method('aes-128-cfb8')

def test_aes_256_ofb():
    run_method('aes-256-ofb')

def test_aes_256_ctr():
    run_method('aes-256-ctr')

def test_bf_cfb():
    run_method('bf-cfb')

def test_rc4():
    run_method('rc4')

if __name__ == '__main__':
    test_aes_128_cfb()
| apache-2.0 |
kartikshah1/Test | evaluate/utils/results.py | 1 | 3074 | '''
This file is not being used in any module
'''
from django.core.files.storage import default_storage
from django.core.files import File
from assignments.models import Program
from assignments.models import Testcase
from grader.settings import MEDIA_ROOT
import tarfile, os
class TestResults(object):
    """Accumulates the grading outcome of one user's submission and persists
    it per testcase via the Results model.

    NOTE(review): this module references ``Results`` (in saveTestcaseResults)
    but never imports it -- as written, saveTestcaseResults raises NameError.
    The Results model must be imported at the top of this file.
    """

    def __init__(self, user):
        # user: the student whose submission is being graded.
        self.user = user

    def setSubmissionErrors(self):
        """Record that no submission file was found."""
        self.file_found = False
        self.compiled = False
        self.test_passed = False

    def setCompilationFailed(self, errorMessages):
        """Record a compilation failure, keeping the compiler messages."""
        self.file_found = True
        self.compiled = False
        self.test_passed = False
        self.compiler_errors = errorMessages

    def setTestcaseFailed(self):
        """Record that the program compiled but a testcase failed."""
        self.file_found = True
        self.compiled = True
        self.test_passed = False

    def saveAssignmentResults(self, assignment):
        """Persist results for every program of the given assignment."""
        programs = Program.objects.filter(assignment=assignment)
        for program in programs:
            self.saveProgramResults(program)

    def saveProgramResults(self, program):
        """Persist results for every testcase of the given program."""
        testcases = Testcase.objects.filter(program=program)
        for testcase in testcases:
            self.saveTestcaseResults(testcase)

    def saveTestcaseResults(self, testcase, testResult=None):
        """Store the outcome of one testcase plus an archive of its output
        files and captured standard output.

        Args:
            testcase: the Testcase model instance being recorded.
            testResult: optional object carrying .stdout/.stderr line lists
                from the test run; None when the run produced nothing.
        """
        outputfiles = []
        if testcase.program.outputFiles:
            src = os.path.join(MEDIA_ROOT, testcase.program.outputFiles.name)
            # Context manager ensures the tar is closed even if listing fails.
            # NOTE(review): os.path.isfile(a.name) checks the *current working
            # directory*, not the archive -- presumably only files the grader
            # just produced should be re-archived; confirm this is intended.
            with tarfile.open(src) as tar:
                outputfiles = [a.name for a in tar.getmembers()
                               if a.isfile() and os.path.isfile(a.name)]
        # Save results to database.
        testcaseResult, created = Results.objects.get_or_create(
            user=self.user,
            testcase=testcase
        )
        testcaseResult.file_found = self.file_found if hasattr(self, 'file_found') else True
        testcaseResult.compiled = self.compiled if hasattr(self, 'compiled') else True
        testcaseResult.compiler_errors = "\n".join(self.compiler_errors) if hasattr(self, 'compiler_errors') else ""
        testcaseResult.test_passed = self.test_passed if hasattr(self, 'test_passed') else True
        if testResult is not None:
            testResult.runtime_errors = "\n".join(testResult.stderr)
        if not created:
            # Replace the previously stored output archive.
            default_storage.delete(testcaseResult.output_files.path)
        testcaseResult.save()
        # Capture the program's standard output alongside its output files.
        stdoutFileName = "standardOutput_" + str(testcase.id)
        with open(stdoutFileName, 'w') as stdOutputofTestcase:
            if testResult is not None:
                stdOutputofTestcase.write("\n".join(testResult.stdout))
        outputfiles.append(stdoutFileName)
        tarname = "output_file_" + str(testcase.id) + ".tar.bz2"
        with tarfile.open(name=tarname, mode="w:bz2") as outputFilesTar:
            for afile in outputfiles:
                outputFilesTar.add(afile)
        # BUG FIX: open the archive in binary mode and close it after saving;
        # the original used File(open(tarname)) -- text mode, and the handle
        # was never closed.
        with open(tarname, 'rb') as archive:
            testcaseResult.output_files.save(tarname, File(archive))
| mit |
double-y/django | django/utils/datetime_safe.py | 535 | 2836 | # Python's datetime strftime doesn't handle dates before 1900.
# These classes override date and datetime to support the formatting of a date
# through its full "proleptic Gregorian" date range.
#
# Based on code submitted to comp.lang.python by Andrew Dalke
#
# >>> datetime_safe.date(1850, 8, 2).strftime("%Y/%m/%d was a %A")
# '1850/08/02 was a Friday'
import re
import time as ttime
from datetime import (
date as real_date, datetime as real_datetime, time as real_time,
)
class date(real_date):
    """datetime.date subclass whose strftime also works before 1900."""
    def strftime(self, fmt):
        return strftime(self, fmt)
class datetime(real_datetime):
    """datetime.datetime subclass whose strftime also works before 1900."""
    def strftime(self, fmt):
        return strftime(self, fmt)
    @classmethod
    def combine(cls, date, time):
        # Overridden so the result is this safe subclass (cls), not the
        # stdlib datetime returned by real_datetime.combine.
        return cls(date.year, date.month, date.day,
                   time.hour, time.minute, time.second,
                   time.microsecond, time.tzinfo)
    def date(self):
        # Return the safe date class defined above, not datetime.date.
        return date(self.year, self.month, self.day)
class time(real_time):
    # Times have no pre-1900 formatting issue; this subclass exists only
    # for API symmetry with the safe date/datetime classes.
    pass
def new_date(d):
    "Generate a safe date from a datetime.date object."
    year, month, day = d.timetuple()[:3]
    return date(year, month, day)
def new_datetime(d):
    """
    Generate a safe datetime from a datetime.date or datetime.datetime object.
    """
    if isinstance(d, real_datetime):
        # Preserve the full time component (and tzinfo) of real datetimes.
        return datetime(d.year, d.month, d.day,
                        d.hour, d.minute, d.second,
                        d.microsecond, d.tzinfo)
    # Plain dates get midnight defaults from the datetime constructor.
    return datetime(d.year, d.month, d.day)
# This library does not support strftime's "%s" or "%y" format strings.
# Allowed if there's an even number of "%"s because they are escaped.
_illegal_formatting = re.compile(r"((^|[^%])(%%)*%[sy])")
def _findall(text, substr):
    """Return indices of every (possibly overlapping) occurrence of
    *substr* in *text*."""
    sites = []
    i = 0
    while True:
        j = text.find(substr, i)
        if j == -1:
            break
        sites.append(j)
        i = j + 1
    return sites
def strftime(dt, fmt):
    """Format *dt* with *fmt*, supporting dates before 1900.

    Years >= 1900 defer to the native strftime. Earlier years are mapped
    onto an equivalent year near 2000 inside the 28-year Gregorian cycle
    (same weekday/leap pattern), formatted there, and the real year is
    spliced back into the output.

    Raises TypeError for pre-1900 dates when *fmt* contains the
    unsupported "%s" or "%y" directives.
    """
    if dt.year >= 1900:
        return super(type(dt), dt).strftime(fmt)
    illegal_formatting = _illegal_formatting.search(fmt)
    if illegal_formatting:
        # Fixed: the original message lacked the space before the offending
        # directive ("...does not handle%y").
        raise TypeError("strftime of dates before 1900 does not handle " + illegal_formatting.group(0))
    year = dt.year
    # For every non-leap year century, advance by
    # 6 years to get into the 28-year repeat cycle
    delta = 2000 - year
    off = 6 * (delta // 100 + delta // 400)
    year = year + off
    # Move to around the year 2000
    year = year + ((2000 - year) // 28) * 28
    timetuple = dt.timetuple()
    s1 = ttime.strftime(fmt, (year,) + timetuple[1:])
    sites1 = _findall(s1, str(year))
    s2 = ttime.strftime(fmt, (year + 28,) + timetuple[1:])
    sites2 = _findall(s2, str(year + 28))
    # Only trust positions where the stand-in year shows up in both
    # renderings: those are genuine year fields, not coincidences.
    sites = []
    for site in sites1:
        if site in sites2:
            sites.append(site)
    s = s1
    syear = "%04d" % (dt.year,)
    for site in sites:
        s = s[:site] + syear + s[site + 4:]
    return s
| bsd-3-clause |
angelbbs/sunxi-u-boot-ZX803 | tools/patman/test.py | 32 | 7066 | #
# Copyright (c) 2011 The Chromium OS Authors.
#
# SPDX-License-Identifier: GPL-2.0+
#
import os
import tempfile
import unittest
import checkpatch
import gitutil
import patchstream
import series
class TestPatch(unittest.TestCase):
    """Tests for patman's patch filtering and checkpatch wrappers.

    TODO: Write tests for the rest of the functionality
    """
    def testBasic(self):
        """Test basic filter operation: FixPatch must strip the BUG/TEST/
        Change-Id/Review-URL tags while keeping the commit message and
        Signed-off-by line intact."""
        data='''
From 656c9a8c31fa65859d924cd21da920d6ba537fad Mon Sep 17 00:00:00 2001
From: Simon Glass <sjg@chromium.org>
Date: Thu, 28 Apr 2011 09:58:51 -0700
Subject: [PATCH (resend) 3/7] Tegra2: Add more clock support
This adds functions to enable/disable clocks and reset to on-chip peripherals.
BUG=chromium-os:13875
TEST=build U-Boot for Seaboard, boot
Change-Id: I80fe1d0c0b7dd10aa58ce5bb1d9290b6664d5413
Review URL: http://codereview.chromium.org/6900006
Signed-off-by: Simon Glass <sjg@chromium.org>
---
 arch/arm/cpu/armv7/tegra2/Makefile | 2 +-
 arch/arm/cpu/armv7/tegra2/ap20.c | 57 ++----
 arch/arm/cpu/armv7/tegra2/clock.c | 163 +++++++++++++++++
'''
        expected='''
From 656c9a8c31fa65859d924cd21da920d6ba537fad Mon Sep 17 00:00:00 2001
From: Simon Glass <sjg@chromium.org>
Date: Thu, 28 Apr 2011 09:58:51 -0700
Subject: [PATCH (resend) 3/7] Tegra2: Add more clock support
This adds functions to enable/disable clocks and reset to on-chip peripherals.
Signed-off-by: Simon Glass <sjg@chromium.org>
---
 arch/arm/cpu/armv7/tegra2/Makefile | 2 +-
 arch/arm/cpu/armv7/tegra2/ap20.c | 57 ++----
 arch/arm/cpu/armv7/tegra2/clock.c | 163 +++++++++++++++++
'''
        out = ''
        # Write the raw patch and the expected result to temp files, run
        # the filter in place, then diff the two.
        inhandle, inname = tempfile.mkstemp()
        infd = os.fdopen(inhandle, 'w')
        infd.write(data)
        infd.close()
        exphandle, expname = tempfile.mkstemp()
        expfd = os.fdopen(exphandle, 'w')
        expfd.write(expected)
        expfd.close()
        patchstream.FixPatch(None, inname, series.Series(), None)
        rc = os.system('diff -u %s %s' % (inname, expname))
        self.assertEqual(rc, 0)
        os.remove(inname)
        os.remove(expname)
    def GetData(self, data_type):
        """Return a synthetic patch, optionally seeded with one defect.

        The template below has four %s slots: the Signed-off-by line, two
        whitespace slots inside the C hunk, and an indentation slot; the
        *data_type* argument ('good', 'no-signoff', 'spaces', 'indent')
        selects which one is made invalid.
        """
        data='''
From 4924887af52713cabea78420eff03badea8f0035 Mon Sep 17 00:00:00 2001
From: Simon Glass <sjg@chromium.org>
Date: Thu, 7 Apr 2011 10:14:41 -0700
Subject: [PATCH 1/4] Add microsecond boot time measurement
This defines the basics of a new boot time measurement feature. This allows
logging of very accurate time measurements as the boot proceeds, by using
an available microsecond counter.
%s
---
 README | 11 ++++++++
 common/bootstage.c | 50 ++++++++++++++++++++++++++++++++++++
 include/bootstage.h | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++
 include/common.h | 8 ++++++
 5 files changed, 141 insertions(+), 0 deletions(-)
 create mode 100644 common/bootstage.c
 create mode 100644 include/bootstage.h
diff --git a/README b/README
index 6f3748d..f9e4e65 100644
--- a/README
+++ b/README
@@ -2026,6 +2026,17 @@ The following options need to be configured:
 example, some LED's) on your board. At the moment,
 the following checkpoints are implemented:
+- Time boot progress
+ CONFIG_BOOTSTAGE
+
+ Define this option to enable microsecond boot stage timing
+ on supported platforms. For this to work your platform
+ needs to define a function timer_get_us() which returns the
+ number of microseconds since reset. This would normally
+ be done in your SOC or board timer.c file.
+
+ You can add calls to bootstage_mark() to set time markers.
+
- Standalone program support:
 CONFIG_STANDALONE_LOAD_ADDR
diff --git a/common/bootstage.c b/common/bootstage.c
new file mode 100644
index 0000000..2234c87
--- /dev/null
+++ b/common/bootstage.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2011, Google Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+
+/*
+ * This module records the progress of boot and arbitrary commands, and
+ * permits accurate timestamping of each. The records can optionally be
+ * passed to kernel in the ATAGs
+ */
+
+#include <common.h>
+
+
+struct bootstage_record {
+ uint32_t time_us;
+ const char *name;
+};
+
+static struct bootstage_record record[BOOTSTAGE_COUNT];
+
+uint32_t bootstage_mark(enum bootstage_id id, const char *name)
+{
+ struct bootstage_record *rec = &record[id];
+
+ /* Only record the first event for each */
+%sif (!rec->name) {
+ rec->time_us = (uint32_t)timer_get_us();
+ rec->name = name;
+ }
+ if (!rec->name &&
+ %ssomething_else) {
+ rec->time_us = (uint32_t)timer_get_us();
+ rec->name = name;
+ }
+%sreturn rec->time_us;
+}
--
1.7.3.1
'''
        signoff = 'Signed-off-by: Simon Glass <sjg@chromium.org>\n'
        tab = ' '
        indent = ' '
        if data_type == 'good':
            pass
        elif data_type == 'no-signoff':
            signoff = ''
        elif data_type == 'spaces':
            tab = ' '
        elif data_type == 'indent':
            indent = tab
        else:
            print 'not implemented'
        return data % (signoff, tab, indent, tab)
    def SetupData(self, data_type):
        """Write the synthetic patch for *data_type* to a temp file and
        return its path (caller removes it)."""
        inhandle, inname = tempfile.mkstemp()
        infd = os.fdopen(inhandle, 'w')
        data = self.GetData(data_type)
        infd.write(data)
        infd.close()
        return inname
    def testGood(self):
        """Test checkpatch operation: a clean patch yields no problems."""
        inf = self.SetupData('good')
        result = checkpatch.CheckPatch(inf)
        self.assertEqual(result.ok, True)
        self.assertEqual(result.problems, [])
        self.assertEqual(result.errors, 0)
        self.assertEqual(result.warnings, 0)
        self.assertEqual(result.checks, 0)
        self.assertEqual(result.lines, 67)
        os.remove(inf)
    def testNoSignoff(self):
        # A missing Signed-off-by must be reported as one error.
        inf = self.SetupData('no-signoff')
        result = checkpatch.CheckPatch(inf)
        self.assertEqual(result.ok, False)
        self.assertEqual(len(result.problems), 1)
        self.assertEqual(result.errors, 1)
        self.assertEqual(result.warnings, 0)
        self.assertEqual(result.checks, 0)
        self.assertEqual(result.lines, 67)
        os.remove(inf)
    def testSpaces(self):
        # Space-indented code must be reported as one warning.
        inf = self.SetupData('spaces')
        result = checkpatch.CheckPatch(inf)
        self.assertEqual(result.ok, False)
        self.assertEqual(len(result.problems), 1)
        self.assertEqual(result.errors, 0)
        self.assertEqual(result.warnings, 1)
        self.assertEqual(result.checks, 0)
        self.assertEqual(result.lines, 67)
        os.remove(inf)
    def testIndent(self):
        # Misaligned continuation lines must be reported as one check.
        inf = self.SetupData('indent')
        result = checkpatch.CheckPatch(inf)
        self.assertEqual(result.ok, False)
        self.assertEqual(len(result.problems), 1)
        self.assertEqual(result.errors, 0)
        self.assertEqual(result.warnings, 0)
        self.assertEqual(result.checks, 1)
        self.assertEqual(result.lines, 67)
        os.remove(inf)
if __name__ == "__main__":
    # unittest.main() calls sys.exit() by default, which made the
    # gitutil.RunTests() call below unreachable; exit=False lets both run.
    unittest.main(exit=False)
    gitutil.RunTests()
| gpl-2.0 |
themurph/openshift-tools | ansible/roles/lib_gcloud/library/gcloud_compute_disk_labels.py | 3 | 22090 | #!/usr/bin/env python
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
GcloudCLI class that wraps the oc commands in a subprocess
'''
import atexit
import json
import os
import random
# Not all genearated modules use this.
# pylint: disable=unused-import
import re
import shutil
import string
import subprocess
import tempfile
import yaml
# Not all genearated modules use this.
# pylint: disable=unused-import
import copy
# pylint: disable=import-error
from apiclient.discovery import build
# pylint: disable=import-error
from oauth2client.client import GoogleCredentials
from ansible.module_utils.basic import AnsibleModule
class GcloudCLIError(Exception):
    '''Exception raised for failures while running gcloud CLI commands.'''
    pass
# pylint: disable=too-few-public-methods
class GcloudCLI(object):
''' Class to wrap the command line tools '''
def __init__(self, credentials=None, project=None, verbose=False):
''' Constructor for GcloudCLI '''
self.scope = None
self._project = project
if not credentials:
self.credentials = GoogleCredentials.get_application_default()
else:
tmp = tempfile.NamedTemporaryFile()
tmp.write(json.dumps(credentials))
tmp.seek(0)
self.credentials = GoogleCredentials.from_stream(tmp.name)
tmp.close()
self.scope = build('compute', 'beta', credentials=self.credentials)
self.verbose = verbose
@property
def project(self):
'''property for project'''
return self._project
def _create_image(self, image_name, image_info):
'''create an image name'''
cmd = ['compute', 'images', 'create', image_name]
for key, val in image_info.items():
if val:
cmd.extend(['--%s' % key, val])
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _delete_image(self, image_name):
'''delete image by name '''
cmd = ['compute', 'images', 'delete', image_name]
if image_name:
cmd.extend(['describe', image_name])
else:
cmd.append('list')
cmd.append('-q')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_images(self, image_name=None):
'''list images.
if name is supplied perform a describe and return
'''
cmd = ['compute', 'images']
if image_name:
cmd.extend(['describe', image_name])
else:
cmd.append('list')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_deployments(self, simple=True):
'''list deployments by name '''
cmd = ['deployment-manager', 'deployments', 'list']
if simple:
cmd.append('--simple-list')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _delete_deployment(self, dname):
'''list deployments by name '''
cmd = ['deployment-manager', 'deployments', 'delete', dname, '-q']
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _create_deployment(self, dname, config=None, opts=None):
''' create a deployment'''
cmd = ['deployment-manager', 'deployments', 'create', dname]
if config:
if isinstance(config, dict):
config = Utils.create_file(dname, config)
if isinstance(config, str) and os.path.exists(config):
cmd.extend(['--config=%s' % config])
if opts:
for key, val in opts.items():
cmd.append('--%s=%s' % (key, val))
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _update_deployment(self, dname, config=None, opts=None):
''' create a deployment'''
cmd = ['deployment-manager', 'deployments', 'update', dname]
if config:
if isinstance(config, dict):
config = Utils.create_file(dname, config)
if isinstance(config, str) and os.path.exists(config):
cmd.extend(['--config=%s' % config])
if opts:
for key, val in opts.items():
cmd.append('--%s=%s' % (key, val))
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_manifests(self, deployment, mname=None):
''' list manifests
if a name is specified then perform a describe
'''
cmd = ['deployment-manager', 'manifests', '--deployment', deployment]
if mname:
cmd.extend(['describe', mname])
else:
cmd.append('list')
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _delete_address(self, aname):
''' list addresses
if a name is specified then perform a describe
'''
cmd = ['compute', 'addresses', 'delete', aname, '-q']
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_addresses(self, aname=None):
''' list addresses
if a name is specified then perform a describe
'''
cmd = ['compute', 'addresses']
if aname:
cmd.extend(['describe', aname])
else:
cmd.append('list')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _create_address(self, address_name, address_info, address=None, isglobal=False):
''' create a deployment'''
cmd = ['compute', 'addresses', 'create', address_name]
if address:
cmd.append(address)
if isglobal:
cmd.append('--global')
for key, val in address_info.items():
if val:
cmd.extend(['--%s' % key, val])
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_metadata(self, resource_type, name=None, zone=None):
''' list metadata'''
cmd = ['compute', resource_type, 'describe']
if name:
cmd.extend([name])
if zone:
cmd.extend(['--zone', zone])
return self.gcloud_cmd(cmd, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _delete_metadata(self, resource_type, keys, remove_all=False, name=None, zone=None):
'''create metadata'''
cmd = ['compute', resource_type, 'remove-metadata']
if name:
cmd.extend([name])
if zone:
cmd.extend(['--zone', zone])
if remove_all:
cmd.append('--all')
else:
cmd.append('--keys')
cmd.append(','.join(keys))
cmd.append('-q')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _create_metadata(self, resource_type, metadata=None, metadata_from_file=None, name=None, zone=None):
'''create metadata'''
cmd = ['compute', resource_type, 'add-metadata']
if name:
cmd.extend([name])
if zone:
cmd.extend(['--zone', zone])
data = None
if metadata_from_file:
cmd.append('--metadata-from-file')
data = metadata_from_file
else:
cmd.append('--metadata')
data = metadata
cmd.append(','.join(['%s=%s' % (key, val) for key, val in data.items()]))
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_service_accounts(self, sa_name=None):
'''return service accounts '''
cmd = ['iam', 'service-accounts']
if sa_name:
cmd.extend(['describe', sa_name])
else:
cmd.append('list')
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _delete_service_account(self, sa_name):
'''delete service account '''
cmd = ['iam', 'service-accounts', 'delete', sa_name, '-q']
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _create_service_account(self, sa_name, display_name=None):
'''create service account '''
cmd = ['iam', 'service-accounts', 'create', sa_name]
if display_name:
cmd.extend(['--display-name', display_name])
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _update_service_account(self, sa_name, display_name=None):
'''update service account '''
cmd = ['iam', 'service-accounts', 'update', sa_name]
if display_name:
cmd.extend(['--display-name', display_name])
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _delete_service_account_key(self, sa_name, key_id):
'''delete service account key'''
cmd = ['iam', 'service-accounts', 'keys', 'delete', key_id, '--iam-account', sa_name, '-q']
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_service_account_keys(self, sa_name):
'''return service account keys '''
cmd = ['iam', 'service-accounts', 'keys', 'list', '--iam-account', sa_name]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _create_service_account_key(self, sa_name, outputfile, key_format='p12'):
'''create service account key '''
# Ensure we remove the key file
atexit.register(Utils.cleanup, [outputfile])
cmd = ['iam', 'service-accounts', 'keys', 'create', outputfile,
'--iam-account', sa_name, '--key-file-type', key_format]
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_project_policy(self, project):
'''create service account key '''
cmd = ['projects', 'get-iam-policy', project]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _add_project_policy(self, project, member, role):
'''create service account key '''
cmd = ['projects', 'add-iam-policy-binding', project, '--member', member, '--role', role]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _remove_project_policy(self, project, member, role):
'''create service account key '''
cmd = ['projects', 'remove-iam-policy-binding', project, '--member', member, '--role', role]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _set_project_policy(self, project, policy_path):
'''create service account key '''
cmd = ['projects', 'set-iam-policy', project, policy_path]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _list_zones(self):
''' list zones '''
cmd = ['compute', 'zones', 'list']
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _config_set(self, config_param, config_value, config_section):
''' set config params with gcloud config set '''
param = config_section + '/' + config_param
cmd = ['config', 'set', param, config_value]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _list_config(self):
'''return config '''
cmd = ['config', 'list']
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def list_disks(self, zone=None, disk_name=None):
'''return a list of disk objects in this project and zone'''
cmd = ['beta', 'compute', 'disks']
if disk_name and zone:
cmd.extend(['describe', disk_name, '--zone', zone])
else:
cmd.append('list')
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
# disabling too-many-arguments as these are all required for the disk labels
# pylint: disable=too-many-arguments
def _set_disk_labels(self, project, zone, dname, labels, finger_print):
'''create service account key '''
if labels == None:
labels = {}
self.scope = build('compute', 'beta', credentials=self.credentials)
body = {'labels': labels, 'labelFingerprint': finger_print}
result = self.scope.disks().setLabels(project=project,
zone=zone,
resource=dname,
body=body,
).execute()
return result
def gcloud_cmd(self, cmd, output=False, output_type='json'):
'''Base command for gcloud '''
cmds = ['/usr/bin/gcloud']
if self.project:
cmds.extend(['--project', self.project])
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
proc = subprocess.Popen(cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={})
stdout, stderr = proc.communicate()
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds
})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {},
})
return rval
################################################################################
# utilities and helpers for generation
################################################################################
class Utils(object):
    '''Helper utilities shared by the generated gcloud modules.'''
    COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/'
    @staticmethod
    def create_file(rname, data, ftype='yaml'):
        '''Write *data* to /tmp/<rname> (as yaml, json or raw text) and
        register the file for removal at interpreter exit. Returns the path.'''
        path = os.path.join('/tmp', rname)
        if ftype == 'yaml':
            contents = yaml.safe_dump(data, default_flow_style=False)
        elif ftype == 'json':
            contents = json.dumps(data)
        else:
            contents = data
        with open(path, 'w') as out:
            out.write(contents)
        # Clean the file up automatically when the module finishes.
        atexit.register(Utils.cleanup, [path])
        return path
    @staticmethod
    def global_compute_url(project, collection, rname):
        '''Build the global compute URL for a resource.'''
        return Utils.COMPUTE_URL_BASE + ('projects/%s/global/%s/%s' % (project, collection, rname))
    @staticmethod
    def zonal_compute_url(project, zone, collection, rname):
        '''Build the zonal compute URL for a resource.'''
        return Utils.COMPUTE_URL_BASE + ('projects/%s/zones/%s/%s/%s' % (project, zone, collection, rname))
    @staticmethod
    def generate_random_name(size):
        '''Return a random string of lowercase letters and digits of length *size*.'''
        alphabet = string.ascii_lowercase + string.digits
        return ''.join(random.choice(alphabet) for _ in range(size))
    @staticmethod
    def cleanup(files):
        '''Remove each existing path in *files* (directories recursively).'''
        for path in files:
            if not os.path.exists(path):
                continue
            if os.path.isdir(path):
                shutil.rmtree(path)
            elif os.path.isfile(path):
                os.remove(path)
# pylint: disable=too-many-instance-attributes
class GcloudComputeDisk(GcloudCLI):
    '''Wrapper for gcloud compute disk operations (labels in particular).'''
    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 project,
                 zone,
                 disk_info,
                 credentials=None,
                 verbose=False):
        '''Constructor.

        disk_info: dict describing the disk; must contain at least 'name'
        and 'labelFingerprint' (as returned by the compute API).
        '''
        super(GcloudComputeDisk, self).__init__(credentials, project)
        self.zone = zone
        self.disk_info = disk_info
        self.verbose = verbose
    def set_labels(self, labels=None):
        '''set the labels for a disk'''
        return self._set_disk_labels(self.project,
                                     self.zone,
                                     self.disk_info['name'],
                                     labels,
                                     self.disk_info['labelFingerprint'])
    def delete_labels(self):
        ''' remove labels from a disk '''
        return self.set_labels(labels={})
    def has_labels(self, labels):
        '''Return True when the disk's labels exactly match *labels*.'''
        # Fixed: replaced dict.has_key() (Python-2 only, removed in 3)
        # with the `in` operator, which works on both.
        if 'labels' not in self.disk_info:
            return False
        if len(self.disk_info['labels']) == 0:
            return False
        if len(labels.keys()) != len(self.disk_info['labels'].keys()):
            return False
        for key, val in labels.items():
            if key not in self.disk_info['labels'] or self.disk_info['labels'][key] != val:
                return False
        return True
    def __str__(self):
        '''to str'''
        return "GcloudComputeDisk: %s" % self.disk_info['name']
# vim: expandtab:tabstop=4:shiftwidth=4
#pylint: disable=too-many-branches
def main():
    ''' ansible module for gcloud compute disk labels

    States: list (report matched disks), absent (remove labels),
    present (set labels). Disks are selected either by exact disk_name
    or by a name_match regex.
    '''
    module = AnsibleModule(
        argument_spec=dict(
            # credentials
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            disk_name=dict(default=None, type='str'),
            name_match=dict(default=None, type='str'),
            labels=dict(default=None, type='dict'),
            project=dict(default=False, type='str'),
            zone=dict(default=False, type='str'),
            creds=dict(default=None, type='dict'),
        ),
        required_one_of=[['disk_name', 'name_match']],
        supports_check_mode=True,
    )
    # Build the list of GcloudComputeDisk wrappers to operate on.
    if module.params['disk_name']:
        disks = GcloudCLI().list_disks(zone=module.params['zone'], disk_name=module.params['disk_name'])['results']
        disks = [GcloudComputeDisk(module.params['project'],
                                   module.params['zone'],
                                   disk,
                                   module.params['creds']) for disk in disks]
    elif module.params['name_match']:
        # Select every disk in the zone whose name matches the regex.
        regex = re.compile(module.params['name_match'])
        disks = []
        for disk in GcloudCLI().list_disks(zone=module.params['zone'])['results']:
            if regex.findall(disk['name']):
                gdisk = GcloudComputeDisk(module.params['project'],
                                          module.params['zone'],
                                          disk,
                                          module.params['creds'])
                disks.append(gdisk)
    else:
        module.fail_json(changed=False, msg='Please specify disk_name or name_match.')
    state = module.params['state']
    #####
    # Get
    #####
    if state == 'list':
        module.exit_json(changed=False, results=disks, state="list")
    ########
    # Delete
    ########
    if state == 'absent':
        if len(disks) > 0:
            # Only removes labels when every matched disk currently carries
            # exactly the requested labels; otherwise reports no change.
            if not all([disk.has_labels(module.params['labels']) for disk in disks]):
                module.exit_json(changed=False, state="absent")
            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed a delete.')
            results = []
            for disk in disks:
                results.append(disk.delete_labels())
            module.exit_json(changed=True, results=results, state="absent")
        module.exit_json(changed=False, msg='No disks found.', state="absent")
    if state == 'present':
        ########
        # Create
        ########
        # Apply labels only where at least one disk differs (idempotency).
        if not all([disk.has_labels(module.params['labels']) for disk in disks]):
            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed a create.')
            # Create it here
            results = []
            for disk in disks:
                results.append(disk.set_labels(module.params['labels']))
            module.exit_json(changed=True, results=results, state="present")
        module.exit_json(changed=False, state="present")
    # Unreachable with the choices constraint above, kept as a safety net.
    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")
#if __name__ == '__main__':
# gcloud = GcloudComputeImage('rhel-7-base-2016-06-10')
# print gcloud.list_images()
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# Legacy Ansible convention: module snippets are wildcard-imported at the
# bottom of the module (AnsibleModule is also imported explicitly above).
from ansible.module_utils.basic import *
main()
| apache-2.0 |
rbogdanoff/reactiveaws | setup.py | 1 | 1821 | from setuptools import setup, find_packages, Command
import os
with open('LICENSE') as f:
license = f.read()
# config = {
# 'description': 'Reactive AWS',
# 'author': 'Ron Bogdanoff',
# 'url': 'https://github.com/rbogdanoff/reactiveaws',
# 'author_email': 'ron.bogdanoff@gmail.com',
# 'version': '0.0.1',
# 'packages': find_packages(exclude=('tests', 'docs')),
# 'scripts': [],
# 'name': 'rxaws',
# 'license': license,
# # https://pypi.python.org/pypi?%3Aaction=list_classifiers
# 'classifiers': [
# 'Development Status :: 1 - Planning',
# 'Intended Audience :: Developers',
# 'License :: OSI Approved :: Apache Software License',
# 'Programming Language :: Python :: 3'
# ],
# 'cmdclass': [
# 'unittest': nosetests
# ]
# }
class UnitTestCommand(Command):
    """A custom setuptools command that runs the unit tests via nose."""
    description = 'run the unit tests'
    user_options = [
    ]
    def initialize_options(self):
        # Required by the Command interface; no options to initialize.
        pass
    def finalize_options(self):
        # Required by the Command interface; nothing to validate.
        pass
    def run(self):
        """Run command."""
        os.system('nosetests tests.unit')
# Package metadata and build configuration (supersedes the commented-out
# config dict above).
setup(
    description = 'Reactive AWS',
    author = 'Ron Bogdanoff',
    url = 'https://github.com/rbogdanoff/reactiveaws',
    author_email = 'ron.bogdanoff@gmail.com',
    version = '0.0.1',
    packages = find_packages(exclude=('tests', 'docs')),
    scripts = [],
    name = 'rxaws',
    license = license,
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers = [
        'Development Status :: 1 - Planning',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3'
    ],
    cmdclass = {
        'unittest': UnitTestCommand,
    }
)
| apache-2.0 |
althalus/knotcrafters | knotdirectory/knotdirectory/knots/migrations/0001_initial.py | 1 | 2900 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the initial `knots_knot` table."""
    def forwards(self, orm):
        # Adding model 'Knot'
        db.create_table(u'knots_knot', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=90)),
            ('other_names', self.gf('django.db.models.fields.TextField')()),
            ('creator', self.gf('django.db.models.fields.CharField')(max_length=90)),
            ('notes', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal(u'knots', ['Knot'])
    def backwards(self, orm):
        # Deleting model 'Knot'
        db.delete_table(u'knots_knot')
    # Frozen ORM definitions South uses to reconstruct the models as they
    # existed at the time of this migration (do not edit by hand).
    models = {
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'knots.knot': {
            'Meta': {'object_name': 'Knot'},
            'creator': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
            'notes': ('django.db.models.fields.TextField', [], {}),
            'other_names': ('django.db.models.fields.TextField', [], {})
        },
        u'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        u'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
        }
    }
complete_apps = ['knots'] | mit |
mikey1234/script.module.urlresolver | lib/urlresolver/plugins/sharefiles.py | 3 | 3415 | """
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import urllib2
from urlresolver import common
from lib import jsunpack
# Custom imports
import re
class SharefilesResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "sharefiles"
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
self.pattern = 'http://((?:www.)?sharefiles4u.com)/([0-9a-zA-Z]+)'
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
try:
html = self.net.http_GET(web_url).content
#send all form values except premium
sPattern = '<input.*?name="([^"]+)".*?value=([^>]+)>'
r = re.findall(sPattern, html)
data = {}
for match in r:
name = match[0]
if 'premium' in name : continue
value = match[1].replace('"','')
data[name] = value
html = self.net.http_POST(web_url, data).content
# get url from packed javascript
sPattern = "<div id=\"player_code\">\s*<script type='text/javascript'>eval.*?return p}\((.*?)\)\s*</script>"
r = re.search(sPattern, html, re.DOTALL + re.IGNORECASE)
sJavascript = r.group(1)
sUnpacked = jsunpack.unpack(sJavascript)
sPattern = '<param name="src"0="(.*?)"'
r = re.search(sPattern, sUnpacked)
return r.group(1)
except urllib2.URLError, e:
common.addon.log_error('Sharefiles: got http error %d fetching %s' %
(e.code, web_url))
common.addon.show_small_popup('Error','Http error: '+str(e), 5000, error_logo)
return self.unresolvable(code=3, msg=e)
except Exception, e:
common.addon.log_error('**** Sharefiles Error occured: %s' % e)
common.addon.show_small_popup(title='[B][COLOR white]SHAREFILES[/COLOR][/B]', msg='[COLOR red]%s[/COLOR]' % e, delay=5000, image=error_logo)
return self.unresolvable(code=0, msg=e)
def get_url(self, host, media_id):
return 'http://www.sharefiles4u.com/%s' % (media_id)
def get_host_and_id(self, url):
r = re.search(self.pattern, url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return re.match(self.pattern, url) or self.name in host
| gpl-2.0 |
stephanie-wang/ray | python/ray/tune/examples/durable_trainable_example.py | 1 | 4230 | import argparse
import numpy as np
import time
import logging
import os
import ray
from ray import tune
from ray.tune import DurableTrainable
from ray.tune.sync_client import get_sync_client
import cloudpickle
logger = logging.getLogger(__name__)
class MockDurableTrainable(DurableTrainable):
    """Mocks the storage client on initialization to store data locally."""

    def __init__(self, remote_checkpoint_dir, *args, **kwargs):
        # Rewrite the remote URI (e.g. "s3://bucket/path") into a local
        # path under /tmp so no real cloud storage is needed.
        local_dir_suffix = remote_checkpoint_dir.split("://")[1]
        remote_checkpoint_dir = os.path.join("/tmp", local_dir_suffix)
        # Disallow malformed relative paths for delete safety.
        assert os.path.abspath(remote_checkpoint_dir).startswith("/tmp")
        # BUG FIX: the original logged ``self.remote_checkpoint_dir``
        # here, before the parent constructor had set that attribute,
        # raising AttributeError.  Log the local variable instead.
        logger.info("Using %s as the mocked remote checkpoint directory.",
                    remote_checkpoint_dir)
        super(MockDurableTrainable, self).__init__(remote_checkpoint_dir,
                                                   *args, **kwargs)

    def _create_storage_client(self):
        # rsync-based local sync client standing in for cloud storage.
        sync = "mkdir -p {target} && rsync -avz {source} {target}"
        delete = "rm -rf {target}"
        return get_sync_client(sync, delete)
class OptimusFn(object):
    """Synthetic, smoothly decaying loss curve used by the example.

    ``eval(k)`` follows an inverse curve parameterized by three
    coefficients; optional noise is pre-sampled so repeated evaluations
    at the same step are reproducible.
    """

    def __init__(self, params, max_t=10000):
        self.params = params
        # One pre-drawn noise sample per possible training step.
        self.noise = np.random.normal(size=max_t) * 0.005

    def eval(self, k, add_noise=True):
        b0, b1, b2 = self.params
        base = (b0 * k / 100 + 0.1 * b1 + 0.5)**(-1) + b2 * 0.01
        if not add_noise:
            return base
        return base + abs(self.noise[k])
def get_optimus_trainable(parent_cls):
    """Build an example Trainable class deriving from *parent_cls*.

    *parent_cls* is expected to be a Trainable-like class (e.g.
    ``DurableTrainable`` or ``MockDurableTrainable``); the returned
    subclass drives a synthetic training loop with an OptimusFn curve.
    """

    class OptimusTrainable(parent_cls):
        def _setup(self, config):
            """Initialize the synthetic loss function from *config*."""
            self.iter = 0
            if config.get("seed"):
                np.random.seed(config["seed"])
            time.sleep(config.get("startup_delay", 0))
            params = [config["param1"], config["param2"], config["param3"]]
            self.func = OptimusFn(params=params)
            self.initial_samples_per_step = 500
            # BUG FIX: the /dev/urandom handle was opened and never
            # closed; use a context manager so it is released.
            with open("/dev/urandom", "rb") as f:
                self.mock_data = f.read(1024)

        def _train(self):
            self.iter += 1
            new_loss = self.func.eval(self.iter)
            time.sleep(0.5)  # simulate per-step work
            return {
                "mean_loss": float(new_loss),
                "mean_accuracy": (2 - new_loss) / 2,
                "samples": self.initial_samples_per_step
            }

        def _save(self, checkpoint_dir):
            time.sleep(0.5)  # simulate checkpoint latency
            return {
                "func": cloudpickle.dumps(self.func),
                "seed": np.random.get_state(),
                "data": self.mock_data,
                "iter": self.iter
            }

        def _restore(self, checkpoint):
            self.func = cloudpickle.loads(checkpoint["func"])
            # BUG FIX: the restored payload was assigned to ``self.data``
            # while every other code path uses ``self.mock_data``, so the
            # saved bytes were never actually restored.  Restore
            # symmetrically with _save.
            self.mock_data = checkpoint["data"]
            self.iter = checkpoint["iter"]
            np.random.set_state(checkpoint["seed"])

    return OptimusTrainable
def parse():
    """Parse the example's command-line flags.

    Returns an argparse.Namespace with ``local``, ``mock_storage``
    (both boolean, default False) and ``remote_dir`` (str or None).
    """
    parser = argparse.ArgumentParser()
    # Boolean switches share the same shape, so declare them together.
    for switch in ("--local", "--mock-storage"):
        parser.add_argument(switch, action="store_true", default=False)
    parser.add_argument("--remote-dir", type=str)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse()
    # Connect to an existing cluster ("auto") unless --local was given.
    address = None if args.local else "auto"
    ray.init(address=address)
    config = {
        "seed": None,
        "startup_delay": 0.001,
        # Per-trial hyperparameters feeding the synthetic OptimusFn curve.
        "param1": tune.sample_from(lambda spec: np.random.exponential(0.1)),
        "param2": tune.sample_from(lambda _: np.random.rand()),
        "param3": tune.sample_from(lambda _: np.random.rand()),
    }
    # Use the mocked (local /tmp) storage client when requested.
    parent = MockDurableTrainable if args.mock_storage else DurableTrainable
    # NOTE(review): the --remote-dir flag is parsed but unused here;
    # upload_dir is hard-coded below — confirm which is intended.
    analysis = tune.run(
        get_optimus_trainable(parent),
        name="durableTrainable" + str(time.time()),
        config=config,
        num_samples=4,
        verbose=1,
        queue_trials=True,
        # fault tolerance parameters
        max_failures=-1,
        checkpoint_freq=20,
        sync_to_driver=False,
        sync_on_checkpoint=False,
        upload_dir="s3://ray-tune-test/exps/",
        checkpoint_score_attr="training_iteration",
    )
| apache-2.0 |
bcb/qutebrowser | tests/integration/webserver_sub_ssl.py | 2 | 1925 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Minimal flask webserver serving a Hello World via SSL.
This script gets called as a QProcess from integration/conftest.py.
"""
import ssl
import sys
import logging
import os.path
import flask
import webserver_sub
# Single Flask application serving the TLS test endpoint.
app = flask.Flask(__name__)
@app.route('/')
def hello_world():
    """Serve the root page of the SSL test server."""
    return "Hello World via SSL!"
@app.after_request
def log_request(response):
    """Delegate per-request logging to the shared webserver_sub helper."""
    return webserver_sub.log_request(response)
@app.before_first_request
def turn_off_logging():
    # Turn off werkzeug logging after the startup message has been printed.
    werkzeug_logger = logging.getLogger('werkzeug')
    werkzeug_logger.setLevel(logging.ERROR)
def main():
    """Run the HTTPS test server on the port given as argv[1]."""
    ssl_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                           'data', 'ssl')
    # WORKAROUND for https://github.com/PyCQA/pylint/issues/399
    # pylint: disable=no-member, useless-suppression
    # Pins the server to TLS 1.2 with the repository's test certificate.
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    context.load_cert_chain(os.path.join(ssl_dir, 'cert.pem'),
                            os.path.join(ssl_dir, 'key.pem'))
    app.run(port=int(sys.argv[1]), debug=False, ssl_context=context)
# Script entry point (launched as a QProcess by the test conftest).
if __name__ == '__main__':
    main()
| gpl-3.0 |
gri-is/lodjob | OLD_REPOSITORY/utils/mongo_atomizer.py | 1 | 2014 | import os
import sys
import csv
import json
import pprint
from collections import OrderedDict
from pymongo import MongoClient
# Write to MongoDB
# atomizer = MongoAtomizer('knoedler')
# atomizer.make_mongo_writer()
# atomizer.atomize('data/input.csv')
class MongoAtomizer:
    """Loads each row of a CSV file into MongoDB as one document.

    Usage::

        atomizer = MongoAtomizer('knoedler')
        atomizer.atomize('data/input.csv')
    """

    def __init__(self, collection, database='dev_database',
                 address='mongodb://localhost:27017/'):
        self.collection = collection
        self.database = database
        self.address = address
        # NOTE: this connects to MongoDB immediately on construction.
        self.writer = self.make_mongo_writer()

    def make_mongo_writer(self):
        """Return a ``writer(doc, doc_id)`` callable that inserts into Mongo."""
        client = MongoClient(self.address)
        db = client[self.database]
        db_collection = db[self.collection]
        def mongo_writer(doc, doc_id):
            # Does not insert duplicate doc IDs
            if not db_collection.find_one({'_id': doc_id}):
                doc['_id'] = doc_id
                db_collection.insert_one(doc)
        return mongo_writer

    def atomize(self, input_path, writer=None, id_field='star_record_no'):
        """Read *input_path* as CSV and emit one document per row.

        Args:
            input_path: path to the CSV file.
            writer: callable ``(doc, doc_id)``; defaults to ``self.writer``.
            id_field: normalized column name used as the document id.
                Previously hard-coded (the old "REPLACE WITH VARIABLE"
                TODO); now a parameter whose default preserves the old
                behavior.

        Raises:
            FileNotFoundError: if *input_path* does not exist.
        """
        if not writer:
            writer = self.writer
        if not os.path.exists(input_path):
            raise FileNotFoundError(input_path)
        with open(input_path, encoding='utf-8') as csvfile:
            csvreader = csv.DictReader(csvfile)
            # generate standardized keys for each csv field
            field_keys = OrderedDict()
            for field in csvreader.fieldnames:
                field_keys[field] = field.lower().replace(' ', '_')
            # turn csv rows into documents
            pp = pprint.PrettyPrinter(indent=4)
            for row in csvreader:
                doc = OrderedDict()
                doc['flags'] = {}
                doc['log'] = []
                doc['derived'] = OrderedDict()
                doc['source'] = OrderedDict()
                for field, key in field_keys.items():
                    doc['source'][key] = row[field]
                writer(doc, doc['source'][id_field])
                pp.pprint(doc)
| agpl-3.0 |
GPCsolutions/mod-webui | module/perfdata_guess.py | 4 | 7533 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import math
from shinken.util import safe_print
from shinken.misc.perfdata import PerfDatas
# Will try to return a dict with:
# lnk: link to add in this perfdata thing
# title: text to show on it
# metrics: list of ('html color', percent) like [('#68f', 35), ('white', 64)]
def get_perfometer_table_values(elt):
    """Return perfometer display data for *elt*, or None if unavailable.

    The result is a dict with keys ``lnk`` (href), ``title`` (label)
    and ``metrics`` (list of ``('html color', percent)`` tuples).
    """
    # first try to get the command name called
    cmd = elt.check_command.call.split('!')[0]
    safe_print("Looking for perfometer value for command", cmd)
    # Dedicated handlers for well-known plugins; check_ftp shares the
    # generic TCP handler.
    tab = {'check_http': manage_check_http_command,
           'check_ping': manage_check_ping_command,
           'check_tcp': manage_check_tcp_command,
           'check_ftp': manage_check_tcp_command,
           }
    f = tab.get(cmd, None)
    if f:
        return f(elt)
    try:
        r = manage_unknown_command(elt)
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt; narrowed to Exception while
        # keeping the best-effort "return None on failure" behavior.
        return None
    return r
def manage_check_http_command(elt):
    """Build perfometer data from a check_http 'time' metric.

    Returns None when the metric or its value is missing.
    """
    safe_print('Get check_http perfdata of', elt.get_full_name())
    p = PerfDatas(elt.perf_data)
    if not 'time' in p:
        print "No time in p"
        return None
    m = p['time']
    v = m.value
    if not v:
        print "No value, I bailout"
        return None
    # Percent of ok should be time/1s
    pct = get_logarithmic(v, 1)
    # Now get the color
    # OK: #6f2 (102,255,34) green
    # Warning: #f60 (255,102,0) orange
    # Crit: #ff0033 (255,0,51)
    base_color = {0: (102, 255, 34), 1: (255, 102, 0), 2: (255, 0, 51)}
    state_id = get_stateid(elt)
    # Unknown states fall back to a very light blue.
    color = base_color.get(state_id, (179, 196, 255))
    s_color = 'RGB(%d,%d,%d)' % color
    lnk = '#'
    metrics = [(s_color, pct), ('white', 100-pct)]
    title = '%ss' % v
    #print "HTTP: return", {'lnk': lnk, 'metrics': metrics, 'title': title}
    return {'lnk': lnk, 'metrics': metrics, 'title': title}
def manage_check_ping_command(elt):
    """Build perfometer data from a check_ping 'rta' metric.

    The percentage is logarithmic with the critical threshold / 2 as
    the 50% point.  Returns None when the metric, its value or its
    critical threshold is missing.
    """
    safe_print('Get check_ping perfdata of', elt.get_full_name())
    p = PerfDatas(elt.perf_data)
    if not 'rta' in p:
        print "No rta in p"
        return None
    m = p['rta']
    v = m.value
    crit = m.critical
    if not v or not crit:
        print "No value, I bailout"
        return None
    # Percent of ok should be the log of time versus max/2
    pct = get_logarithmic(v, crit / 2)
    # Now get the color
    # OK: #6f2 (102,255,34) green
    # Warning: #f60 (255,102,0) orange
    # Crit: #ff0033 (255,0,51)
    base_color = {0: (102, 255, 34), 1: (255, 102, 0), 2: (255, 0, 51)}
    state_id = get_stateid(elt)
    # Unknown states fall back to a very light blue.
    color = base_color.get(state_id, (179, 196, 255))
    s_color = 'RGB(%d,%d,%d)' % color
    lnk = '#'
    metrics = [(s_color, pct), ('white', 100-pct)]
    title = '%sms' % v
    #print "HTTP: return", {'lnk': lnk, 'metrics': metrics, 'title': title}
    return {'lnk': lnk, 'metrics': metrics, 'title': title}
def manage_check_tcp_command(elt):
    """Build perfometer data from a check_tcp/check_ftp 'time' metric.

    The percentage is logarithmic with the metric's max / 2 as the 50%
    point.  Returns None when the metric, its value or its max is
    missing.
    """
    safe_print('Get check_tcp perfdata of', elt.get_full_name())
    p = PerfDatas(elt.perf_data)
    if not 'time' in p:
        print "No time in p"
        return None
    m = p['time']
    v = m.value
    if not v or not m.max:
        print "No value, I bailout"
        return None
    # Percent of ok should be the log of time versus m.max / 2
    pct = get_logarithmic(v, m.max / 2)
    # Now get the color
    # OK: #6f2 (102,255,34) green
    # Warning: #f60 (255,102,0) orange
    # Crit: #ff0033 (255,0,51)
    base_color = {0: (102, 255, 34), 1: (255, 102, 0), 2: (255, 0, 51)}
    state_id = get_stateid(elt)
    # Unknown states fall back to a very light blue.
    color = base_color.get(state_id, (179, 196, 255))
    s_color = 'RGB(%d,%d,%d)' % color
    # Old linear scaling kept for reference:
    #pct = 100 * (v / m.max)
    # Convert to int
    #pct = int(pct)
    # Minimum 1%, maximum 100%
    #pct = min(max(1, pct), 100)
    lnk = '#'
    metrics = [(s_color, pct), ('white', 100-pct)]
    title = '%ss' % v
    #print "HTTP: return", {'lnk': lnk, 'metrics': metrics, 'title': title}
    return {'lnk': lnk, 'metrics': metrics, 'title': title}
def manage_unknown_command(elt):
safe_print('Get an unmanaged command perfdata of', elt.get_full_name())
p = PerfDatas(elt.perf_data)
if len(p) == 0:
return None
m = None
# Got some override name we know to be ok for printing
if 'time' in p:
m = p['time']
else:
for v in p:
#print "Look for", v
if v.name is not None and v.value is not None:
m = v
break
prop = m.name
safe_print("Got a property", prop, "and a value", m)
v = m.value
if not v:
print "No value, I bailout"
return None
# Now look if min/max are available or not
pct = 0
if m.min and m.max and (m.max - m.min != 0):
pct = 100 * (v / (m.max - m.min))
else: # ok, we will really guess this time...
# Percent of ok should be time/10s
pct = 100 * (v / 10)
# go to int
pct = int(pct)
# But at least 1%
pct = max(1, pct)
# And max to 100%
pct = min(pct, 100)
lnk = '#'
color = get_linear_color(elt, prop)
s_color = 'RGB(%d,%d,%d)' % color
metrics = [(s_color, pct), ('white', 100-pct)]
uom = '' or m.uom
title = '%s%s' % (v, uom)
#print "HTTP: return", {'lnk': lnk, 'metrics': metrics, 'title': title}
return {'lnk': lnk, 'metrics': metrics, 'title': title}
# Get a linear color by looking at the command name
# and the elt status to get a unique value
def get_linear_color(elt, name):
    """Derive a per-metric RGB tuple from the element state and metric name.

    The base color follows the element state; a small offset derived
    from the metric name keeps different metrics visually distinct.
    """
    # base colors are
    # #6688ff (102,136,255) light blue for OK
    # #ffdd65 (255,221,101) light yellow for warning
    # #ff6587 (191,75,101) light red for critical
    # #b3c4ff (179,196,255) very light blue for unknown
    # NOTE(review): #ff6587 is actually (255,101,135), not (191,75,101);
    # confirm which value was intended.
    base = {0: (102, 136, 255), 1: (255, 221, 101), 2: (191, 75, 101)}
    state_id = get_stateid(elt)
    c = base.get(state_id, (179, 196, 255))
    # Get a "hash" of the metric name
    # NOTE(review): relies on a deterministic str hash (true on
    # Python 2; randomized per-process on Python 3).
    h = hash(name) % 25
    #print "H", h
    # Most value are high in red, so to do not overlap, go down
    red = (c[0] - h) % 256
    green = (c[1] - h) % 256
    blue = (c[2] - h) % 256
    color = (red, green, blue)
    print "Get color", color
    return color
def get_stateid(elt):
    """Return elt.state_id for display, mapping host DOWN (1) to critical (2)."""
    sid = elt.state_id
    if sid != 1:
        return sid
    # A host has no WARNING state, so its state 1 (DOWN) is critical.
    return 2 if elt.__class__.my_type == 'host' else sid
def get_logarithmic(value, half):
    """Map *value* onto a 1-100 percent scale, logarithmically.

    *half* is the value that maps to 50%; every factor of 10 above or
    below it shifts the result by +/-10 points.  The result is an int
    clamped to [1, 100].
    """
    # NOTE: removed leftover Python 2 debug ``print`` statements that
    # spammed stdout on every call (and broke Python 3 parsing).
    l_half = math.log(half, 10)
    l_value = math.log(value, 10)
    # Get the percent of our value for what we asked for
    r = 50 + 10.0 * (l_value - l_half)
    # Make it an int between 1 and 100
    r = int(r)
    r = max(1, r)
    r = min(r, 100)
    return r
| agpl-3.0 |
dmitry-sobolev/ansible | lib/ansible/modules/monitoring/stackdriver.py | 77 | 7327 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: stackdriver
short_description: Send code deploy and annotation events to stackdriver
description:
- Send code deploy and annotation events to Stackdriver
version_added: "1.6"
author: "Ben Whaley (@bwhaley)"
options:
key:
description:
- API key.
required: true
default: null
event:
description:
- The type of event to send, either annotation or deploy
choices: ['annotation', 'deploy']
required: false
default: null
revision_id:
description:
- The revision of the code that was deployed. Required for deploy events
required: false
default: null
deployed_by:
description:
- The person or robot responsible for deploying the code
required: false
default: "Ansible"
deployed_to:
description:
- "The environment code was deployed to. (ie: development, staging, production)"
required: false
default: null
repository:
description:
- The repository (or project) deployed
required: false
default: null
msg:
description:
- The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation.
required: false
default: null
annotated_by:
description:
- The person or robot who the annotation should be attributed to.
required: false
default: "Ansible"
level:
description:
- one of INFO/WARN/ERROR, defaults to INFO if not supplied. May affect display.
choices: ['INFO', 'WARN', 'ERROR']
required: false
default: 'INFO'
instance_id:
description:
- id of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown
required: false
default: null
event_epoch:
description:
- "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this."
required: false
default: null
'''
EXAMPLES = '''
- stackdriver:
key: AAAAAA
event: deploy
deployed_to: production
deployed_by: leeroyjenkins
repository: MyWebApp
revision_id: abcd123
- stackdriver:
key: AAAAAA
event: annotation
msg: Greetings from Ansible
annotated_by: leeroyjenkins
level: WARN
instance_id: i-abcd1234
'''
# ===========================================
# Stackdriver module specific support methods.
#
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
# Let snippet from module_utils/basic.py return a proper error in this case
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import fetch_url
def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None):
    """Send a deploy event to Stackdriver"""
    deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent"
    # Mandatory fields first; optional ones only when provided (truthy).
    params = {'revision_id': revision_id, 'deployed_by': deployed_by}
    if deployed_to:
        params['deployed_to'] = deployed_to
    if repository:
        params['repository'] = repository
    return do_send_request(module, deploy_api, params, key)
def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None):
    """Send an annotation event to Stackdriver"""
    annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent"
    params = {'message': msg}
    # Optional fields are only included when provided (truthy).
    optional_fields = (('annotated_by', annotated_by),
                       ('level', level),
                       ('instance_id', instance_id),
                       ('event_epoch', event_epoch))
    for field_name, field_value in optional_fields:
        if field_value:
            params[field_name] = field_value
    return do_send_request(module, annotation_api, params, key)
def do_send_request(module, url, params, key):
    """POST *params* as JSON to *url*, failing the module on non-200."""
    payload = json.dumps(params)
    headers = {
        'Content-Type': 'application/json',
        'x-stackdriver-apikey': key
    }
    _response, info = fetch_url(module, url, headers=headers, data=payload, method='POST')
    if info['status'] != 200:
        module.fail_json(msg="Unable to send msg: %s" % info['msg'])
# ===========================================
# Module execution.
#
def main():
    """Entry point: validate parameters and dispatch the requested event."""
    # NOTE(review): supports_check_mode is declared but the handlers
    # below still send events unconditionally — confirm check-mode
    # behavior is intended.
    module = AnsibleModule(
        argument_spec=dict(
            key=dict(required=True),
            event=dict(required=True, choices=['deploy', 'annotation']),
            msg=dict(),
            revision_id=dict(),
            annotated_by=dict(default='Ansible'),
            level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']),
            instance_id=dict(),
            event_epoch=dict(),
            deployed_by=dict(default='Ansible'),
            deployed_to=dict(),
            repository=dict(),
        ),
        supports_check_mode=True
    )
    key = module.params["key"]
    event = module.params["event"]
    # Annotation params
    msg = module.params["msg"]
    annotated_by = module.params["annotated_by"]
    level = module.params["level"]
    instance_id = module.params["instance_id"]
    event_epoch = module.params["event_epoch"]
    # Deploy params
    revision_id = module.params["revision_id"]
    deployed_by = module.params["deployed_by"]
    deployed_to = module.params["deployed_to"]
    repository = module.params["repository"]
    ##################################################################
    # deploy requires revision_id
    # annotation requires msg
    # We verify these manually
    ##################################################################
    if event == 'deploy':
        if not revision_id:
            module.fail_json(msg="revision_id required for deploy events")
        try:
            send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository)
        except Exception:
            e = get_exception()
            module.fail_json(msg="unable to sent deploy event: %s" % e)
    if event == 'annotation':
        if not msg:
            module.fail_json(msg="msg required for annotation events")
        try:
            send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch)
        except Exception:
            e = get_exception()
            module.fail_json(msg="unable to sent annotation event: %s" % e)
    # NOTE(review): ``changed`` is always reported True — confirm that
    # sending an event should always count as a change.
    changed = True
    module.exit_json(changed=changed, deployed_by=deployed_by)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
tensorflow/tensorflow | tensorflow/python/keras/tests/automatic_outside_compilation_test.py | 6 | 9898 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for automatic outside compilation for TF 2.0/Keras."""
import os
from absl import flags
import numpy as np
from tensorboard.plugins.histogram import summary_v2 as histogram_summary_v2
from tensorboard.plugins.image import summary_v2 as image_summary_v2
from tensorboard.plugins.scalar import summary_v2 as scalar_summary_v2
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import tpu_strategy as tpu_strategy_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager.context import set_soft_device_placement
from tensorflow.python.framework import ops
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import initializers
from tensorflow.python.keras.distribute import distribute_strategy_test
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import sequential as sequential_model_lib
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import convolutional as conv_layer_lib
from tensorflow.python.keras.layers import core as layer_lib
from tensorflow.python.keras.layers import pooling as pool_layer_lib
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2
# from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.summary import summary_iterator
from tensorflow.python.tpu import tpu_strategy_util
# Number of output classes used by the toy datasets/models below.
NUM_CLASSES = 4
FLAGS = flags.FLAGS
# TPU connection flags; empty/None defaults defer to the environment's
# local TPU configuration.
flags.DEFINE_string('tpu', '', 'Name of TPU to connect to.')
flags.DEFINE_string('project', None, 'Name of GCP project with TPU.')
flags.DEFINE_string('zone', None, 'Name of GCP zone with TPU.')
def get_tpu_cluster_resolver():
  """Build a TPUClusterResolver from the --tpu/--zone/--project flags."""
  return tpu_cluster_resolver.TPUClusterResolver(
      tpu=FLAGS.tpu,
      zone=FLAGS.zone,
      project=FLAGS.project,
  )
def get_tpu_strategy():
  """Connect to the TPU cluster and return an initialized TPUStrategy."""
  cluster = get_tpu_cluster_resolver()
  remote.connect_to_cluster(cluster)
  tpu_strategy_util.initialize_tpu_system(cluster)
  return tpu_strategy_lib.TPUStrategy(cluster)
class LayerForScalarSummary(base_layer.Layer):
  """A pass-through layer that only records scalar values to summary."""
  def call(self, x):
    # Add summary scalar using compat v2 implementation.
    scalar_summary_v2.scalar('custom_scalar_summary_v2', math_ops.reduce_sum(x))
    # Input is returned unchanged.
    return x
class LayerForImageSummary(base_layer.Layer):
  """A pass-through layer that only records image values to summary."""
  def call(self, x):
    # Add summary image using compat v2 implementation.
    image_summary_v2.image('custom_image_summary_v2', x)
    # Input is returned unchanged.
    return x
class LayerForHistogramSummary(base_layer.Layer):
  """A pass-through layer that records histogram values to summary."""
  def call(self, x):
    # Add summary histogram using compat v2 implementation.
    histogram_summary_v2.histogram('custom_histogram_summary_v2', x)
    # Input is returned unchanged.
    return x
class CustomModel(training.Model):
  """Custom model with summary ops in model call definition."""
  def __init__(self, name=None):
    super(CustomModel, self).__init__()
    # NOTE(review): the ``name`` parameter is accepted but not forwarded
    # to the parent constructor — confirm whether that is intentional.
    # Fixed-seed initializers keep runs deterministic.
    self._my_layers = [
        layer_lib.Dense(
            4096,
            name='dense1',
            kernel_initializer=initializers.glorot_normal(seed=0),
            use_bias=False),
        layer_lib.Dense(
            4,
            name='dense2',
            kernel_initializer=initializers.glorot_normal(seed=0),
            use_bias=False),
    ]
    self.histogram_summary_layer = LayerForHistogramSummary()
    self.scalar_summary_layer = LayerForScalarSummary()
  def call(self, x):
    # Apply the dense stack, then the summary-recording pass-throughs.
    for layer in self._my_layers:
      x = layer(x)
    x = self.scalar_summary_layer(x)
    return self.histogram_summary_layer(x)
def get_image_dataset():
  """Return a repeated, batched dataset of zero images and labels."""
  images = np.zeros((10, 28, 28, 3), dtype=np.float32)
  labels = np.zeros((10, NUM_CLASSES), dtype=np.float32)
  return (dataset_ops.Dataset.from_tensor_slices((images, labels))
          .repeat(100)
          .batch(10, drop_remainder=True))
def mnist_model(input_shape):
  """Creates a MNIST model."""
  model = sequential_model_lib.Sequential()
  # Adding custom pass-through layer to visualize input images.
  model.add(LayerForImageSummary())
  model.add(
      conv_layer_lib.Conv2D(
          32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
  model.add(conv_layer_lib.Conv2D(64, (3, 3), activation='relu'))
  model.add(pool_layer_lib.MaxPooling2D(pool_size=(2, 2)))
  model.add(layer_lib.Dropout(0.25))
  model.add(layer_lib.Flatten())
  model.add(layer_lib.Dense(128, activation='relu'))
  model.add(layer_lib.Dropout(0.5))
  # NUM_CLASSES-way softmax output head.
  model.add(layer_lib.Dense(NUM_CLASSES, activation='softmax'))
  # Adding custom pass-through layer for summary recording.
  model.add(LayerForHistogramSummary())
  return model
class AutoOutsideCompilationWithKerasTest(test.TestCase):
  """Checks that summary ops inside TPU-compiled Keras code are
  automatically outside-compiled and end up in the event files."""
  def setUp(self):
    super(AutoOutsideCompilationWithKerasTest, self).setUp()
    v2_compat.enable_v2_behavior()
    # Soft placement lets CPU-only summary ops run alongside TPU code.
    set_soft_device_placement(True)
    self.summary_dir = self.get_temp_dir()
  def validate_recorded_sumary_file(self, event_files, summary_dict,
                                    expected_count):
    # NOTE(review): "sumary" is a typo; the name is kept so callers of
    # this helper keep working.
    # Mutates summary_dict in place: counts occurrences of each tracked
    # tag across all event files, then asserts each count equals
    # expected_count.
    for event_file in event_files:
      for e in summary_iterator.summary_iterator(event_file):
        for v in e.summary.value:
          if v.tag in summary_dict:
            summary_dict[v.tag] += 1
    for key in summary_dict:
      self.assertEqual(summary_dict[key], expected_count)
  def testV2SummaryWithKerasSequentialModel(self):
    strategy = get_tpu_strategy()
    with strategy.scope():
      model = mnist_model((28, 28, 3))
      model.compile('sgd', 'mse')
      dataset = get_image_dataset()
      tensorboard_callback = callbacks.TensorBoard(
          self.summary_dir, update_freq=2)
      model.fit(
          dataset,
          steps_per_epoch=10,
          epochs=1,
          callbacks=[tensorboard_callback])
      # Tags emitted by the pass-through summary layers in mnist_model.
      events_count_dictionary = {
          'sequential/layer_for_histogram_summary/custom_histogram_summary_v2':
              0,
          'sequential/layer_for_image_summary/custom_image_summary_v2':
              0,
      }
      event_files = file_io.get_matching_files_v2(
          os.path.join(self.summary_dir, 'train', 'event*'))
      # Since total of 10 steps are ran and summary ops should be invoked
      # every 2 batches, we should see total of 5 event logs.
      self.validate_recorded_sumary_file(event_files, events_count_dictionary,
                                         5)
  def testV2SummaryWithKerasSubclassedModel(self):
    strategy = get_tpu_strategy()
    with strategy.scope():
      model = CustomModel()
      model.compile('sgd', 'mse')
      dataset = distribute_strategy_test.get_dataset(strategy)
      tensorboard_callback = callbacks.TensorBoard(
          self.summary_dir, update_freq=2)
      model.fit(
          dataset,
          steps_per_epoch=10,
          epochs=1,
          callbacks=[tensorboard_callback])
      event_files = file_io.get_matching_files_v2(
          os.path.join(self.summary_dir, 'train', 'event*'))
      # Tags emitted by CustomModel's summary layers.
      events_count_dictionary = {
          ('custom_model/layer_for_scalar_summary/'
           'custom_scalar_summary_v2'):
              0,
          ('custom_model/layer_for_histogram_summary/'
           'custom_histogram_summary_v2'):
              0
      }
      # Since total of 10 steps are ran and summary ops should be invoked
      # every 2 batches, we should see total of 5 event logs.
      self.validate_recorded_sumary_file(event_files, events_count_dictionary,
                                         5)
  def testSummaryWithCustomTrainingLoop(self):
    strategy = get_tpu_strategy()
    writer = summary_ops_v2.create_file_writer_v2(self.summary_dir)
    with strategy.scope():
      model = distribute_strategy_test.get_model()
      model.compile('sgd', 'mse')
    @def_function.function
    def custom_function(dataset):
      def _custom_step(features, labels):
        del labels
        logits = model(features)
        with summary_ops_v2.record_if(True), writer.as_default():
          scalar_summary_v2.scalar(
              'logits',
              math_ops.reduce_sum(logits),
              step=model.optimizer.iterations)
        return logits
      iterator = iter(dataset)
      # NOTE(review): ``(next(iterator))`` is not a tuple literal — the
      # dataset element itself appears to serve as the args tuple;
      # confirm against get_dataset's element structure.
      output = strategy.unwrap(
          strategy.run(_custom_step, args=(next(iterator))))
      return output
    dataset = strategy.experimental_distribute_dataset(
        distribute_strategy_test.get_dataset(strategy))
    custom_function(dataset)
    writer.close()
    event_files = file_io.get_matching_files_v2(
        os.path.join(self.summary_dir, 'event*'))
    events_count_dictionary = {
        ('logits'): 0,
    }
    # The custom loop runs a single step, so one 'logits' record.
    self.validate_recorded_sumary_file(event_files, events_count_dictionary,
                                       1)
# Run the test suite with eager execution enabled.
if __name__ == '__main__':
  ops.enable_eager_execution()
  test.main()
| apache-2.0 |
mjirayu/sit_academy | common/test/acceptance/fixtures/discussion.py | 45 | 4551 | """
Tools for creating discussion content fixture data.
"""
from datetime import datetime
import json
import factory
import requests
from . import COMMENTS_STUB_URL
class ContentFactory(factory.Factory):
    """Base factory producing dict-shaped discussion content records."""
    # factory_boy builds plain dicts rather than model instances.
    FACTORY_FOR = dict
    id = None
    user_id = "dummy-user-id"
    username = "dummy-username"
    course_id = "dummy-course-id"
    commentable_id = "dummy-commentable-id"
    anonymous = False
    anonymous_to_peers = False
    at_position_list = []
    abuse_flaggers = []
    # Timestamps are evaluated once at class-definition time, not per build.
    created_at = datetime.utcnow().isoformat()
    updated_at = datetime.utcnow().isoformat()
    endorsed = False
    closed = False
    votes = {"up_count": 0}
    @classmethod
    def _adjust_kwargs(cls, **kwargs):
        # The discussion code assumes that user_id is a string. This ensures that it always will be.
        if 'user_id' in kwargs:
            kwargs['user_id'] = str(kwargs['user_id'])
        return kwargs
class Thread(ContentFactory):
    """Factory for thread dicts as served by the stub comments service."""
    thread_type = "discussion"
    anonymous = False
    anonymous_to_peers = False
    comments_count = 0
    unread_comments_count = 0
    title = "dummy thread title"
    body = "dummy thread body"
    type = "thread"
    group_id = None
    pinned = False
    read = False
class Comment(ContentFactory):
    """Factory for comment dicts; depth 0 denotes a nested comment level."""
    thread_id = "dummy thread"
    depth = 0
    type = "comment"
    body = "dummy comment body"
class Response(Comment):
    """Factory for response dicts (first-level replies, depth 1)."""
    depth = 1
    body = "dummy response body"
class SearchResult(factory.Factory):
    """Factory for search-result payload dicts from the comments service."""
    FACTORY_FOR = dict
    discussion_data = []
    annotated_content_info = {}
    num_pages = 1
    page = 1
    corrected_text = None
class DiscussionContentFixture(object):
    """Abstract base for fixtures installable on the stub comments service."""

    def push(self):
        """Upload this fixture's serialized data to the stub comments service."""
        endpoint = '{}/set_config'.format(COMMENTS_STUB_URL)
        payload = self.get_config_data()
        requests.put(endpoint, data=payload)

    def get_config_data(self):
        """Return a dict of serialized fixture data for the stub's config endpoint.

        Subclasses must override this.
        """
        raise NotImplementedError()
class SingleThreadViewFixture(DiscussionContentFixture):
    """Fixture for a single discussion thread plus its response/comment tree."""

    def __init__(self, thread):
        self.thread = thread

    def addResponse(self, response, comments=None):
        """Attach `response` (with `comments` as its children) to the thread.

        The response is appended to the list attribute appropriate for the
        thread type and endorsement state, and `comments_count` is updated.
        """
        # Bug fix: the previous default of `comments=[]` was a shared mutable
        # default argument -- the same list object leaked across calls.
        if comments is None:
            comments = []
        response['children'] = comments
        if self.thread["thread_type"] == "discussion":
            responseListAttr = "children"
        elif response["endorsed"]:
            responseListAttr = "endorsed_responses"
        else:
            responseListAttr = "non_endorsed_responses"
        self.thread.setdefault(responseListAttr, []).append(response)
        self.thread['comments_count'] += len(comments) + 1

    def _get_comment_map(self):
        """
        Generate a dict mapping each response/comment in the thread
        by its `id`.
        """
        def _visit(obj):
            res = []
            for child in obj.get('children', []):
                res.append((child['id'], child))
                if 'children' in child:
                    res += _visit(child)
            return res
        return dict(_visit(self.thread))

    def get_config_data(self):
        """Serialize the thread and its flattened comment map for the stub."""
        return {
            "threads": json.dumps({self.thread['id']: self.thread}),
            "comments": json.dumps(self._get_comment_map())
        }
class MultipleThreadFixture(DiscussionContentFixture):
    """Fixture holding several discussion threads at once."""

    def __init__(self, threads):
        self.threads = threads

    def get_config_data(self):
        """Serialize all threads, keyed by id, for the stub's config endpoint."""
        threads_by_id = {}
        for thread in self.threads:
            threads_by_id[thread['id']] = thread
        return {"threads": json.dumps(threads_by_id), "comments": '{}'}

    def add_response(self, response, comments, thread):
        """
        Add responses to the thread
        """
        response['children'] = comments
        if thread["thread_type"] == "discussion":
            target_attr = "children"
        elif response["endorsed"]:
            target_attr = "endorsed_responses"
        else:
            target_attr = "non_endorsed_responses"
        thread.setdefault(target_attr, []).append(response)
        thread['comments_count'] += len(comments) + 1
class UserProfileViewFixture(DiscussionContentFixture):
    """Fixture for the active threads shown on a user's profile view."""

    def __init__(self, threads):
        self.threads = threads

    def get_config_data(self):
        """Serialize the active threads for the stub's config endpoint."""
        serialized_threads = json.dumps(self.threads)
        return {"active_threads": serialized_threads}
class SearchResultFixture(DiscussionContentFixture):
    """Fixture for a canned search result returned by the stub service."""

    def __init__(self, result):
        self.result = result

    def get_config_data(self):
        """Serialize the search result for the stub's config endpoint."""
        serialized_result = json.dumps(self.result)
        return {"search_result": serialized_result}
| agpl-3.0 |
ChameleonCloud/blazar | blazar/db/utils.py | 1 | 5386 | # -*- coding: utf-8 -*-
#
# Author: François Rossigneux <francois.rossigneux@inria.fr>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
Functions in this module are imported into the blazar.db namespace. Call these
functions from blazar.db namespace, not the blazar.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface.
**Related Flags**
:db_backend: string to lookup in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
:sql_connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/blazar/blazar.sqlite`.
"""
import functools

from oslo_config import cfg
from oslo_db import api as db_api
from oslo_log import log as logging
# Maps the config's ``database.backend`` value to the implementing module.
_BACKEND_MAPPING = {
    'sqlalchemy': 'blazar.db.sqlalchemy.utils',
}

# Lazily-loaded backend; every public function below delegates to it.
IMPL = db_api.DBAPI(cfg.CONF.database.backend,
                    backend_mapping=_BACKEND_MAPPING)
LOG = logging.getLogger(__name__)
def setup_db():
    """Set up database, create tables, etc.

    :returns: True on success, False otherwise.
    """
    return IMPL.setup_db()
def drop_db():
    """Drop database.

    :returns: True on success, False otherwise.
    """
    return IMPL.drop_db()
# Helpers for building constraints / equality checks
def constraint(**conditions):
    """Return a constraint object suitable for use with some updates."""
    return IMPL.constraint(**conditions)
def equal_any(*values):
    """Return an equality condition object suitable for use in a constraint.

    Equal_any conditions require that a model object's attribute equal any
    one of the given values.
    """
    return IMPL.equal_any(*values)
def not_equal(*values):
    """Return an inequality condition object suitable for use in a constraint.

    Not_equal conditions require that a model object's attribute differs from
    all of the given values.
    """
    return IMPL.not_equal(*values)
def to_dict(func):
    """Decorator converting DB model results into plain dicts.

    Wraps *func* so that a returned model object is converted via its
    ``to_dict()`` method, a list of models becomes a list of dicts (an empty
    list stays an empty list), and any falsy single result is normalized to
    None.
    """
    # Bug fix: without functools.wraps the decorated function lost its
    # __name__/__doc__ metadata, hurting logging and introspection.
    @functools.wraps(func)
    def decorator(*args, **kwargs):
        res = func(*args, **kwargs)
        if isinstance(res, list):
            return [item.to_dict() for item in res]
        # Preserve historical truthiness semantics: falsy results map to None.
        if res:
            return res.to_dict()
        return None
    return decorator
def get_reservations_by_host_id(host_id, start_date, end_date):
    """Delegate to the backend: reservations for one host in the period."""
    return IMPL.get_reservations_by_host_id(host_id, start_date, end_date)
def get_reservations_by_host_ids(host_ids, start_date, end_date):
    """Delegate to the backend: reservations for several hosts in the period."""
    return IMPL.get_reservations_by_host_ids(host_ids, start_date, end_date)
def get_reservations_by_network_id(network_id, start_date, end_date):
    """Delegate to the backend: reservations for one network in the period."""
    return IMPL.get_reservations_by_network_id(
        network_id, start_date, end_date)
def get_reservations_by_device_id(device_id, start_date, end_date):
    """Delegate to the backend: reservations for one device in the period."""
    return IMPL.get_reservations_by_device_id(device_id, start_date, end_date)
def get_reservation_allocations_by_host_ids(host_ids, start_date, end_date,
                                            lease_id=None,
                                            reservation_id=None):
    """Delegate to the backend: reservation allocations for the given hosts.

    Optionally filtered by lease and/or reservation.
    """
    return IMPL.get_reservation_allocations_by_host_ids(
        host_ids, start_date, end_date, lease_id, reservation_id)
def get_reservation_allocations_by_network_ids(network_ids, start_date,
                                               end_date, lease_id=None,
                                               reservation_id=None):
    """Delegate to the backend: reservation allocations for the given networks.

    Optionally filtered by lease and/or reservation.
    """
    return IMPL.get_reservation_allocations_by_network_ids(
        network_ids, start_date, end_date, lease_id, reservation_id)
def get_reservation_allocations_by_fip_ids(fip_ids, start_date, end_date,
                                           lease_id=None, reservation_id=None):
    """Delegate to the backend: allocations for the given floating IPs.

    Optionally filtered by lease and/or reservation.
    """
    return IMPL.get_reservation_allocations_by_fip_ids(
        fip_ids, start_date, end_date, lease_id, reservation_id)
def get_reservation_allocations_by_device_ids(device_ids, start_date, end_date,
                                              lease_id=None,
                                              reservation_id=None):
    """Delegate to the backend: reservation allocations for the given devices.

    Optionally filtered by lease and/or reservation.
    """
    return IMPL.get_reservation_allocations_by_device_ids(
        device_ids, start_date, end_date, lease_id, reservation_id)
def get_plugin_reservation(resource_type, resource_id):
    """Delegate to the backend: plugin reservation for one resource."""
    return IMPL.get_plugin_reservation(resource_type, resource_id)
def get_free_periods(resource_id, start_date, end_date, duration,
                     resource_type='host'):
    """Returns a list of free periods.

    Delegates to the configured backend implementation.
    """
    return IMPL.get_free_periods(resource_id, start_date, end_date, duration,
                                 resource_type=resource_type)
def get_reserved_periods(resource_id, start_date, end_date, duration,
                         resource_type='host'):
    """Returns a list of reserved periods.

    Delegates to the configured backend implementation.
    """
    return IMPL.get_reserved_periods(resource_id, start_date, end_date,
                                     duration, resource_type=resource_type)
| apache-2.0 |
saupchurch/server | tests/unit/test_peers.py | 4 | 1727 | """
Unit tests for the peer data model.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from ga4gh.server.datamodel import peers
from ga4gh.server import exceptions
class TestPeers(unittest.TestCase):
    """
    Unit tests for the peer datamodel.
    """
    def testBadUrlException(self):
        # A well-formed URL constructs without raising.
        peers.Peer("https://1kgenomes.ga4gh.org")
        bad_urls = [
            "ht9://1kgenomes.ga4gh.org",
            "http://1kgen",
            "http://.org",
        ]
        for bad_url in bad_urls:
            with self.assertRaises(exceptions.BadUrlException):
                peers.Peer(bad_url)

    def testToProtocolElement(self):
        url = "http://1kgenomes.ga4gh.org"
        key = "testkey"
        value = "testvalue"
        attributes = {key: value}
        protocolElement = peers.Peer(url, attributes).toProtocolElement()
        self.assertEqual(url, protocolElement.url)
        self.assertEqual(
            value,
            protocolElement.attributes.attr[key].values[0].string_value)

    def testAttributesJson(self):
        url = "http://1kgenomes.ga4gh.org"
        bad_payloads = [
            'a{"bad": "json"}',
            '{"bad"; "json"}',
            '{"bad": json}',
        ]
        for payload in bad_payloads:
            peer = peers.Peer(url)
            with self.assertRaises(exceptions.InvalidJsonException):
                peer.setAttributesJson(payload)
| apache-2.0 |
cwilling/oiio | testsuite/python-roi/test_roi.py | 7 | 1826 | #!/usr/bin/env python
import OpenImageIO as oiio
######################################################################
# main test starts here
# Exercise the OpenImageIO ROI bindings: construction, attribute access,
# equality, union/intersection, and ImageSpec roi get/set helpers.
# NOTE(review): the blanket `except Exception` below prints the error but
# discards the traceback -- acceptable for this Python 2 test driver.
try:
    r = oiio.ROI()
    print "ROI() =", r
    print "r.defined =", r.defined
    r = oiio.ROI (0, 640, 100, 200)
    print "ROI(0, 640, 100, 200) =", r
    r = oiio.ROI (0, 640, 0, 480, 0, 1, 0, 4)
    print "ROI(0, 640, 100, 480, 0, 1, 0, 4) =", r
    print "r.xbegin =", r.xbegin
    print "r.xend =", r.xend
    print "r.ybegin =", r.ybegin
    print "r.yend =", r.yend
    print "r.zbegin =", r.zbegin
    print "r.zend =", r.zend
    print "r.chbegin =", r.chbegin
    print "r.chend =", r.chend
    print "r.defined = ", r.defined
    print "r.width = ", r.width
    print "r.height = ", r.height
    print "r.depth = ", r.depth
    print "r.nchannels = ", r.nchannels
    print "r.npixels = ", r.npixels
    print
    print "ROI.All =", oiio.ROI.All
    print
    # Copies compare by value, not identity.
    r2 = oiio.ROI(r)
    r3 = oiio.ROI(r)
    r3.xend = 320
    print "r == r2 (expect yes): ", (r == r2)
    print "r != r2 (expect no): ", (r != r2)
    print "r == r3 (expect no): ", (r == r3)
    print "r != r3 (expect yes): ", (r != r3)
    print
    A = oiio.ROI (0, 10, 0, 8, 0, 1, 0, 4)
    B = oiio.ROI (5, 15, -1, 10, 0, 1, 0, 4)
    print "A =", A
    print "B =", B
    print "ROI.union(A,B) =", oiio.union(A,B)
    print "ROI.intersection(A,B) =", oiio.intersection(A,B)
    print
    # Round-trip ROI data through an ImageSpec.
    spec = oiio.ImageSpec(640, 480, 3, oiio.UINT8)
    print "Spec's roi is", oiio.get_roi(spec)
    oiio.set_roi (spec, oiio.ROI(3, 5, 7, 9))
    oiio.set_roi_full (spec, oiio.ROI(13, 15, 17, 19))
    print "After set, roi is", oiio.get_roi(spec)
    print "After set, roi_full is", oiio.get_roi_full(spec)
    print
    print "Done."
except Exception as detail:
    print "Unknown exception:", detail
| bsd-3-clause |
UIKit0/marsyas | src/marsyas_python/plot_spectrogram.py | 5 | 4687 | #!/usr/bin/env python
# This utility will plot beautiful spectrograms of your sound files. You will have to specify a lot of parameters,
# but the good news is, the defaults will be set so that it will fit most people's needs.
#
# The parameters you have to set are:
# - Input file name
# - Frame step / Frame length (in samples)
# - Minimum and maximum frequency for analysis
# - Minimum and maximum time for analysis
# - Output width and height
import argparse
import marsyas
import marsyas_util
import time
import numpy
import math
import matplotlib.pyplot as plt
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Quickly plot beautiful spectrograms for your audio files.')
    parser.add_argument('--fname', dest='Filename', type=str, default='test.wav', help='Filename from where data will be extracted')
    parser.add_argument('--flen', dest='Window_len', type=int, default=2048, help='Length (samples) of the window for analysis')
    parser.add_argument('--fstep', dest='Window_step', type=int, default=1024, help='Step (samples) of the sliding window used for analysis')
    parser.add_argument('--minfreq', dest='Min_freq', type=float, default=110, help='Minimum frequency (Hz) show in the spectrogram')
    parser.add_argument('--maxfreq', dest='Max_freq', type=float, default=3000, help='Maximum frequency (Hz) show in the spectrogram')
    parser.add_argument('--maxtime', dest='Max_time', type=float, default=9000, help='Maximum time (s) show in the spectrogram')
    parser.add_argument('--zeropad', dest='Zero_padding', type=float, default=1, help='Zero padding factor (the DFT is calculated after zero-padding the input to this times the input length - use 1 for standard DFT)')
    parser.add_argument('--width', dest='Width', type=int, default=450, help='Width of the plot')
    parser.add_argument('--height', dest='Height', type=int, default=200, help='Height of the plot')
    parser.add_argument('--window', dest='Window', type=str, default='Hanning', help='Shape of the window that will be used to calculate the spectrogram')
    args = parser.parse_args()
    # Create our Marsyas network for audio analysis
    spec_analyzer = ["Series/analysis", ["SoundFileSource/src", "Sum/summation", "Gain/gain", "ShiftInput/sft", "Windowing/win","Spectrum/spk","PowerSpectrum/pspk", "Memory/mem"]]
    net = marsyas_util.create(spec_analyzer)
    snet = marsyas_util.mar_refs(spec_analyzer)
    # Configure the network
    net.updControl(snet["src"]+"/mrs_string/filename", args.Filename)
    nSamples = net.getControl(snet["src"]+"/mrs_natural/size").to_natural()
    fs = net.getControl(snet["src"]+"/mrs_real/osrate").to_real()
    dur = nSamples/fs
    print "Opened ", args.Filename
    print "It has ", nSamples, " samples at ", fs, " samples/second to a total of ", dur," seconds"
    memFs = fs/args.Window_step # Sampling rate of the memory buffer
    dur = min(dur, args.Max_time)
    memSize = int(dur*memFs)
    net.updControl("mrs_natural/inSamples", args.Window_step);
    net.updControl(snet["gain"]+"/mrs_real/gain", args.Window_len*1.0); # This will un-normalize the DFT
    net.updControl(snet["sft"]+"/mrs_natural/winSize", args.Window_len);
    net.updControl(snet["win"]+"/mrs_natural/zeroPadding",args.Window_len * (args.Zero_padding-1));
    net.updControl(snet["win"]+"/mrs_string/type", args.Window); # "Hamming", "Hanning", "Triangle", "Bartlett", "Blackman"
    net.updControl(snet["pspk"]+"/mrs_string/spectrumType", "logmagnitude2"); # "power", "magnitude", "decibels", "logmagnitude" (for 1+log(magnitude*1000), "logmagnitude2" (for 1+log10(magnitude)), "powerdensity"
    net.updControl(snet["mem"]+"/mrs_natural/memSize", memSize)
    # Run the network to fill the memory
    for i in range(memSize):
        net.tick()
    # Gather results to a numpy array
    out = net.getControl("mrs_realvec/processedData").to_realvec()
    DFT_Size = int(len(out)*1.0/memSize)
    if numpy.ndim(out)==1:
        out = numpy.array([out])
    out = numpy.reshape(out,(memSize, DFT_Size))
    out = numpy.transpose(out)
    # Cut information that we do not want
    # NOTE(review): bin indices are computed with `/` -- fine under Python 2's
    # integer division (this is a Python 2 script), but would need int() under
    # Python 3.
    minK = args.Min_freq*DFT_Size/fs
    maxK = args.Max_freq*DFT_Size/fs
    out = out[minK:maxK+1]
    # Normalize and invert so that louder bins plot darker.
    out = out/numpy.max(out)
    out = 1-out
    # Plot ALL the numbers!!!
    im=plt.imshow(out, aspect='auto', origin='lower', cmap=plt.cm.gray, extent=[0,dur,args.Min_freq,args.Max_freq])
    plt.xlabel('Time (s)')
    plt.ylabel('Frequency (Hz)')
    fig = plt.gcf()
    # Convert requested pixel sizes to inches assuming 80 dpi.
    width_inches = args.Width/80.0
    height_inches = args.Height/80.0
    fig.set_size_inches((width_inches,height_inches))
    plt.savefig('out.png',bbox_inches='tight')
    plt.savefig('out.pdf',bbox_inches='tight')
    #plt.show()
| gpl-2.0 |
weiwangblog/tcollector | collectors/0/nfsstat.py | 7 | 3666 | #!/usr/bin/python
#
# Copyright (C) 2012 The tcollector Authors.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details. You should have received a copy
# of the GNU Lesser General Public License along with this program. If not,
# see <http://www.gnu.org/licenses/>.
#
"""Imports NFS stats from /proc."""
import sys
import time
from collectors.lib import utils
COLLECTION_INTERVAL = 15 # seconds

# Per-protocol tuples of RPC operation names, in the order the kernel reports
# their counters in /proc/net/rpc/nfs (lists taken from nfs-utils/nfsstat.c).
nfs_client_proc_names = {
    "proc4": (
        # list of ops taken from nfs-utils / nfsstat.c
        "null", "read", "write", "commit", "open", "open_conf", "open_noat",
        "open_dgrd", "close", "setattr", "fsinfo", "renew", "setclntid", "confirm",
        "lock", "lockt", "locku", "access", "getattr", "lookup", "lookup_root",
        "remove", "rename", "link", "symlink", "create", "pathconf", "statfs",
        "readlink", "readdir", "server_caps", "delegreturn", "getacl", "setacl",
        "fs_locations", "rel_lkowner", "secinfo",
        # nfsv4.1 client ops
        "exchange_id", "create_ses", "destroy_ses", "sequence", "get_lease_t",
        "reclaim_comp", "layoutget", "getdevinfo", "layoutcommit", "layoutreturn",
        "getdevlist",
    ),
    "proc3": (
        "null", "getattr", "setattr", "lookup", "access", "readlink",
        "read", "write", "create", "mkdir", "symlink", "mknod",
        "remove", "rmdir", "rename", "link", "readdir", "readdirplus",
        "fsstat", "fsinfo", "pathconf", "commit",
    ),
}
def main():
    """nfsstat main loop: emit NFS client RPC counters as tcollector metrics."""
    try:
        f_nfs = open("/proc/net/rpc/nfs")
    except IOError, e:
        print >>sys.stderr, "Failed to open input file: %s" % (e,)
        return 13 # Ask tcollector to not re-start us immediately.

    utils.drop_privileges()
    while True:
        # Re-read the same file handle each cycle instead of reopening it.
        f_nfs.seek(0)
        ts = int(time.time())
        for line in f_nfs:
            fields = line.split()
            if fields[0] in nfs_client_proc_names.keys():
                # NFSv4
                # first entry should equal total count of subsequent entries
                assert int(fields[1]) == len(fields[2:]), (
                    "reported count (%d) does not equal list length (%d)"
                    % (int(fields[1]), len(fields[2:])))
                for idx, val in enumerate(fields[2:]):
                    try:
                        print ("nfs.client.rpc %d %s op=%s version=%s"
                               % (ts, int(val), nfs_client_proc_names[fields[0]][idx], fields[0][4:]))
                    except IndexError:
                        # Kernel exported more counters than we have names for.
                        print >> sys.stderr, ("Warning: name lookup failed"
                                              " at position %d" % idx)
            elif fields[0] == "rpc":
                # RPC
                calls = int(fields[1])
                retrans = int(fields[2])
                authrefrsh = int(fields[3])
                print "nfs.client.rpc.stats %d %d type=calls" % (ts, calls)
                print "nfs.client.rpc.stats %d %d type=retrans" % (ts, retrans)
                print ("nfs.client.rpc.stats %d %d type=authrefrsh"
                       % (ts, authrefrsh))
        sys.stdout.flush()
        time.sleep(COLLECTION_INTERVAL)
if __name__ == "__main__":
    # Propagate main()'s return code so tcollector can react to failures.
    sys.exit(main())
| gpl-3.0 |
alheinecke/tensorflow-xsmm | tensorflow/contrib/learn/python/learn/preprocessing/categorical.py | 153 | 4269 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements preprocessing transformers for categorical variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
# pylint: disable=g-bad-import-order
from . import categorical_vocabulary
from ..learn_io.data_feeder import setup_processor_data_feeder
# pylint: enable=g-bad-import-order
class CategoricalProcessor(object):
  """Maps documents to sequences of word ids.

  As a common convention, Nan values are handled as unknown tokens.
  Both float('nan') and np.nan are accepted.
  """

  def __init__(self, min_frequency=0, share=False, vocabularies=None):
    """Initializes a CategoricalProcessor instance.

    Args:
      min_frequency: Minimum frequency of categories in the vocabulary.
      share: Share vocabulary between variables.
      vocabularies: list of CategoricalVocabulary objects for each variable in
        the input dataset.

    Attributes:
      vocabularies_: list of CategoricalVocabulary objects.
    """
    self.min_frequency = min_frequency
    self.share = share
    self.vocabularies_ = vocabularies

  def freeze(self, freeze=True):
    """Freeze or unfreeze all vocabularies.

    Args:
      freeze: Boolean, indicate if vocabularies should be frozen.
    """
    for vocab in self.vocabularies_:
      vocab.freeze(freeze)

  def fit(self, x, unused_y=None):
    """Learn a vocabulary dictionary of all categories in `x`.

    Args:
      x: numpy matrix or iterable of lists/numpy arrays.
      unused_y: to match fit format signature of estimators.

    Returns:
      self
    """
    x = setup_processor_data_feeder(x)
    for row in x:
      # Create vocabularies if not given.
      if self.vocabularies_ is None:
        # If not share, one per column, else one shared across.
        if not self.share:
          self.vocabularies_ = [
              categorical_vocabulary.CategoricalVocabulary() for _ in row
          ]
        else:
          vocab = categorical_vocabulary.CategoricalVocabulary()
          self.vocabularies_ = [vocab for _ in row]
      for idx, value in enumerate(row):
        # Nans are handled as unknowns.
        # NOTE(review): `value == np.nan` is always False (NaN never compares
        # equal to anything), so the isinstance/math.isnan test is the branch
        # that actually catches NaNs here.
        if (isinstance(value, float) and math.isnan(value)) or value == np.nan:
          continue
        self.vocabularies_[idx].add(value)
    if self.min_frequency > 0:
      for vocab in self.vocabularies_:
        vocab.trim(self.min_frequency)
    self.freeze()
    return self

  def fit_transform(self, x, unused_y=None):
    """Learn the vocabulary dictionary and return indices of categories.

    Args:
      x: numpy matrix or iterable of lists/numpy arrays.
      unused_y: to match fit_transform signature of estimators.

    Returns:
      x: iterable, [n_samples]. Category-id matrix.
    """
    self.fit(x)
    return self.transform(x)

  def transform(self, x):
    """Transform documents to category-id matrix.

    Converts categories to ids given fitted vocabulary from `fit` or
    one provided in the constructor.

    Args:
      x: numpy matrix or iterable of lists/numpy arrays.

    Yields:
      x: iterable, [n_samples]. Category-id matrix.
    """
    self.freeze()
    x = setup_processor_data_feeder(x)
    for row in x:
      output_row = []
      for idx, value in enumerate(row):
        # Return <UNK> when it's Nan.
        # NOTE(review): as in fit(), `value == np.nan` is always False; the
        # math.isnan branch does the real NaN detection.
        if (isinstance(value, float) and math.isnan(value)) or value == np.nan:
          output_row.append(0)
          continue
        output_row.append(self.vocabularies_[idx].get(value))
      yield np.array(output_row, dtype=np.int64)
| apache-2.0 |
meganbkratz/acq4 | acq4/pyqtgraph/widgets/JoystickButton.py | 52 | 2460 | from ..Qt import QtGui, QtCore
__all__ = ['JoystickButton']
class JoystickButton(QtGui.QPushButton):
    """Square push button acting as a two-axis virtual joystick.

    While held, dragging the mouse away from the press point sets the state
    to a pair [x, y] with quadratic response, clamped at ``self.radius``
    pixels of travel; releasing the button snaps the state back to [0, 0].
    Emits ``sigStateChanged(self, state)`` whenever the state changes.
    """
    sigStateChanged = QtCore.Signal(object, object)  ## self, state

    def __init__(self, parent=None):
        QtGui.QPushButton.__init__(self, parent)
        # Drag distance (px) corresponding to full deflection.
        self.radius = 200
        self.setCheckable(True)
        self.state = None
        self.setState(0, 0)
        self.setFixedWidth(50)
        self.setFixedHeight(50)

    def mousePressEvent(self, ev):
        self.setChecked(True)
        self.pressPos = ev.pos()
        ev.accept()

    def mouseMoveEvent(self, ev):
        dif = ev.pos() - self.pressPos
        # Screen y grows downward; invert so "up" is positive.
        self.setState(dif.x(), -dif.y())

    def mouseReleaseEvent(self, ev):
        self.setChecked(False)
        self.setState(0, 0)

    def wheelEvent(self, ev):
        ev.accept()

    def doubleClickEvent(self, ev):
        # NOTE(review): Qt's double-click handler is mouseDoubleClickEvent;
        # this method as named is never invoked by Qt. Left unrenamed to
        # preserve the public interface -- confirm before renaming.
        ev.accept()

    def getState(self):
        """Return the current [x, y] state (each in [-1, 1])."""
        return self.state

    def setState(self, *xy):
        """Set the state from raw pixel offsets.

        The offset vector is clamped to ``self.radius`` and scaled
        quadratically so small motions give fine control; the indicator spot
        position is updated and sigStateChanged is emitted on change.
        """
        xy = list(xy)
        d = (xy[0] ** 2 + xy[1] ** 2) ** 0.5
        nxy = [0, 0]
        for i in [0, 1]:
            if xy[i] == 0:
                nxy[i] = 0
            else:
                nxy[i] = xy[i] / d
        if d > self.radius:
            d = self.radius
        d = (d / self.radius) ** 2
        xy = [nxy[0] * d, nxy[1] * d]

        w2 = self.width() / 2.
        # Bug fix: use float division here too; the previous `/2` was integer
        # division under Python 2, skewing the spot's vertical position.
        h2 = self.height() / 2.
        self.spotPos = QtCore.QPoint(w2 * (1 + xy[0]), h2 * (1 - xy[1]))
        self.update()
        if self.state == xy:
            return
        self.state = xy
        self.sigStateChanged.emit(self, self.state)

    def paintEvent(self, ev):
        QtGui.QPushButton.paintEvent(self, ev)
        p = QtGui.QPainter(self)
        p.setBrush(QtGui.QBrush(QtGui.QColor(0, 0, 0)))
        p.drawEllipse(self.spotPos.x() - 3, self.spotPos.y() - 3, 6, 6)

    def resizeEvent(self, ev):
        # Recompute spotPos for the new widget geometry.
        self.setState(*self.state)
        QtGui.QPushButton.resizeEvent(self, ev)
if __name__ == '__main__':
    # Minimal interactive demo: show a JoystickButton and print state changes.
    app = QtGui.QApplication([])
    w = QtGui.QMainWindow()
    b = JoystickButton()
    w.setCentralWidget(b)
    w.show()
    w.resize(100, 100)

    def fn(b, s):
        print("state changed:", s)

    b.sigStateChanged.connect(fn)

    ## Start Qt event loop unless running in interactive mode.
    import sys
    if sys.flags.interactive != 1:
        app.exec_()
| mit |
bhargav/scikit-learn | examples/neural_networks/plot_mnist_filters.py | 57 | 2195 | """
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in a
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix have the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
print(__doc__)

import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
from sklearn.neural_network import MLPClassifier

mnist = fetch_mldata("MNIST original")
# rescale the data, use the traditional train/test split
X, y = mnist.data / 255., mnist.target
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]

# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
#                     algorithm='sgd', verbose=10, tol=1e-4, random_state=1)
# Deliberately tiny network and few iterations so the example runs quickly.
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
                    algorithm='sgd', verbose=10, tol=1e-4, random_state=1,
                    learning_rate_init=.1)

mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))

fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
    # Each first-layer weight column reshapes to the 28x28 input image.
    ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
               vmax=.5 * vmax)
    ax.set_xticks(())
    ax.set_yticks(())

plt.show()
| bsd-3-clause |
feroda/django | django/utils/dateformat.py | 365 | 10712 | """
PHP date() style date formatting
See http://www.php.net/date for format strings
Usage:
>>> import datetime
>>> d = datetime.datetime.now()
>>> df = DateFormat(d)
>>> print(df.format('jS F Y H:i'))
7th October 2003 11:39
>>>
"""
from __future__ import unicode_literals
import calendar
import datetime
import re
import time
from django.utils import six
from django.utils.dates import (
MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR,
)
from django.utils.encoding import force_text
from django.utils.timezone import get_default_timezone, is_aware, is_naive
from django.utils.translation import ugettext as _
# Splitting on re_formatchars yields alternating literal text (even indices)
# and single format characters (odd indices); re_escaped strips the backslash
# from escaped literal characters.
re_formatchars = re.compile(r'(?<!\\)([aAbBcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])')
re_escaped = re.compile(r'\\(.)')
class Formatter(object):
    def format(self, formatstr):
        """Render `formatstr`, dispatching each format character to a method.

        `re_formatchars.split` alternates literal text (even indices) with
        single format characters (odd indices). Each format character maps to
        the same-named method on the subclass; backslash escapes in literal
        text are unescaped.
        """
        rendered = []
        for index, chunk in enumerate(re_formatchars.split(force_text(formatstr))):
            if index % 2 == 0:
                if chunk:
                    rendered.append(re_escaped.sub(r'\1', chunk))
            else:
                rendered.append(force_text(getattr(self, chunk)()))
        return ''.join(rendered)
class TimeFormat(Formatter):
    """Formatter for time-related format characters (PHP date() syntax)."""

    def __init__(self, obj):
        self.data = obj
        self.timezone = None

        # We only support timezone when formatting datetime objects,
        # not date objects (timezone information not appropriate),
        # or time objects (against established django policy).
        if isinstance(obj, datetime.datetime):
            if is_naive(obj):
                self.timezone = get_default_timezone()
            else:
                self.timezone = obj.tzinfo

    def a(self):
        "'a.m.' or 'p.m.'"
        if self.data.hour > 11:
            return _('p.m.')
        return _('a.m.')

    def A(self):
        "'AM' or 'PM'"
        if self.data.hour > 11:
            return _('PM')
        return _('AM')

    def B(self):
        "Swatch Internet time"
        raise NotImplementedError('may be implemented in a future release')

    def e(self):
        """
        Timezone name.

        If timezone information is not available, this method returns
        an empty string.
        """
        if not self.timezone:
            return ""
        try:
            if hasattr(self.data, 'tzinfo') and self.data.tzinfo:
                # Have to use tzinfo.tzname and not datetime.tzname
                # because datatime.tzname does not expect Unicode
                return self.data.tzinfo.tzname(self.data) or ""
        except NotImplementedError:
            pass
        return ""

    def f(self):
        """
        Time, in 12-hour hours and minutes, with minutes left off if they're
        zero.
        Examples: '1', '1:30', '2:05', '2'
        Proprietary extension.
        """
        if self.data.minute == 0:
            return self.g()
        return '%s:%s' % (self.g(), self.i())

    def g(self):
        "Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
        if self.data.hour == 0:
            return 12
        if self.data.hour > 12:
            return self.data.hour - 12
        return self.data.hour

    def G(self):
        "Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
        return self.data.hour

    def h(self):
        "Hour, 12-hour format; i.e. '01' to '12'"
        return '%02d' % self.g()

    def H(self):
        "Hour, 24-hour format; i.e. '00' to '23'"
        return '%02d' % self.G()

    def i(self):
        "Minutes; i.e. '00' to '59'"
        return '%02d' % self.data.minute

    def O(self):
        """
        Difference to Greenwich time in hours; e.g. '+0200', '-0430'.

        If timezone information is not available, this method returns
        an empty string.
        """
        if not self.timezone:
            return ""
        seconds = self.Z()
        # Format the absolute offset with an explicit sign prefix.
        sign = '-' if seconds < 0 else '+'
        seconds = abs(seconds)
        return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)

    def P(self):
        """
        Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
        if they're zero and the strings 'midnight' and 'noon' if appropriate.
        Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
        Proprietary extension.
        """
        if self.data.minute == 0 and self.data.hour == 0:
            return _('midnight')
        if self.data.minute == 0 and self.data.hour == 12:
            return _('noon')
        return '%s %s' % (self.f(), self.a())

    def s(self):
        "Seconds; i.e. '00' to '59'"
        return '%02d' % self.data.second

    def T(self):
        """
        Time zone of this machine; e.g. 'EST' or 'MDT'.

        If timezone information is not available, this method returns
        an empty string.
        """
        if not self.timezone:
            return ""
        name = self.timezone.tzname(self.data) if self.timezone else None
        if name is None:
            # Fall back to the numeric offset when the zone has no name.
            name = self.format('O')
        return six.text_type(name)

    def u(self):
        "Microseconds; i.e. '000000' to '999999'"
        return '%06d' % self.data.microsecond

    def Z(self):
        """
        Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
        timezones west of UTC is always negative, and for those east of UTC is
        always positive.

        If timezone information is not available, this method returns
        an empty string.
        """
        # NOTE(review): returns the empty string for naive values but an int
        # otherwise -- callers (e.g. O()) must guard with `self.timezone`.
        if not self.timezone:
            return ""
        offset = self.timezone.utcoffset(self.data)
        # `offset` is a datetime.timedelta. For negative values (to the west of
        # UTC) only days can be negative (days=-1) and seconds are always
        # positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)
        # Positive offsets have days=0
        return offset.days * 86400 + offset.seconds
class DateFormat(TimeFormat):
    # Cumulative days before the first of each month (1-indexed) in a
    # non-leap year; used by z() to compute the day of the year.
    year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]

    def b(self):
        "Month, textual, 3 letters, lowercase; e.g. 'jan'"
        return MONTHS_3[self.data.month]

    def c(self):
        """
        ISO 8601 Format
        Example : '2008-01-02T10:30:00.000123'
        """
        return self.data.isoformat()

    def d(self):
        "Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
        return '%02d' % self.data.day

    def D(self):
        "Day of the week, textual, 3 letters; e.g. 'Fri'"
        return WEEKDAYS_ABBR[self.data.weekday()]

    def E(self):
        "Alternative month names as required by some locales. Proprietary extension."
        return MONTHS_ALT[self.data.month]

    def F(self):
        "Month, textual, long; e.g. 'January'"
        return MONTHS[self.data.month]

    def I(self):
        "'1' if Daylight Savings Time, '0' otherwise."
        if self.timezone and self.timezone.dst(self.data):
            return '1'
        else:
            return '0'

    def j(self):
        "Day of the month without leading zeros; i.e. '1' to '31'"
        return self.data.day

    def l(self):
        "Day of the week, textual, long; e.g. 'Friday'"
        return WEEKDAYS[self.data.weekday()]

    def L(self):
        "Boolean for whether it is a leap year; i.e. True or False"
        return calendar.isleap(self.data.year)

    def m(self):
        "Month; i.e. '01' to '12'"
        return '%02d' % self.data.month

    def M(self):
        "Month, textual, 3 letters; e.g. 'Jan'"
        return MONTHS_3[self.data.month].title()

    def n(self):
        "Month without leading zeros; i.e. '1' to '12'"
        return self.data.month

    def N(self):
        "Month abbreviation in Associated Press style. Proprietary extension."
        return MONTHS_AP[self.data.month]

    def o(self):
        "ISO 8601 year number matching the ISO week number (W)"
        return self.data.isocalendar()[0]

    def r(self):
        "RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
        return self.format('D, j M Y H:i:s O')

    def S(self):
        "English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
        if self.data.day in (11, 12, 13):  # Special case: 11th, 12th, 13th
            return 'th'
        last = self.data.day % 10
        if last == 1:
            return 'st'
        if last == 2:
            return 'nd'
        if last == 3:
            return 'rd'
        return 'th'

    def t(self):
        "Number of days in the given month; i.e. '28' to '31'"
        return '%02d' % calendar.monthrange(self.data.year, self.data.month)[1]

    def U(self):
        "Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
        if isinstance(self.data, datetime.datetime) and is_aware(self.data):
            return int(calendar.timegm(self.data.utctimetuple()))
        else:
            # Naive datetimes (and plain dates) are interpreted in local time.
            return int(time.mktime(self.data.timetuple()))

    def w(self):
        "Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
        return (self.data.weekday() + 1) % 7

    def W(self):
        "ISO-8601 week number of year, weeks starting on Monday"
        # datetime implements the ISO 8601 calendar natively; this replaces
        # the previous hand-rolled port of the ISOwdALG.txt algorithm and is
        # equivalent for every representable date.
        return self.data.isocalendar()[1]

    def y(self):
        "Year, 2 digits; e.g. '99'"
        # BUGFIX: the old string-slicing approach (str(year)[2:]) produced
        # wrong or empty output for years below 1000 (e.g. '' for year 5);
        # modular arithmetic always yields the last two digits, zero-padded.
        return '%02d' % (self.data.year % 100)

    def Y(self):
        "Year, 4 digits; e.g. '1999'"
        return self.data.year

    def z(self):
        "Day of the year; i.e. '0' to '365'"
        doy = self.year_days[self.data.month] + self.data.day
        if self.L() and self.data.month > 2:
            doy += 1
        return doy
def format(value, format_string):
    "Convenience function: format ``value`` with a DateFormat in one call."
    return DateFormat(value).format(format_string)
def time_format(value, format_string):
    "Convenience function: format ``value`` with a TimeFormat in one call."
    return TimeFormat(value).format(format_string)
| bsd-3-clause |
megan-guidry/dssr2017ABI-mgui210 | InterfaceTest.py | 1 | 6469 | #In this Interface script, an object of class "figureCreate" is created for each of
#the ___ models in the journal article titled _______. The name of each of these objects
#is simply "Figure_" + the Figure number (e.g. Figure_1). Each figureCreate object
#has 4 attributes:
# 1) The Figure number
# 2) The Main Model needed to generate the data within the figure
# 3) The specific cross-bridge model that the main model requires
# 4) The specific Ca2+ model that the main model requires
import numpy as np
import matplotlib.pyplot as plt
import csv
import os
import itertools
import sys
class figureCreate:
    """Description of one manuscript figure: which models generate its data
    and which CSV columns hold the values to plot."""

    def __init__(self, figureNumber, mainModel, xbModel, ca2Model, xVariables, yVariables, ca2Type, caption):
        """Store all figure attributes verbatim; no validation is performed."""
        self.figureNumber = figureNumber  # integer index of the figure
        self.mainModel = mainModel        # cellml file for the main model
        self.xbModel = xbModel            # cellml file for the cross-bridge model
        self.ca2Model = ca2Model          # cellml file for the Ca2+ model
        self.xVariables = xVariables      # CSV column title(s) for x data (e.g. [A, A])
        self.yVariables = yVariables      # CSV column title(s) for y data (e.g. [B, D])
        self.ca2Type = ca2Type            # 'F' (fixed) or 'D' (dynamic) [Ca2+]i
        self.caption = caption            # text displayed beneath the plot
def run_and_plot(figureNumber, fig2Reproduce, contractionType, contractionTypeValues, numberOfFiles):
    """Run one simulation per contraction value and plot each resulting CSV.

    figureNumber          -- e.g. "Figure1"; used to build output CSV names.
    fig2Reproduce         -- figureCreate object describing the figure.
    contractionType       -- "WL" (work-loop) or "Iso" (isometric).
    contractionTypeValues -- afterloads or sarcomere lengths, one per run.
    numberOfFiles         -- number of simulations / CSV files to produce.

    Side effects: executes modelVersions/testConnection.py once per value,
    reads the CSV it writes into Test_Output, and adds one line per run to
    the current matplotlib figure (shown later by the caller).
    """
    for i in range(numberOfFiles):
        # Name of the CSV this run produces, e.g. "Figure1_D_WL0.12.CSV".
        # (The previous unused os.listdir() snapshots of Test_Output and
        # modelVersions have been removed.)
        dataFile = figureNumber + "_" + str(fig2Reproduce.ca2Type) + "_" + contractionType + str(contractionTypeValues[i]) + ".CSV"
        # Where to look for the output data once it has been created.
        outputDataPath = os.path.join("Test_Output", dataFile)
        print("Creating file: " + dataFile)
        # Run the MeganModel (one simulation per loop iteration).
        testConnectionPath = os.path.join("modelVersions", "testConnection.py")
        print(testConnectionPath)
        # SECURITY NOTE: exec() runs arbitrary code from testConnection.py;
        # acceptable only because that script ships with this repository.
        exec(open(testConnectionPath).read())
        xData = []
        yData = []
        with open(outputDataPath, 'r') as csvfile:
            plots = csv.reader(csvfile, delimiter=',')
            next(plots, None)  # skip the header line
            for row in plots:
                xData.append(float(row[1]) / 2.3)  # normalise sarcomere length
                yData.append(float(row[3]))
        plt.plot(xData, yData, label='Loaded from file!')
def main():
    """Prompt for a figure number, run the matching simulations and plot them."""
    #The "Figures" array contains a figureCreate object for each figure in the Manuscript
    Figures = []
    Figures.append(figureCreate(1, "Main", "XB", "Ca2", ["xdata1", "xdata2", "xdata3"], ["ydata1", "ydata2", "ydata3"], "D", "In this figure, work-loop contractions at differing afterlaods and isometric contractions at different sarcomere lengths, are performed"))
    Figures[1 - 1].afterloads = [0.12, 0.15, 0.2]
    Figures[1 - 1].sarcomereLengths = [1.9359, 2.0139, 2.1054]
    ###################################################################################################################
    ######This next chunk of code grabs the appropriate models (based on the user input) and runs them on hpc.#########
    #The model version run is based on what figure (Figure_1, Figure_2, etc...) the user wants to replicate.
    #Creating a pointer to the proper figureCreate object based on user input
    def check_userInput(typed_input):
        """Return True when typed_input parses as an integer, else warn and return False."""
        try:
            typed_input = int(typed_input)
            return True
        except ValueError:
            print("Error: You have not input an integer value")
            return False
    userInput = input("Please enter the figure number you want to reproduce: ")
    if not check_userInput(userInput):
        # BUGFIX: the validation result used to be discarded, so non-integer
        # input crashed below with an unhandled ValueError; exit cleanly.
        sys.exit(1)
    # NOTE(review): an integer outside 1..len(Figures) still raises
    # IndexError on the next line -- confirm whether that should be guarded.
    fig2Reproduce = Figures[int(userInput)-1] #fig2Reproduce is the figureCreate object the code whose attributes include the model names and data values needed to recreate the specific figure
    figureNumber = "Figure" + str(userInput)
    print("Reproducing " + "Figure " + str(userInput) + ", please wait...")
    #Create the .csv output data file name (based on object attributes) This Filename, called dataFile, will be imported into the protocol code.
    #NAMING CONVENTION: Identify which file to access (which file has the data you need) based on an objects attributes and the matching filename
    #To grab the correct file from the Output folder, I need to know:
    # 1) the figureNumber
    # 2) Fixed or dynamic [Ca2+]i (e.g. F, D) --> this also comes from the model version run
    # 3) the contraction type (e.g. WL, Iso, QR) --> this comes from the Model version run
    # 4) The afterload value or sarcomere length(e.g. 0.15)
    # 5) I also need to know the .CSV columns that hold the data. This information is saved in an object attribute
    #How to determine whether a createFigureobject has a .aftreload attribute (indicating work-loops), a .sarcomereLengths attribute (indicating Isometric contractions), or both
    TorF_WL = hasattr(fig2Reproduce, "afterloads")
    TorF_Iso = hasattr(fig2Reproduce, "sarcomereLengths")
    if TorF_WL:
        # Work-loop data takes priority when a figure defines both kinds.
        contractionType = "WL"
        contractionTypeValues = fig2Reproduce.afterloads
        numberOfFiles = len(contractionTypeValues)
        run_and_plot(figureNumber, fig2Reproduce, contractionType, contractionTypeValues, numberOfFiles)
    elif TorF_Iso:
        contractionType = "Iso"
        contractionTypeValues = fig2Reproduce.sarcomereLengths
        numberOfFiles = len(contractionTypeValues)
        run_and_plot(figureNumber, fig2Reproduce, contractionType, contractionTypeValues, numberOfFiles)
    #Formatting and displaying the figure:
    plt.xlabel("Normalised Sarcomere Length")
    plt.ylabel("Normalised Total Force")
    plt.title(figureNumber)
    plt.axis([0.75, 1, 0, 0.5])
    plt.text(.1, .1, fig2Reproduce.caption)
    plt.show()  # placed after the loop so all data plotted in the loop shows on one figure
# Standard script entry point: only run main() when executed directly.
if __name__ == "__main__":
    main()
| apache-2.0 |
Mark24Code/python | chen2aaron/0000/img.py | 38 | 1221 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Created by xixijun
# Date: 15-5-13
# Blog: morningchen.com
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
class AddNumToPic(object):
    """
    Problem 0000: add a red number to the top-right corner of your QQ
    (or Weibo) avatar, similar to the unread-message count badge used
    by WeChat.  (Translated from the original Chinese docstring.)
    """
    def __init__(self):
        # Both attributes are populated later via set_font() / open().
        self.font = None
        self.img = None
    def open(self, img_path):
        """Load the image at ``img_path`` into ``self.img``; always returns True."""
        self.img = Image.open(img_path)
        return True
    def set_font(self, font_path, size):
        """Load a TrueType font for later use by draw_text(); always returns True."""
        self.font = ImageFont.truetype(font_path, size)
        return True
    def draw_text(self, str, color, ttf):
        """Draw ``str`` near the top-right corner, show the image and save it.

        NOTE(review): the parameter name ``str`` shadows the builtin; renaming
        it would change the call signature, so it is only flagged here.
        """
        xSize, ySize = self.img.size
        # Badge size heuristic: 1/11th of the smaller image dimension,
        # used only to nudge the anchor point upwards.
        fontSize = min(xSize, ySize) // 11
        # Anchor the text near the top-right corner of the image.
        position = (0.9 * xSize, 0.1 * ySize - fontSize)
        draw = ImageDraw.Draw(self.img)
        draw.text(position, str, fill=color, font=ttf)
        self.img.show()
        # Saved as e.g. "4number.png" in the working directory.
        self.img.save(str + "number" + '.png')
        return True
if __name__ == '__main__':
    # Demo: stamp a red "4" onto img.png using an 80pt Microsoft YaHei font.
    pic = AddNumToPic()
    pic.open('img.png')
    pic.set_font('microsoft_yahei.TTF', 80)
    pic.draw_text('4', 'red', pic.font)
| mit |
makinacorpus/Geotrek | geotrek/feedback/models.py | 2 | 6429 | import html
import logging
from django.conf import settings
from django.contrib.gis.db import models
from django.utils.translation import gettext_lazy as _
from django.db.models.signals import post_save
from django.dispatch import receiver
from geotrek.common.mixins import PicturesMixin
from mapentity.models import MapEntityMixin
from geotrek.common.mixins import TimeStampedModelMixin
from geotrek.trekking.models import Trek
from .helpers import send_report_managers, post_report_to_suricate
logger = logging.getLogger(__name__)
def status_default():
    """Return the pk of the 'Nouveau' (new) ReportStatus, or None if absent.

    Used as the ``default`` callable for ``Report.status``.
    """
    try:
        # Single query instead of the previous filter-then-get pair, which
        # evaluated the queryset once for the truthiness test and again for
        # .get().  MultipleObjectsReturned still propagates unchanged.
        return ReportStatus.objects.get(label='Nouveau').pk
    except ReportStatus.DoesNotExist:
        return None
class Report(MapEntityMixin, PicturesMixin, TimeStampedModelMixin):
    """ User reports, mainly submitted via *Geotrek-rando*.
    All descriptive foreign keys are optional so that minimal anonymous
    reports can still be stored.
    """
    email = models.EmailField(verbose_name=_("Email"))
    comment = models.TextField(blank=True,
                               default="",
                               verbose_name=_("Comment"))
    activity = models.ForeignKey('ReportActivity',
                                 on_delete=models.CASCADE,
                                 null=True,
                                 blank=True,
                                 verbose_name=_("Activity"))
    category = models.ForeignKey('ReportCategory',
                                 on_delete=models.CASCADE,
                                 null=True,
                                 blank=True,
                                 verbose_name=_("Category"))
    problem_magnitude = models.ForeignKey('ReportProblemMagnitude',
                                          null=True,
                                          blank=True,
                                          on_delete=models.CASCADE,
                                          verbose_name=_("Problem magnitude"))
    status = models.ForeignKey('ReportStatus',
                               on_delete=models.CASCADE,
                               null=True,
                               blank=True,
                               default=status_default,
                               verbose_name=_("Status"))
    geom = models.PointField(null=True,
                             blank=True,
                             default=None,
                             verbose_name=_("Location"),
                             srid=settings.SRID)
    related_trek = models.ForeignKey(Trek,
                                     null=True,
                                     blank=True,
                                     on_delete=models.CASCADE,
                                     verbose_name=_('Related trek'))

    class Meta:
        verbose_name = _("Report")
        verbose_name_plural = _("Reports")
        ordering = ['-date_insert']  # newest first

    def __str__(self):
        if self.email:
            return self.email
        return "Anonymous report"

    @property
    def email_display(self):
        """HTML link to the report's detail page, labelled with str(self)."""
        return '<a data-pk="%s" href="%s" title="%s" >%s</a>' % (self.pk,
                                                                 self.get_detail_url(),
                                                                 self,
                                                                 self)

    @property
    def full_url(self):
        """Absolute URL built from the first ALLOWED_HOSTS entry, or ''."""
        try:
            return '{}{}'.format(
                settings.ALLOWED_HOSTS[0],
                self.get_detail_url()
            )
        except (IndexError, KeyError):
            # BUGFIX: ALLOWED_HOSTS is a list, so an empty setting raises
            # IndexError -- the previous KeyError-only clause could never
            # fire.  Do not display a url if there is no ALLOWED_HOSTS.
            return ""

    @classmethod
    def get_create_label(cls):
        return _("Add a new feedback")

    @property
    def geom_wgs84(self):
        """Report location reprojected to WGS84 (EPSG:4326)."""
        return self.geom.transform(4326, clone=True)

    @property
    def comment_text(self):
        """Comment with HTML entities (&amp;, &eacute;, ...) unescaped."""
        return html.unescape(self.comment)
@receiver(post_save, sender=Report, dispatch_uid="on_report_created")
def on_report_saved(sender, instance, created, **kwargs):
    """ Send an email to managers when a report is created.

    Failures are logged but never propagated, so saving a Report can
    never fail because of a notification problem.  Updates (created is
    False) are ignored.
    """
    if not created:
        return
    try:
        send_report_managers(instance)
    except Exception as e:
        logger.error('Email could not be sent to managers.')
        logger.exception(e)  # This sends an email to admins :)
    # Optionally forward the new report to the Suricate API as well.
    if settings.SURICATE_REPORT_ENABLED:
        try:
            post_report_to_suricate(instance)
        except Exception as e:
            logger.error('Report could not be sent to Suricate API.')
            logger.exception(e)
class ReportActivity(models.Model):
    """Activity involved in report"""
    label = models.CharField(verbose_name=_("Activity"),
                             max_length=128)
    # Identifier of the matching activity in the Suricate API (optional).
    suricate_id = models.PositiveIntegerField(_("Suricate id"),
                                              null=True,
                                              blank=True)
    class Meta:
        verbose_name = _("Activity")
        verbose_name_plural = _("Activities")
        ordering = ("label",)
    def __str__(self):
        return self.label
class ReportCategory(models.Model):
    """Category of the problem described by a report."""
    label = models.CharField(verbose_name=_("Category"),
                             max_length=128)
    # Identifier of the matching category in the Suricate API (optional).
    suricate_id = models.PositiveIntegerField(_("Suricate id"),
                                              null=True,
                                              blank=True)
    class Meta:
        verbose_name = _("Category")
        verbose_name_plural = _("Categories")
        ordering = ("label",)
    def __str__(self):
        return self.label
class ReportStatus(models.Model):
    """Workflow status of a report (e.g. 'Nouveau', used as the default)."""
    label = models.CharField(verbose_name=_("Status"),
                             max_length=128)
    class Meta:
        verbose_name = _("Status")
        verbose_name_plural = _("Status")
    def __str__(self):
        return self.label
class ReportProblemMagnitude(models.Model):
    """Report problem magnitude"""
    label = models.CharField(verbose_name=_("Problem magnitude"),
                             max_length=128)
    # Identifier of the matching magnitude in the Suricate API (optional).
    suricate_id = models.PositiveIntegerField(_("Suricate id"),
                                              null=True,
                                              blank=True)
    class Meta:
        verbose_name = _("Problem magnitude")
        verbose_name_plural = _("Problem magnitudes")
        ordering = ("id",)
    def __str__(self):
        return self.label
| bsd-2-clause |
watspidererik/testenv | flask/lib/python2.7/site-packages/pip/_vendor/_markerlib/markers.py | 1769 | 3979 | # -*- coding: utf-8 -*-
"""Interpret PEP 345 environment markers.
EXPR [in|==|!=|not in] EXPR [or|and] ...
where EXPR belongs to any of those:
python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1])
python_full_version = sys.version.split()[0]
os.name = os.name
sys.platform = sys.platform
platform.version = platform.version()
platform.machine = platform.machine()
platform.python_implementation = platform.python_implementation()
a free string, like '2.6', or 'win32'
"""
__all__ = ['default_environment', 'compile', 'interpret']
import ast
import os
import platform
import sys
import weakref
_builtin_compile = compile
try:
from platform import python_implementation
except ImportError:
if os.name == "java":
# Jython 2.5 has ast module, but not platform.python_implementation() function.
def python_implementation():
return "Jython"
else:
raise
# restricted set of variables
_VARS = {'sys.platform': sys.platform,
'python_version': '%s.%s' % sys.version_info[:2],
# FIXME parsing sys.platform is not reliable, but there is no other
# way to get e.g. 2.7.2+, and the PEP is defined with sys.version
'python_full_version': sys.version.split(' ', 1)[0],
'os.name': os.name,
'platform.version': platform.version(),
'platform.machine': platform.machine(),
'platform.python_implementation': python_implementation(),
'extra': None # wheel extension
}
for var in list(_VARS.keys()):
if '.' in var:
_VARS[var.replace('.', '_')] = _VARS[var]
def default_environment():
    """Return a copy of the default PEP 345 marker globals dictionary."""
    # NOTE: the docstring previously cited "PEP 385", an unrelated PEP;
    # environment markers are specified by PEP 345 (see module docstring).
    # A copy is returned so callers can mutate it without affecting _VARS.
    return dict(_VARS)
class ASTWhitelist(ast.NodeTransformer):
    """Reject any AST node outside a small whitelist of safe node types.

    This is what makes the later eval() of a marker expression safe: only
    names, string literals, comparisons and boolean logic may appear.
    """
    def __init__(self, statement):
        self.statement = statement # for error messages
    ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load, ast.Str)
    # Bool operations
    ALLOWED += (ast.And, ast.Or)
    # Comparison operations
    ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.NotEq, ast.NotIn)
    def visit(self, node):
        """Ensure statement only contains allowed nodes."""
        if not isinstance(node, self.ALLOWED):
            # Point a caret at the offending column of the original marker.
            raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
                              (self.statement,
                               (' ' * node.col_offset) + '^'))
        return ast.NodeTransformer.visit(self, node)
    def visit_Attribute(self, node):
        """Flatten one level of attribute access."""
        # e.g. the Attribute node for ``os.name`` becomes the single Name
        # 'os.name', which is later looked up as one key in the environment.
        new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx)
        return ast.copy_location(new_node, node)
def parse_marker(marker):
    """Parse ``marker`` into an eval-mode AST restricted to whitelisted nodes."""
    raw_tree = ast.parse(marker, mode='eval')
    return ASTWhitelist(marker).generic_visit(raw_tree)
def compile_marker(parsed_marker):
    """Byte-compile an already-parsed marker AST into a code object for eval()."""
    # _builtin_compile is the real builtin, saved before this module's
    # own ``compile`` shadows it.
    return _builtin_compile(parsed_marker, '<environment marker>', 'eval',
                            dont_inherit=True)
_cache = weakref.WeakValueDictionary()
def compile(marker):
    """Return compiled marker as a function accepting an environment dict.

    Note: this deliberately shadows the builtin ``compile`` inside this
    module; the builtin was saved earlier as ``_builtin_compile``.
    """
    try:
        return _cache[marker]
    except KeyError:
        pass
    if not marker.strip():
        # An empty/blank marker matches every environment.
        def marker_fn(environment=None, override=None):
            """"""
            return True
    else:
        compiled_marker = compile_marker(parse_marker(marker))
        def marker_fn(environment=None, override=None):
            """override updates environment"""
            if override is None:
                override = {}
            if environment is None:
                environment = default_environment()
            environment.update(override)
            # Safe because parse_marker() only accepts whitelisted AST nodes.
            return eval(compiled_marker, environment)
    # The (possibly empty) placeholder docstring is replaced by the marker
    # text itself, which is handy when debugging.
    marker_fn.__doc__ = marker
    # _cache is a WeakValueDictionary: entries disappear once callers drop
    # their reference, so the cache cannot grow without bound.
    _cache[marker] = marker_fn
    return _cache[marker]
def interpret(marker, environment=None):
    """Compile ``marker`` and immediately evaluate it against ``environment``."""
    marker_fn = compile(marker)
    return marker_fn(environment)
| mit |
McKenzieKohn/scopus_spider | build/lib/scopus_spider/command_line.py | 2 | 3897 | import parsing
import dir_creation
import scrape
import sys
import argparse
from pprint import pprint as p
import get
def main(args=sys.argv):
    """Parse command-line options and dispatch the requested Scopus queries.

    NOTE(review): ``args=sys.argv`` is evaluated once at import time; fine
    for a CLI entry point, but worth confirming if main() is ever reused.
    """
    parser = argparse.ArgumentParser(description="scopus_spider")
    # dir options
    dirs = parser.add_argument_group()
    # NOTE(review): args[0] is the script path -- confirm that is really the
    # intended default for the input directory.
    dirs.add_argument("-D", dest="input_dir", action="store",
                      help="Directory for config and outputing data",
                      default=args[0])
    dirs.add_argument("-O", dest="output_dir", action="store",
                      help="Alternative output directory")
    # query type options (exactly one of -A / -I / -P must be given)
    query = parser.add_mutually_exclusive_group(required=True)
    query.add_argument("-A", dest="author_search_auid", action="store",
                       help="Search for authors by Scopus au-id")
    query.add_argument("-I", dest="pub_search_pmid", action="store",
                       help="Search for publications using pubmed pmid")
    query.add_argument("-P", dest="pub_search_eid", action="store",
                       help="Search for publications using scopus eid")
    # options for query types
    options = parser.add_argument_group()
    options.add_argument("-m", dest="metadata", action="store_true",
                         help="default, metadata")
    # options.add_argument("-c", dest="coauthors", action="store_true",
    #                      help="coauthors")
    options.add_argument("-d", dest="2nd_coauthors", action="store_true",
                         help="2nd level coauthors")
    options.add_argument("-r", dest="references", action="store_true",
                         help="get references")
    # options.add_argument("-s", dest="reference_authors", action="store_true",
    #                      help="reference authors")
    options.add_argument("-a", dest="cited_by_meta", action="store_true",
                         help="get meta data for cited by")
    options.add_argument("-p", dest="pdfs", action="store_true",
                         help="get pdfs for meta data")
    options.add_argument("-o", dest="pdf_ocr", action="store_true",
                         help="ocr retrieved pdfs")
    # options.add_argument("-T", dest="all_options", action="store_true",
    #                      help="equivalent to -mcdrsapo")
    # Test
    parser.add_argument("-t", dest="test", action="store",
                        help="test")
    opts = vars(parser.parse_args())
    p(opts)
    # tests whether output folders exist and prompt user to delete or reuse
    dir_creation.create_output_dirs()
    # debugging purposes
    # if opts['test'] == '1':
    #     print "joke2:\n" + joke2()
    #     p(sys.path)
    #     print "joke3:\n" + jokes.joke3()
    # define api_key
    # this must be requested from scopus
    # NOTE(review): hard-coded API key committed to source -- should be
    # loaded from a config file or environment variable instead.
    api_key = "007c9e3c95c43a49341b251534dac2ae"
    if opts['pub_search_pmid'] is not None:
        file_input = opts['pub_search_pmid']
        search_type = "pmid"
        if opts['metadata']:
            print("extracting metadata")
            output = get.get_meta(api_key, file_input, search_type)
        if opts['2nd_coauthors']:
            search_option = "auid"
            print("extracting 2nd Level coauthors")
            output = get.get_coauthors_2ndlevel(api_key,
                                                file_input,
                                                search_type,
                                                search_option)
        if opts['references']:
            # NOTE(review): 'auth_id_search' is never defined as a dest
            # above, so these lookups raise KeyError at runtime -- probably
            # one of the *_search dests (e.g. 'author_search_auid') was meant.
            print(opts['auth_id_search'])
            print("extracting references")
        if opts['cited_by_meta']:
            print(opts['auth_id_search'])
            print("extracting cited_by")
        if opts['pdfs']:
            print(opts['auth_id_search'])
            print("extracting PDFs")
        if opts['pdf_ocr']:
            print(opts['auth_id_search'])
            print("extracting test from PDFs")
    # NOTE(review): ``output`` is only bound inside the -m / -d branches, so
    # reaching this line without them (or without -I) raises NameError.
    return(output)
# Standard script entry point: only run main() when executed directly.
if __name__ == "__main__":
    main()
| mit |
gandelman-a/neutron-lbaas | tools/install_venv_common.py | 333 | 5959 | # Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides methods needed by installation script for OpenStack development
virtual environments.
Since this script is used to bootstrap a virtualenv from the system's Python
environment, it should be kept strictly compatible with Python 2.6.
Synced in from openstack-common
"""
from __future__ import print_function
import optparse
import os
import subprocess
import sys
class InstallVenv(object):
    """Driver for creating a development virtualenv and installing deps."""
    def __init__(self, root, venv, requirements,
                 test_requirements, py_version,
                 project):
        """Record paths and project metadata used by every command below."""
        self.root = root
        self.venv = venv
        self.requirements = requirements
        self.test_requirements = test_requirements
        self.py_version = py_version
        self.project = project
    def die(self, message, *args):
        """Print ``message % args`` to stderr and exit with status 1."""
        print(message % args, file=sys.stderr)
        sys.exit(1)
    def check_python_version(self):
        """Abort unless the interpreter is at least Python 2.6."""
        if sys.version_info < (2, 6):
            self.die("Need Python Version >= 2.6")
    def run_command_with_code(self, cmd, redirect_output=True,
                              check_exit_code=True):
        """Runs a command in an out-of-process shell.
        Returns the output of that command. Working directory is self.root.
        """
        if redirect_output:
            stdout = subprocess.PIPE
        else:
            stdout = None
        proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
        output = proc.communicate()[0]
        if check_exit_code and proc.returncode != 0:
            self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
        return (output, proc.returncode)
    def run_command(self, cmd, redirect_output=True, check_exit_code=True):
        """Like run_command_with_code() but return only the command output."""
        return self.run_command_with_code(cmd, redirect_output,
                                          check_exit_code)[0]
    def get_distro(self):
        """Return a distro-specific helper: Fedora family vs everything else."""
        if (os.path.exists('/etc/fedora-release') or
                os.path.exists('/etc/redhat-release')):
            return Fedora(
                self.root, self.venv, self.requirements,
                self.test_requirements, self.py_version, self.project)
        else:
            return Distro(
                self.root, self.venv, self.requirements,
                self.test_requirements, self.py_version, self.project)
    def check_dependencies(self):
        """Ensure virtualenv itself is available on this machine."""
        self.get_distro().install_virtualenv()
    def create_virtualenv(self, no_site_packages=True):
        """Creates the virtual environment and installs PIP.
        Creates the virtual environment and installs PIP only into the
        virtual environment.
        """
        if not os.path.isdir(self.venv):
            print('Creating venv...', end=' ')
            if no_site_packages:
                self.run_command(['virtualenv', '-q', '--no-site-packages',
                                  self.venv])
            else:
                self.run_command(['virtualenv', '-q', self.venv])
            print('done.')
        else:
            print("venv already exists...")
            pass
    def pip_install(self, *args):
        """Run ``pip install --upgrade`` *inside* the venv via with_venv.sh."""
        self.run_command(['tools/with_venv.sh',
                          'pip', 'install', '--upgrade'] + list(args),
                         redirect_output=False)
    def install_dependencies(self):
        """Install/upgrade the toolchain, then project and test requirements."""
        print('Installing dependencies with pip (this can take a while)...')
        # First things first, make sure our venv has the latest pip and
        # setuptools and pbr
        self.pip_install('pip>=1.4')
        self.pip_install('setuptools')
        self.pip_install('pbr')
        self.pip_install('-r', self.requirements, '-r', self.test_requirements)
    def parse_args(self, argv):
        """Parses command-line arguments."""
        parser = optparse.OptionParser()
        parser.add_option('-n', '--no-site-packages',
                          action='store_true',
                          help="Do not inherit packages from global Python "
                               "install.")
        return parser.parse_args(argv[1:])[0]
class Distro(InstallVenv):
    """Generic distribution: bootstrap virtualenv via easy_install if needed."""
    def check_cmd(self, cmd):
        """Return True if ``cmd`` is found on PATH (via ``which``)."""
        return bool(self.run_command(['which', cmd],
                                     check_exit_code=False).strip())
    def install_virtualenv(self):
        """Install virtualenv with easy_install, or die with instructions."""
        if self.check_cmd('virtualenv'):
            return
        if self.check_cmd('easy_install'):
            print('Installing virtualenv via easy_install...', end=' ')
            if self.run_command(['easy_install', 'virtualenv']):
                print('Succeeded')
                return
            else:
                print('Failed')
        self.die('ERROR: virtualenv not found.\n\n%s development'
                 ' requires virtualenv, please install it using your'
                 ' favorite package management tool' % self.project)
class Fedora(Distro):
    """This covers all Fedora-based distributions.
    Includes: Fedora, RHEL, CentOS, Scientific Linux
    """
    def check_pkg(self, pkg):
        """Return True if the RPM package ``pkg`` is installed."""
        return self.run_command_with_code(['rpm', '-q', pkg],
                                          check_exit_code=False)[1] == 0
    def install_virtualenv(self):
        """Require the distro package python-virtualenv before falling back."""
        if self.check_cmd('virtualenv'):
            return
        if not self.check_pkg('python-virtualenv'):
            self.die("Please install 'python-virtualenv'.")
        super(Fedora, self).install_virtualenv()
| apache-2.0 |
inares/edx-platform | common/djangoapps/contentserver/tests/test.py | 110 | 9262 | """
Tests for StaticContentServer
"""
import copy
import ddt
import logging
import unittest
from uuid import uuid4
from django.conf import settings
from django.test.client import Client
from django.test.utils import override_settings
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.xml_importer import import_course_from_xml
from contentserver.middleware import parse_range_header
from student.models import CourseEnrollment
log = logging.getLogger(__name__)
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
@ddt.ddt
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class ContentStoreToyCourseTest(ModuleStoreTestCase):
    """
    Tests that use the toy course.

    setUp imports the 'toy' course and prepares one locked and one
    unlocked static asset whose URLs are exercised by every test.
    """
    def setUp(self):
        """
        Create user and login.
        """
        self.staff_pwd = super(ContentStoreToyCourseTest, self).setUp()
        self.staff_usr = self.user
        self.non_staff_usr, self.non_staff_pwd = self.create_non_staff_user()
        self.client = Client()
        self.contentstore = contentstore()
        store = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.mongo)  # pylint: disable=protected-access
        self.course_key = store.make_course_key('edX', 'toy', '2012_Fall')
        import_course_from_xml(
            store, self.user.id, TEST_DATA_DIR, ['toy'],
            static_content_store=self.contentstore, verbose=True
        )
        # A locked asset
        self.locked_asset = self.course_key.make_asset_key('asset', 'sample_static.txt')
        self.url_locked = unicode(self.locked_asset)
        self.contentstore.set_attr(self.locked_asset, 'locked', True)
        # An unlocked asset
        self.unlocked_asset = self.course_key.make_asset_key('asset', 'another_static.txt')
        self.url_unlocked = unicode(self.unlocked_asset)
        self.length_unlocked = self.contentstore.get_attr(self.unlocked_asset, 'length')
    def test_unlocked_asset(self):
        """
        Test that unlocked assets are being served.
        """
        self.client.logout()
        resp = self.client.get(self.url_unlocked)
        self.assertEqual(resp.status_code, 200)
    def test_locked_asset_not_logged_in(self):
        """
        Test that locked assets behave appropriately in case the user is not
        logged in.
        """
        self.client.logout()
        resp = self.client.get(self.url_locked)
        self.assertEqual(resp.status_code, 403)
    def test_locked_asset_not_registered(self):
        """
        Test that locked assets behave appropriately in case user is logged in
        in but not registered for the course.
        """
        self.client.login(username=self.non_staff_usr, password=self.non_staff_pwd)
        resp = self.client.get(self.url_locked)
        self.assertEqual(resp.status_code, 403)
    def test_locked_asset_registered(self):
        """
        Test that locked assets behave appropriately in case user is logged in
        and registered for the course.
        """
        CourseEnrollment.enroll(self.non_staff_usr, self.course_key)
        self.assertTrue(CourseEnrollment.is_enrolled(self.non_staff_usr, self.course_key))
        self.client.login(username=self.non_staff_usr, password=self.non_staff_pwd)
        resp = self.client.get(self.url_locked)
        self.assertEqual(resp.status_code, 200)
    def test_locked_asset_staff(self):
        """
        Test that locked assets behave appropriately in case user is staff.
        """
        self.client.login(username=self.staff_usr, password=self.staff_pwd)
        resp = self.client.get(self.url_locked)
        self.assertEqual(resp.status_code, 200)
    def test_range_request_full_file(self):
        """
        Test that a range request from byte 0 to last,
        outputs partial content status code and valid Content-Range and Content-Length.
        """
        resp = self.client.get(self.url_unlocked, HTTP_RANGE='bytes=0-')
        self.assertEqual(resp.status_code, 206)  # HTTP_206_PARTIAL_CONTENT
        self.assertEqual(
            resp['Content-Range'],
            'bytes {first}-{last}/{length}'.format(
                first=0, last=self.length_unlocked - 1,
                length=self.length_unlocked
            )
        )
        self.assertEqual(resp['Content-Length'], str(self.length_unlocked))
    def test_range_request_partial_file(self):
        """
        Test that a range request for a partial file,
        outputs partial content status code and valid Content-Range and Content-Length.
        first_byte and last_byte are chosen to be simple but non trivial values.
        """
        # NOTE(review): '/' here relies on Python 2 integer division; under
        # Python 3 these become floats in the Range header -- confirm the
        # target interpreter before porting.
        first_byte = self.length_unlocked / 4
        last_byte = self.length_unlocked / 2
        resp = self.client.get(self.url_unlocked, HTTP_RANGE='bytes={first}-{last}'.format(
            first=first_byte, last=last_byte)
        )
        self.assertEqual(resp.status_code, 206)  # HTTP_206_PARTIAL_CONTENT
        self.assertEqual(resp['Content-Range'], 'bytes {first}-{last}/{length}'.format(
            first=first_byte, last=last_byte, length=self.length_unlocked))
        self.assertEqual(resp['Content-Length'], str(last_byte - first_byte + 1))
    def test_range_request_multiple_ranges(self):
        """
        Test that multiple ranges in request outputs the full content.
        """
        first_byte = self.length_unlocked / 4
        last_byte = self.length_unlocked / 2
        resp = self.client.get(self.url_unlocked, HTTP_RANGE='bytes={first}-{last}, -100'.format(
            first=first_byte, last=last_byte)
        )
        self.assertEqual(resp.status_code, 200)
        self.assertNotIn('Content-Range', resp)
        self.assertEqual(resp['Content-Length'], str(self.length_unlocked))
    @ddt.data(
        'bytes 0-',
        'bits=0-',
        'bytes=0',
        'bytes=one-',
    )
    def test_syntax_errors_in_range(self, header_value):
        """
        Test that syntactically invalid Range values result in a 200 OK full content response.
        """
        resp = self.client.get(self.url_unlocked, HTTP_RANGE=header_value)
        self.assertEqual(resp.status_code, 200)
        self.assertNotIn('Content-Range', resp)
    def test_range_request_malformed_invalid_range(self):
        """
        Test that a range request with malformed Range (first_byte > last_byte) outputs
        416 Requested Range Not Satisfiable.
        """
        resp = self.client.get(self.url_unlocked, HTTP_RANGE='bytes={first}-{last}'.format(
            first=(self.length_unlocked / 2), last=(self.length_unlocked / 4))
        )
        self.assertEqual(resp.status_code, 416)
    def test_range_request_malformed_out_of_bounds(self):
        """
        Test that a range request with malformed Range (first_byte, last_byte == totalLength, offset by 1 error)
        outputs 416 Requested Range Not Satisfiable.
        """
        resp = self.client.get(self.url_unlocked, HTTP_RANGE='bytes={first}-{last}'.format(
            first=(self.length_unlocked), last=(self.length_unlocked))
        )
        self.assertEqual(resp.status_code, 416)
@ddt.ddt
class ParseRangeHeaderTestCase(unittest.TestCase):
    """
    Tests for the parse_range_header function.
    """

    def setUp(self):
        super(ParseRangeHeaderTestCase, self).setUp()
        # Pretend the resource being ranged over is 10000 bytes long, so
        # valid absolute offsets run from 0 to 9999.
        self.content_length = 10000

    def test_bytes_unit(self):
        """A well-formed header reports the 'bytes' unit."""
        unit, __ = parse_range_header('bytes=100-', self.content_length)
        self.assertEqual(unit, 'bytes')

    @ddt.data(
        ('bytes=100-', 1, [(100, 9999)]),
        ('bytes=1000-', 1, [(1000, 9999)]),
        ('bytes=100-199, 200-', 2, [(100, 199), (200, 9999)]),
        ('bytes=100-199, 200-499', 2, [(100, 199), (200, 499)]),
        ('bytes=-100', 1, [(9900, 9999)]),
        ('bytes=-100, -200', 2, [(9900, 9999), (9800, 9999)])
    )
    @ddt.unpack
    def test_valid_syntax(self, header_value, expected_ranges_length, expected_ranges):
        """Valid headers parse into the expected list of (first, last) tuples."""
        # Parameter renamed from the misspelled 'excepted_ranges_length';
        # ddt.unpack passes positionally, so the rename is safe.
        __, ranges = parse_range_header(header_value, self.content_length)
        self.assertEqual(len(ranges), expected_ranges_length)
        self.assertEqual(ranges, expected_ranges)

    @ddt.data(
        ('bytes=one-20', ValueError, 'invalid literal for int()'),
        ('bytes=-one', ValueError, 'invalid literal for int()'),
        ('bytes=-', ValueError, 'invalid literal for int()'),
        ('bytes=--', ValueError, 'invalid literal for int()'),
        ('bytes', ValueError, 'Invalid syntax'),
        ('bytes=', ValueError, 'Invalid syntax'),
        ('bytes=0', ValueError, 'Invalid syntax'),
        ('bytes=0-10,0', ValueError, 'Invalid syntax'),
        ('bytes=0=', ValueError, 'too many values to unpack'),
    )
    @ddt.unpack
    def test_invalid_syntax(self, header_value, exception_class, exception_message_regex):
        """Invalid headers raise the documented exception with a matching message."""
        # assertRaisesRegexp is the Python 2 spelling, kept for compatibility.
        self.assertRaisesRegexp(
            exception_class, exception_message_regex, parse_range_header, header_value, self.content_length
        )
| agpl-3.0 |
joelfrederico/SciSalt | scisalt/matplotlib/latexfig.py | 1 | 2872 | import os
import shutil
import shlex
import subprocess
def latexfig(textstr, filename, environment='align*', env_curly=None):
    """
    Render a LaTeX snippet to a cropped PDF figure.

    Compiles *textstr* inside the given LaTeX *environment* in a scratch
    directory, crops the resulting PDF, and moves it to *filename*.
    Requires ``latexmk`` and ``pdfcrop`` on the PATH.

    :param textstr: LaTeX source placed inside the environment.
    :param filename: destination path; must end in '.pdf'.
    :param environment: LaTeX environment name (default 'align*').
    :param env_curly: optional brace argument appended to the environment.
    :raises IOError: if *filename* does not have a '.pdf' extension.
    """
    # ======================================
    # Validate Extension
    # ======================================
    split = os.path.splitext(filename)
    if split[1] != '.pdf':
        raise IOError('Final filename must have extension ''pdf'', requested: {}'.format(filename))

    # ======================================
    # Get final destination
    # ======================================
    final_path = os.path.abspath(filename)
    try:
        os.mkdir(os.path.dirname(final_path))
    except OSError as e:
        # errno 17 == EEXIST: destination directory already exists, fine.
        if e.errno != 17:
            raise

    # ======================================
    # Remember current directory (we chdir into a scratch dir below)
    # ======================================
    # os.getcwdu() existed only on Python 2; os.getcwd() works on both.
    cwd = os.getcwd()

    # ======================================
    # Create info to write
    # ======================================
    template = r'''
\documentclass[10pt]{{article}}
\usepackage{{amssymb, amsmath, booktabs, multirow}}
\pagestyle{{empty}}
\begin{{document}}
\begin{{{environment}}}{env_curly}
{textstr}
\end{{{environment}}}
\end{{document}}
'''
    if env_curly is not None:
        env_curly = '{{{}}}'.format(env_curly)
    else:
        env_curly = ''
    fullwrite = template.format(textstr=textstr, environment=environment, env_curly=env_curly)

    # ======================================
    # Get file names for intermediate files
    # ======================================
    tempdir = 'temp_latex'
    tempfile = 'temp.tex'
    split = os.path.splitext(tempfile)
    pdffile = '{}.{}'.format(split[0], 'pdf')
    cropfile = '{}-crop.{}'.format(split[0], 'pdf')

    # ======================================
    # Delete and remake temp directory
    # ======================================
    try:
        shutil.rmtree(tempdir)
    except OSError as e:
        # errno 2 == ENOENT: nothing to remove on the first run.
        # (Was 'is not 2': identity comparison on ints is unreliable.)
        if e.errno != 2:
            raise
    os.mkdir(tempdir)

    # ======================================
    # Create temporary file (context manager guarantees the close)
    # ======================================
    with open(os.path.join(tempdir, tempfile), 'w+') as f:
        f.write(fullwrite)

    # ======================================
    # Compile figure
    # ======================================
    os.chdir(tempdir)
    command = 'latexmk {} -pdf'.format(tempfile)
    args = shlex.split(command)
    subprocess.call(args)
    command = 'pdfcrop {} {}'.format(pdffile, cropfile)
    args = shlex.split(command)
    subprocess.call(args)

    # ======================================
    # Move figure to correct location
    # ======================================
    shutil.move(cropfile, final_path)

    # ======================================
    # Clean up
    # ======================================
    os.chdir(cwd)
    shutil.rmtree(tempdir)
| mit |
bird-house/OWSLib | owslib/wps.py | 1 | 82757 | # -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2018 Luca Cinquini
#
# Authors : Luca Cinquini <luca.cinquini@jpl.nasa.gov>
# Carsten Ehbrecht <ehbrecht@dkrz.de>
#
# Contact email: ehbrecht@dkrz.de
# =============================================================================
"""
Abstract
--------
The wps module of the OWSlib package provides client-side functionality for executing invocations to a remote
Web Processing Server.
Disclaimer
----------
.. warning:: The owslib wps module should be considered in beta state: it has been tested versus only a handful of
WPS services (deployed by the USGS, BADC and PML).
More extensive testing is needed and feedback is appreciated.
Usage
-----
The module can be used to execute three types of requests versus a remote WPS endpoint:
# "GetCapabilities"
* use the method wps.getcapabilities(xml=None)
* the optional keyword argument "xml" may be used to avoid a real live request, and instead read the
WPS capabilities document from a cached XML file
# "DescribeProcess"
* use the method wps.describeprocess(identifier, xml=None)
* identifier is the process identifier, retrieved from the list obtained from a previous
"GetCapabilities" invocation
* the optional keyword argument "xml" may be used to avoid a real live request, and instead read the
WPS process description document from a cached XML file
# "Execute"
* use the method wps.execute(identifier, inputs, output=None, request=None, response=None),
which submits the job to the remote WPS server and returns a WPSExecution object that can be used to periodically
check the job status until completion (or error)
* the optional keyword argument "request" may be used to avoid re-building the request XML from input arguments,
and instead submit a request from a pre-made XML file
* alternatively, an "Execute" request can be built from input arguments by supplying the "identifier", "inputs"
and "output" arguments to the execute() method.
* "identifier" is the mandatory process identifier
* "inputs" is a dictionary of (key,value) pairs where:
* key is a named input parameter
* value is either a string, or any python object that supports a getXml() method
In particular, a few classes are included in the package to support a FeatureCollection input:
* "WFSFeatureCollection" can be used in conjunction with "WFSQuery" to define a FEATURE_COLLECTION
retrieved from a live WFS server.
* "GMLMultiPolygonFeatureCollection" can be used to define one or more polygons
of (latitude, longitude) points.
* "output" is an optional output identifier to be included in the ResponseForm section of the request.
* the optional keyword argument "response" may be used to avoid submitting a real live request, and instead
reading the WPS execution response document from a cached XML file (for debugging or testing purposes)
* the convenience module function monitorExecution() can be used to periodically check the status of a remote
running job, and eventually download the output either to a named file, or to a file specified by the server.
Examples
--------
The files examples/wps-usgs-script.py, examples/wps-pml-script-1.py and examples/wps-pml-script-2.py contain
real-world usage examples that submits a "GetCapabilities", "DescribeProcess" and "Execute" requests to
the live USGS and PML servers. To run:
* cd examples
* python wps-usgs-script.py
* python wps-pml-script-1.py
* python wps-pml-script-2.py
The file wps-client.py contains a command-line client that can be used to submit a "GetCapabilities",
"DescribeProcess" or "Execute" request to an arbitrary WPS server. For example, you can run it as follows:
* cd examples
* To prints out usage and example invocations: wps-client -help
* To execute a (fake) WPS invocation::
$ wps-client.py -v -u http://cida.usgs.gov/climate/gdp/process/WebProcessingService -r GetCapabilities -x ../tests/USGSCapabilities.xml # noqa
The directory tests/ includes several doctest-style files wps_*.txt that show how to interactively submit a
"GetCapabilities", "DescribeProcess" or "Execute" request, without making a live request but rather parsing the
response of cached XML response documents. To run:
* cd tests
* python -m doctest wps_*.txt
``(or python -m doctest -v wps_*.txt for verbose output)``
Also, the directory tests/ contains several examples of well-formed "Execute" requests:
* The files wps_USGSExecuteRequest*.xml contain requests that can be submitted to the live USGS WPS service.
* The files PMLExecuteRequest*.xml contain requests that can be submitted to the live PML WPS service.
"""
from owslib.etree import etree
from owslib.ows import DEFAULT_OWS_NAMESPACE, XLINK_NAMESPACE
from owslib.ows import ServiceIdentification, ServiceProvider, OperationsMetadata, BoundingBox
from time import sleep
from owslib.util import (testXMLValue, testXMLAttribute, build_get_url, clean_ows_url, dump, getTypedValue,
getNamespace, element_to_string, nspath, openURL, nspath_eval, log, Authentication)
from xml.dom.minidom import parseString
from owslib.namespaces import Namespaces
from urllib.parse import urlparse
import warnings
# namespace definition
n = Namespaces()

# These static namespaces are DEPRECATED. Please don't use them.
# (No good way to emit a runtime warning since these live at module level.)
WPS_DEFAULT_NAMESPACE = n.get_namespace("wps")
WFS_NAMESPACE = n.get_namespace("wfs")
OGC_NAMESPACE = n.get_namespace("ogc")
GML_NAMESPACE = n.get_namespace("gml")
DRAW_NAMESPACE = n.get_namespace("draw")

# Schema locations advertised in generated request documents.
GML_SCHEMA_LOCATION = "http://schemas.opengis.net/gml/3.1.1/base/feature.xsd"
DRAW_SCHEMA_LOCATION = 'http://cida.usgs.gov/climate/derivative/xsd/draw.xsd'
WFS_SCHEMA_LOCATION = 'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd'
WPS_DEFAULT_SCHEMA_LOCATION = 'http://schemas.opengis.net/wps/1.0.0/wpsExecute_request.xsd'
WPS_DEFAULT_VERSION = '1.0.0'

# WPS execution modes (values accepted by the ``mode`` argument of execute()).
AUTO = 'auto'
SYNC = 'sync'
ASYNC = 'async'
def get_namespaces():
    """Build the prefix-to-URI namespace map used throughout this module."""
    ns = n.get_namespaces(["ogc", "wfs", "wps", "gml", "xsi", "xlink"])
    # WPS is the default (unprefixed) namespace; OWS comes from owslib.ows.
    ns.update({None: n.get_namespace("wps"), "ows": DEFAULT_OWS_NAMESPACE})
    return ns
# Module-wide namespace map, computed once at import time.
namespaces = get_namespaces()
def is_reference(val):
    """
    Return True if *val* parses as a URL with a scheme, i.e. a reference.
    """
    try:
        return bool(urlparse(val).scheme)
    except Exception:
        # Anything unparsable (e.g. None) is not a reference.
        return False
def is_literaldata(val):
    """
    Checks if the provided value is a string (includes unicode).

    String inputs are embedded as <wps:LiteralData> in the Execute request.
    """
    return isinstance(val, str)
def is_boundingboxdata(val):
    """
    Checks if the provided value is an implementation of ``BoundingBoxDataInput``.

    Such inputs serialize themselves into the request via their get_xml() method.
    """
    return isinstance(val, BoundingBoxDataInput)
def is_complexdata(val):
    """
    Checks if the provided value is an implementation of ``IComplexDataInput``.

    Such inputs serialize themselves into the request via their getXml() method.
    """
    return isinstance(val, IComplexDataInput)
def _fix_auth(auth, username=None, password=None, verify=None, cert=None):
"""Updates auth from deprecated parameters username, password, verify and cert."""
if any(p is not None for p in (username, password, verify, cert)):
message = 'The use of "username", "password", "verify", and "cert" is deprecated. ' + \
'Please use the "auth" keyword during class instantiation. ' + \
'These keywords will be removed in a future release.'
warnings.warn(message, DeprecationWarning)
if username is not None:
auth.username = username
if password is not None:
auth.password = password
if verify is not None:
auth.verify = verify
if cert is not None:
auth.cert = cert
return auth
class IComplexDataInput(object):
    """
    Abstract interface representing complex input object for a WPS request.

    Concrete implementations (e.g. feature-collection wrappers mentioned in
    the module docstring) serialize themselves via getXml() when the Execute
    request document is built.
    """
    def getXml(self):
        """
        Method that returns the object data as an XML snippet,
        to be inserted into the WPS request document sent to the server.

        :raises NotImplementedError: subclasses must override this method.
        """
        raise NotImplementedError
class WebProcessingService(object):
    """
    Class that contains client-side functionality for invoking an OGC Web Processing Service (WPS).

    Implements IWebProcessingService.
    """

    def __init__(self, url, version=WPS_DEFAULT_VERSION, username=None, password=None, verbose=False, skip_caps=False,
                 headers=None, verify=None, cert=None, timeout=None, auth=None, language=None):
        """
        Initialization method resets the object status.
        By default it will execute a GetCapabilities invocation to the remote service,
        which can be skipped by using skip_caps=True.

        Parameters username, password, verify and cert are deprecated. Please use auth parameter.
        """
        self.auth = auth or Authentication()
        _fix_auth(self.auth, username, password, verify, cert)
        # fields passed in from object initializer
        self.url = clean_ows_url(url)
        self.version = version
        self.verbose = verbose
        self.headers = headers
        self.timeout = timeout
        self.language = language
        # fields populated by method invocations
        self._capabilities = None   # raw GetCapabilities XML tree
        self.identification = None  # ows:ServiceIdentification metadata
        self.provider = None        # ows:ServiceProvider metadata
        self.operations = []        # OperationsMetadata objects
        self.processes = []         # Process objects offered by the service
        self.languages = None       # default/supported language codes
        if not skip_caps:
            self.getcapabilities()

    def getcapabilities(self, xml=None):
        """
        Method that requests a capabilities document from the remote WPS server and populates this object's metadata.

        :param xml: optional local XML GetCapabilities document, prevents actual HTTP invocation.
        """
        # read capabilities document
        reader = WPSCapabilitiesReader(
            version=self.version,
            verbose=self.verbose,
            auth=self.auth,
            language=self.language,
        )
        if xml:
            # read from stored XML file
            self._capabilities = reader.readFromString(xml)
        else:
            self._capabilities = reader.readFromUrl(
                self.url, headers=self.headers)
        log.debug(element_to_string(self._capabilities))

        # populate the capabilities metadata objects from the XML tree
        self._parseCapabilitiesMetadata(self._capabilities)

    def describeprocess(self, identifier, xml=None):
        """
        Requests a process document from a WPS service and populates the process metadata.
        Returns the process object or a list of process objects.

        :param str identifier: The process id. If `all`, return a list of all processes available.
        :param xml: optional cached DescribeProcess XML document, prevents actual HTTP invocation.
        :raises ValueError: if no offered process matches *identifier*.
        """
        # read capabilities document
        reader = WPSDescribeProcessReader(
            version=self.version,
            verbose=self.verbose,
            auth=self.auth,
            language=self.language,
        )
        if xml:
            # read from stored XML file
            rootElement = reader.readFromString(xml)
        else:
            # read from server
            rootElement = reader.readFromUrl(
                self.url, identifier, headers=self.headers)
        log.info(element_to_string(rootElement))

        # build metadata objects
        processes = self._parseProcessMetadata(rootElement)
        if identifier == 'all':
            return processes
        # return process with given identifier
        for process in processes:
            if process.identifier == identifier:
                return process
        raise ValueError('process with identifier {} not found'.format(identifier))

    def execute(self, identifier, inputs, output=None, mode=ASYNC, lineage=False, request=None, response=None):
        """
        Submits a WPS process execution request.
        Returns a WPSExecution object, which can be used to monitor the status of the job, and ultimately
        retrieve the result.

        :param str identifier: the requested process identifier
        :param inputs: list of process inputs as (input_identifier, value) tuples (where value is either a string
            for LiteralData, or an object for ComplexData).
        :param output: optional list of process outputs as tuples (output_identifier, as_ref, mime_type).
            `as_ref` can be True (as reference),
            False (embedded in response) or None (use service default).
            `mime_type` should be text or None (use service default)
        :param mode: execution mode: SYNC, ASYNC or AUTO. Default: ASYNC
        :param lineage: if lineage is "true", the Execute operation response shall include the DataInputs and
            OutputDefinitions elements.
        :param request: optional pre-built XML request document, prevents building of request from other arguments
        :param response: optional pre-built XML response document, prevents submission of request to live WPS server
        """
        # instantiate a WPSExecution object
        log.info('Executing WPS request...')
        execution = WPSExecution(
            version=self.version,
            url=self.url,
            verbose=self.verbose,
            headers=self.headers,
            timeout=self.timeout,
            auth=self.auth,
            language=self.language,
        )

        # build XML request from parameters
        if request is None:
            requestElement = execution.buildRequest(identifier, inputs, output, mode=mode, lineage=lineage)
            request = etree.tostring(requestElement)
        # record the request actually submitted (built or caller-supplied)
        execution.request = request
        log.debug(request)

        # submit the request to the live server
        if response is None:
            response = execution.submitRequest(request)
        else:
            response = etree.fromstring(response)
        log.debug(etree.tostring(response))

        # parse response
        execution.parseResponse(response)
        return execution

    def getOperationByName(self, name):
        """Return the named operation item; raise KeyError if absent."""
        for item in self.operations:
            if item.name == name:
                return item
        raise KeyError("No operation named %s" % name)

    def _parseProcessMetadata(self, rootElement):
        """Return a list of Process objects parsed from a <ProcessDescriptions> XML element."""
        processDescriptionElements = rootElement.findall('ProcessDescription')
        processes = []
        for processDescriptionElement in processDescriptionElements:
            process = Process(processDescriptionElement, verbose=self.verbose)

            # override existing processes in object metadata, if existing already
            found = False
            # NOTE: loop variable 'n' shadows the module-level Namespaces
            # instance 'n' within this method body only.
            for n, p in enumerate(self.processes):
                if p.identifier == process.identifier:
                    self.processes[n] = process
                    found = True
            # otherwise add it
            if not found:
                self.processes.append(process)

            processes.append(process)

        return processes

    def _parseCapabilitiesMetadata(self, root):
        """Set up capabilities metadata objects."""

        # reset metadata
        self.operations = []
        self.processes = []

        # use the WPS namespace defined in the document root
        # TODO: wpsns not used
        # wpsns = getNamespace(root)
        self.updateSequence = root.attrib.get('updateSequence')

        # loop over children WITHOUT requiring a specific namespace
        for element in root:
            # this element's namespace
            ns = getNamespace(element)

            # <ows:ServiceIdentification> metadata
            if element.tag.endswith('ServiceIdentification'):
                self.identification = ServiceIdentification(
                    element, namespace=ns)
                if self.verbose is True:
                    dump(self.identification)

            # <ows:ServiceProvider> metadata
            elif element.tag.endswith('ServiceProvider'):
                self.provider = ServiceProvider(element, namespace=ns)
                if self.verbose is True:
                    dump(self.provider)

            # <OperationsMetadata>: one <Operation> child per request type
            # (GetCapabilities, DescribeProcess, Execute, ...), each carrying
            # its DCP/HTTP endpoint URLs.
            elif element.tag.endswith('OperationsMetadata'):
                for child in element.findall(nspath('Operation', ns=ns)):
                    self.operations.append(
                        OperationsMetadata(child, namespace=ns))
                    if self.verbose is True:
                        dump(self.operations[-1])

            # <wps:ProcessOfferings>: one <wps:Process> child per offered
            # process, each with an <ows:Identifier> and <ows:Title>.
            elif element.tag.endswith('ProcessOfferings'):
                for child in element.findall(nspath('Process', ns=ns)):
                    p = Process(child, verbose=self.verbose)
                    self.processes.append(p)
                    if self.verbose is True:
                        dump(self.processes[-1])

            # <wps:Languages>: default and supported language codes
            elif element.tag.endswith('Languages'):
                self.languages = Languages(element)
                if self.verbose:
                    dump(self.languages)
class WPSReader(object):
    """
    Superclass for reading a WPS document into a lxml.etree infoset.
    """

    def __init__(self, version=WPS_DEFAULT_VERSION, verbose=False, timeout=30, auth=None, language=None):
        self.version = version    # WPS protocol version, e.g. '1.0.0'
        self.verbose = verbose
        self.timeout = timeout    # socket timeout (seconds) for HTTP requests
        self.auth = auth or Authentication()
        self.language = language  # optional language code sent with requests

    def _readFromUrl(self, url, data, timeout, method='Get', username=None, password=None,
                     headers=None, verify=None, cert=None):
        """
        Method to get and parse a WPS document, returning an elementtree instance.

        :param str url: WPS service base url.
        :param data: GET: dictionary of HTTP (key, value) parameter pairs, POST: XML document to post
        :param timeout: socket timeout used by the POST branch.
            NOTE(review): the GET branch uses self.timeout instead of this
            parameter -- callers currently pass self.timeout anyway, but
            confirm before relying on the parameter for GET requests.

        Parameters username, password, verify and cert are deprecated; use the
        ``auth`` keyword on the constructor instead. Their defaults are all
        None so that _fix_auth only warns (and overrides self.auth) when a
        caller actually supplies one; the previous default of verify=True
        emitted a spurious DeprecationWarning and clobbered auth.verify on
        every call that relied on the default.
        """
        _fix_auth(self.auth, username, password, verify, cert)
        if method == 'Get':
            # full HTTP request url
            if self.language:
                data["language"] = self.language
            request_url = build_get_url(url, data, overwrite=True)
            log.debug(request_url)

            # split URL into base url and query string to use utility function
            spliturl = request_url.split('?')
            u = openURL(spliturl[0], spliturl[
                1], method='Get', username=self.auth.username, password=self.auth.password,
                headers=headers, verify=self.auth.verify, cert=self.auth.cert, timeout=self.timeout)
            return etree.fromstring(u.read())

        elif method == 'Post':
            u = openURL(url, data, method='Post',
                        username=self.auth.username, password=self.auth.password,
                        headers=headers, verify=self.auth.verify, cert=self.auth.cert, timeout=timeout)
            return etree.fromstring(u.read())

        else:
            raise Exception("Unrecognized HTTP method: %s" % method)

    def readFromString(self, string):
        """
        Method to read a WPS GetCapabilities document from an XML string.

        :raises ValueError: if *string* is neither str nor bytes.
        """
        if not isinstance(string, str) and not isinstance(string, bytes):
            raise ValueError(
                "Input must be of type string, not %s" % type(string))
        return etree.fromstring(string)
class WPSCapabilitiesReader(WPSReader):
    """
    Utility class that reads and parses a WPS GetCapabilities document into a lxml.etree infoset.
    """

    def __init__(self, version=WPS_DEFAULT_VERSION, verbose=False, timeout=None, auth=None, language=None):
        # superclass initializer
        super(WPSCapabilitiesReader, self).__init__(
            version=version, verbose=verbose, timeout=timeout, auth=auth, language=language)

    def readFromUrl(self, url, username=None, password=None,
                    headers=None, verify=None, cert=None):
        """
        Method to get and parse a WPS capabilities document, returning an elementtree instance.

        :param str url: WPS service base url, to which is appended the HTTP parameters: service, version, and request.

        Parameters username, password, verify and cert are deprecated;
        use the ``auth`` keyword on the constructor instead.
        """
        return self._readFromUrl(url,
                                 {'service': 'WPS', 'request':
                                  'GetCapabilities', 'version': self.version},
                                 self.timeout,
                                 username=username, password=password,
                                 headers=headers, verify=verify, cert=cert)
class WPSDescribeProcessReader(WPSReader):
    """
    Class that reads and parses a WPS DescribeProcess document into a etree infoset
    """

    def __init__(self, version=WPS_DEFAULT_VERSION, verbose=False, timeout=None, auth=None, language=None):
        # superclass initializer
        super(WPSDescribeProcessReader, self).__init__(
            version=version, verbose=verbose, timeout=timeout, auth=auth, language=language)

    def readFromUrl(self, url, identifier, username=None, password=None,
                    headers=None, verify=None, cert=None):
        """
        Reads a WPS DescribeProcess document from a remote service and returns the XML etree object

        :param str url: WPS service base url, to which is appended the HTTP parameters: 'service', 'version',
            'request', and 'identifier'.
        :param identifier: the process identifier to describe ('all' for every process).

        Parameters username, password, verify and cert are deprecated;
        use the ``auth`` keyword on the constructor instead.
        """
        return self._readFromUrl(url,
                                 {'service': 'WPS', 'request': 'DescribeProcess',
                                  'version': self.version, 'identifier': identifier},
                                 self.timeout,
                                 username=username, password=password,
                                 headers=headers, verify=verify, cert=cert)
class WPSExecuteReader(WPSReader):
    """
    Class that reads and parses a WPS Execute response document into a etree infoset
    """

    def __init__(self, verbose=False, timeout=None, auth=None, language=None):
        # superclass initializer
        super(WPSExecuteReader, self).__init__(verbose=verbose, timeout=timeout, auth=auth, language=language)

    def readFromUrl(self, url, data=None, method='Get', username=None, password=None,
                    headers=None, verify=None, cert=None):
        """
        Reads a WPS status document from a remote service and returns the XML etree object.

        :param str url: the URL to submit the GET/POST request to.
        :param data: GET parameter dict or POST payload; defaults to an empty dict.
        """
        # Build a fresh dict per call: the previous mutable default ({}) was
        # shared across calls and could be polluted by _readFromUrl, which
        # adds a 'language' entry to the dict it receives.
        if data is None:
            data = {}
        return self._readFromUrl(url, data, self.timeout, method, username=username, password=password,
                                 headers=headers, verify=verify, cert=cert)
class WPSExecution(object):
"""
Class that represents a single WPS process executed on a remote WPS service.
"""
def __init__(self, version=WPS_DEFAULT_VERSION, url=None, username=None, password=None, verbose=False,
             headers=None, verify=None, cert=None, timeout=None, auth=None, language=None):
    """
    Initialize the execution tracker.

    Parameters username, password, verify and cert are deprecated;
    use the ``auth`` keyword instead.
    """
    # initialize fields
    self.url = url
    self.version = version
    self.verbose = verbose
    self.headers = headers
    self.auth = auth or Authentication()
    _fix_auth(self.auth, username, password, verify, cert)
    self.timeout = timeout
    self.language = language

    # request document
    self.request = None
    # last response document
    self.response = None

    # status fields retrieved from the response documents
    self.process = None          # process metadata, once parsed
    self.serviceInstance = None
    self.status = None           # e.g. 'ProcessAccepted'/'ProcessSucceeded' (see isComplete)
    self.percentCompleted = 0
    self.statusMessage = None
    self.errors = []             # populated when the execution fails
    self.statusLocation = None   # URL polled by checkStatus()
    self.dataInputs = []
    self.processOutputs = []
    self.creationTime = None
def buildRequest(self, identifier, inputs=[], output=None, mode=ASYNC, lineage=False):
    """
    Method to build a WPS process request.

    :param str identifier: the requested process identifier
    :param inputs: array of input arguments for the process.
        - LiteralData inputs are expressed as simple (key, value) tuples where key is the input identifier,
          value is the value
        - ComplexData inputs are expressed as (key, object) tuples, where key is the input identifier,
          and the object must contain a 'getXml()' method that returns an XML infoset to be included in
          the WPS request
    :param output: array of outputs which should be returned:
        expressed as tuples (key, as_ref, mime_type) where key is the output identifier and as_ref is True
        if output should be returned as reference.
        as_ref and mime_type may be null for using server's default value.
        A plain string identifier is accepted for backward compatibility.
    :param mode: execution mode: SYNC, ASYNC or AUTO (AUTO falls back to async for WPS 1.0.0).
    :param lineage: if lineage is "true", the Execute operation response shall include the DataInputs and
        OutputDefinitions elements.
    :return: the <wps:Execute> root element.
    """
    # TODO: auto mode needs to be implemented for WPS 2.0.0
    # Compare with '==', not 'is': identity on strings only works by the
    # accident of interning, and callers may pass a plain 'sync'/'auto'.
    if mode == SYNC:
        _async = False
    elif mode == AUTO:
        # log.warn is a deprecated alias of log.warning
        log.warning("Auto mode not available in WPS 1.0.0. Using async mode.")
        _async = True
    else:
        _async = True

    # <wps:Execute> root carrying service/version/schema attributes
    root = etree.Element(nspath_eval('wps:Execute', namespaces))
    root.set('service', 'WPS')
    root.set('version', WPS_DEFAULT_VERSION)
    if self.language:
        root.set('language', self.language)
    root.set(nspath_eval('xsi:schemaLocation', namespaces), '%s %s' %
             (namespaces['wps'], WPS_DEFAULT_SCHEMA_LOCATION))

    # <ows:Identifier>: the process to execute
    identifierElement = etree.SubElement(
        root, nspath_eval('ows:Identifier', namespaces))
    identifierElement.text = identifier

    # <wps:DataInputs>: one <wps:Input> per (key, value) pair
    dataInputsElement = etree.SubElement(
        root, nspath_eval('wps:DataInputs', namespaces))
    for (key, val) in inputs:
        inputElement = etree.SubElement(
            dataInputsElement, nspath_eval('wps:Input', namespaces))
        identifierElement = etree.SubElement(
            inputElement, nspath_eval('ows:Identifier', namespaces))
        identifierElement.text = key

        if is_literaldata(val):
            # embedded <wps:Data><wps:LiteralData> value
            log.debug("literaldata %s", key)
            dataElement = etree.SubElement(
                inputElement, nspath_eval('wps:Data', namespaces))
            literalDataElement = etree.SubElement(
                dataElement, nspath_eval('wps:LiteralData', namespaces))
            literalDataElement.text = val
        elif is_complexdata(val):
            # the input object serializes itself (e.g. a <wps:Reference>
            # wrapping a WFS GetFeature query, or inline GML)
            log.debug("complexdata %s", key)
            inputElement.append(val.getXml())
        elif is_boundingboxdata(val):
            inputElement.append(val.get_xml())
        else:
            raise Exception(
                'input type of "%s" parameter is unknown' % key)

    # Optional <wps:ResponseForm>/<wps:ResponseDocument> with one
    # <wps:Output> per requested output.
    if output is not None:
        responseFormElement = etree.SubElement(
            root, nspath_eval('wps:ResponseForm', namespaces))
        responseDocumentElement = etree.SubElement(
            responseFormElement, nspath_eval(
                'wps:ResponseDocument', namespaces),
            attrib={'storeExecuteResponse': str(_async).lower(),
                    'status': str(_async).lower(),
                    'lineage': str(lineage).lower()})
        # keeping backward compatibility of output parameter
        if isinstance(output, str):
            self._add_output(responseDocumentElement, output)
        elif isinstance(output, list):
            for output_tuple in output:
                # tuple (identifier, as_reference) for backward compatibility
                if len(output_tuple) == 2:
                    (identifier, as_reference) = output_tuple
                    mime_type = None
                else:
                    (identifier, as_reference, mime_type) = output_tuple
                self._add_output(
                    responseDocumentElement, identifier, asReference=as_reference, mimeType=mime_type)
        else:
            raise Exception(
                'output parameter is neither string nor list. output=%s' % output)
    return root
def _add_output(self, element, identifier, asReference=None, mimeType=None):
    """
    Append a <wps:Output> child to *element*, carrying optional mimeType
    and asReference attributes plus the mandatory <ows:Identifier>.
    """
    attribs = {}
    if isinstance(mimeType, str):
        attribs['mimeType'] = mimeType
    if isinstance(asReference, bool):
        attribs['asReference'] = str(asReference).lower()
    output_element = etree.SubElement(
        element, nspath_eval('wps:Output', namespaces), attrib=attribs)
    identifier_element = etree.SubElement(
        output_element, nspath_eval('ows:Identifier', namespaces))
    identifier_element.text = identifier
# wait for 60 seconds by default
def checkStatus(self, url=None, response=None, sleepSecs=60):
    """
    Method to check the status of a job execution.
    In the process, this method will update the object 'response' attribute.

    :param str url: optional 'statusLocation' URL retrieved from a previous WPS Execute response document.
        If not provided, the current 'statusLocation' URL will be used.
    :param response: optional XML status document (string); prevents a live HTTP request.
    :param int sleepSecs: number of seconds to sleep before returning control to the caller.
    """
    reader = WPSExecuteReader(verbose=self.verbose, auth=self.auth, language=self.language)
    if response is None:
        # override status location
        if url is not None:
            self.statusLocation = url
        log.info('\nChecking execution status... (location=%s)' %
                 self.statusLocation)
        try:
            response = reader.readFromUrl(
                self.statusLocation, headers=self.headers)
        except Exception:
            # best-effort poll: keep the previous status on a failed fetch
            log.error("Could not read status document.")
    else:
        response = reader.readFromString(response)

    # store latest response
    try:
        xml = etree.tostring(response)
    except Exception:
        log.error("Could not parse XML response.")
    else:
        self.response = xml
        log.debug(self.response)

        self.parseResponse(response)

    # sleep given number of seconds
    if self.isComplete() is False:
        log.info('Sleeping %d seconds...' % sleepSecs)
        sleep(sleepSecs)
def getStatus(self):
return self.status
def isComplete(self):
if (self.status == 'ProcessSucceeded' or self.status == 'ProcessFailed' or self.status == 'Exception'):
return True
elif (self.status == 'ProcessStarted'):
return False
elif (self.status == 'ProcessAccepted' or self.status == 'ProcessPaused'):
return False
else:
raise Exception(
'Unknown process execution status: %s' % self.status)
def isSucceded(self):
if self.status == 'ProcessSucceeded':
return True
else:
return False
def isNotComplete(self):
return not self.isComplete()
def getOutput(self, filepath=None, identifier=None):
"""
Method to write the outputs of a WPS process to a file:
either retrieves the referenced files from the server, or writes out the content of response embedded output.
:param filepath: optional path to the output file, otherwise a file will be created in the local directory with
the name assigned by the server, or default name 'wps.out' for embedded output.
:param: identifier: optional identifier of the output that should be written.
For backward compatibility it will default to the first output.
"""
if self.isSucceded():
content = b''
output = None
if self.processOutputs:
if identifier:
# filter outputs by identifier
outputs = [o for o in self.processOutputs if o.identifier == identifier]
if outputs:
output = outputs[0]
else:
# take the first found output
output = self.processOutputs[0]
if output:
# ExecuteResponse contains reference to server-side output
if output.reference:
content = output.retrieveData(
self.auth.username, self.auth.password,
headers=self.headers, verify=self.auth.verify, cert=self.auth.cert)
if filepath is None:
filepath = output.fileName
# ExecuteResponse contain embedded output
elif len(output.data) > 0:
if filepath is None:
filepath = 'wps.out'
for data in output.data:
content = content + data.encode()
# write out content
if content != b'':
out = open(filepath, 'wb')
out.write(content)
out.close()
log.info(f'Output written to file: {filepath}')
else:
raise Exception(
f"Execution not successfully completed: status={self.status}")
    def submitRequest(self, request):
        """
        Submits a WPS Execute document to a remote service, returns the XML response document from the server.
        This method will save the request document and the first returned response document.

        :param request: the XML request document to be submitted as POST to the server.
        :return: the raw XML response returned by the server.
        """
        self.request = request
        reader = WPSExecuteReader(verbose=self.verbose, timeout=self.timeout, auth=self.auth)
        response = reader.readFromUrl(
            self.url, request, method='Post', headers=self.headers)
        self.response = response
        return response
    # NOTE(review): the triple-quoted block below is dead code (a class-level
    # no-op string expression) left over from an earlier revision of
    # checkStatus(); it is never executed.
    '''
    if response is None:
        # override status location
        if url is not None:
            self.statusLocation = url
    else:
        response = reader.readFromString(response)
    '''
def parseResponse(self, response):
"""
Method to parse a WPS response document
"""
rootTag = response.tag.split('}')[1]
# <ns0:ExecuteResponse>
if rootTag == 'ExecuteResponse':
self._parseExecuteResponse(response)
# <ows:ExceptionReport>
elif rootTag == 'ExceptionReport':
self._parseExceptionReport(response)
else:
log.debug('Unknown Response')
# log status, errors
log.info('Execution status=%s' % self.status)
log.info('Percent completed=%s' % self.percentCompleted)
log.info('Status message=%s' % self.statusMessage)
for error in self.errors:
dump(error)
def _parseExceptionReport(self, root):
"""
Method to parse a WPS ExceptionReport document and populate this object's metadata.
"""
# set exception status
self.status = "Exception"
for exceptionEl in root.findall(nspath('Exception', ns=namespaces['ows'])):
self.errors.append(WPSException(exceptionEl))
    def _parseExecuteResponse(self, root):
        """
        Method to parse a WPS ExecuteResponse response document and populate this object's metadata.

        :param root: the ExecuteResponse root element (etree Element).
        """
        # retrieve WPS namespace directly from root element
        wpsns = getNamespace(root)
        self.serviceInstance = root.get('serviceInstance')
        # keep an explicitly supplied statusLocation (e.g. from checkStatus)
        if self.statusLocation is None:
            self.statusLocation = root.get('statusLocation')
        # <ns0:Status creationTime="2011-11-09T14:19:50Z">
        #  <ns0:ProcessSucceeded>PyWPS Process v.net.path successfully calculated</ns0:ProcessSucceeded>
        # </ns0:Status>
        # OR
        # <ns0:Status creationTime="2011-11-07T08:26:44.359-06:00">
        #  <ns0:ProcessFailed>
        #   <ows:ExceptionReport xmlns:ows="http://www.opengis.net/ows/1.1">
        #    <ows:Exception>
        #     <ows:ExceptionText>Attribute null not found in feature collection</ows:ExceptionText>
        #    </ows:Exception>
        #   </ows:ExceptionReport>
        #  </ns0:ProcessFailed>
        # </ns0:Status>
        # the status is the tag name of the single child of <Status>
        statusEl = root.find(nspath('Status/*', ns=wpsns))
        self.status = statusEl.tag.split('}')[1]
        # creationTime attribute
        element = root.find(nspath('Status', ns=wpsns))
        self.creationTime = testXMLAttribute(element, 'creationTime')
        # get progress info
        if self.status == 'ProcessSucceeded':
            self.percentCompleted = 100
        else:
            # percentCompleted attribute is optional and may be malformed
            try:
                percentCompleted = int(statusEl.get('percentCompleted'))
                self.percentCompleted = percentCompleted
            except Exception:
                pass
        # get status message
        self.statusMessage = statusEl.text
        # exceptions ?
        for element in statusEl:
            if element.tag.endswith('ExceptionReport'):
                self._parseExceptionReport(element)
        self.process = Process(
            root.find(nspath('Process', ns=wpsns)), verbose=self.verbose)
        # <wps:DataInputs xmlns:wps="http://www.opengis.net/wps/1.0.0"
        # xmlns:ows="http://www.opengis.net/ows/1.1"
        # xmlns:xlink="http://www.w3.org/1999/xlink">
        # re-parse inputs/outputs from scratch on every (polled) response
        if len(self.dataInputs) > 0:
            log.debug('clean data inputs')
            self.dataInputs[:] = []
        for inputElement in root.findall(nspath('DataInputs/Input', ns=wpsns)):
            self.dataInputs.append(Output(inputElement))
            if self.verbose is True:
                dump(self.dataInputs[-1])
        # <ns:ProcessOutputs>
        # xmlns:ns="http://www.opengis.net/wps/1.0.0"
        if len(self.processOutputs) > 0:
            log.debug('clean process outputs')
            self.processOutputs[:] = []
        for outputElement in root.findall(nspath('ProcessOutputs/Output', ns=wpsns)):
            self.processOutputs.append(Output(outputElement))
            if self.verbose is True:
                dump(self.processOutputs[-1])
class ComplexData(object):
    """
    Class that represents a ComplexData element in a WPS document:
    a (mimeType, encoding, schema) format triple, each member optional.
    """

    def __init__(self, mimeType=None, encoding=None, schema=None):
        # all members default to None when the format does not declare them
        self.mimeType = mimeType
        self.encoding = encoding
        self.schema = schema
class InputOutput(object):
    """
    Superclass of a WPS input or output data object.

    Parses metadata common to process inputs and outputs (identifier, title,
    abstract, metadata entries); the _parse*() helpers fill in the
    data-type specific details (literal / complex / bounding-box).
    """
    def __init__(self, element):
        # NOTE(review): self.identifier and self.title are only assigned when
        # the corresponding child elements exist -- confirm callers tolerate
        # their absence.
        self.abstract = None
        self.metadata = []
        # loop over sub-elements without requiring a specific namespace
        for child in element:
            # <ows:Identifier xmlns:ows="http://www.opengis.net/ows/1.1">SUMMARIZE_TIMESTEP</ows:Identifier>
            if child.tag.endswith('Identifier'):
                self.identifier = testXMLValue(child)
            # <ows:Title xmlns:ows="http://www.opengis.net/ows/1.1">Summarize Timestep</ows:Title>
            elif child.tag.endswith('Title'):
                self.title = testXMLValue(child)
            # <ows:Abstract xmlns:ows="http://www.opengis.net/ows/1.1">
            # If selected, processing output will include columns with summarized statistics for all
            # feature attribute values for each timestep
            # </ows:Abstract>
            elif child.tag.endswith('Abstract'):
                self.abstract = testXMLValue(child)
            # <ows:Metadata xlink:title="Documentation" xlink:href="http://emu.readthedocs.org/en/latest/"/>
            elif child.tag.endswith('Metadata'):
                self.metadata.append(Metadata(child))
        # containers populated by the _parse*() helpers below
        self.allowedValues = []
        self.supportedValues = []
        self.defaultValue = None
        self.dataType = None
        self.anyValue = False
    def _parseData(self, element):
        """
        Method to parse a "Data" element: only detects a ComplexData child
        and records the data type accordingly.
        """
        # <ns0:Data>
        #    <ns0:ComplexData mimeType="text/plain">
        #       7504912.93758151 -764109.175074507,7750849.82379226 -22141.8611641468,8561828.42371234 -897195.923493867,7724946.16844165 -602984.014261927 # noqa
        #    </ns0:ComplexData>
        # </ns0:Data>
        # nspath('Data', ns=WPS_NAMESPACE)
        complex_data_element = element.find(
            nspath('ComplexData', ns=getNamespace(element)))
        if complex_data_element is not None:
            self.dataType = "ComplexData"
    def _parseLiteralData(self, element, literalElementName):
        """
        Method to parse the LiteralData element: extracts the data type,
        default value, allowed values and the AnyValue flag.
        """
        # <LiteralData>
        #   <ows:DataType ows:reference="xs:string" xmlns:ows="http://www.opengis.net/ows/1.1" />
        #   <ows:AllowedValues xmlns:ows="http://www.opengis.net/ows/1.1">
        #     <ows:Value>COMMA</ows:Value>
        #     <ows:Value>TAB</ows:Value>
        #     <ows:Value>SPACE</ows:Value>
        #   </ows:AllowedValues>
        #   <DefaultValue>COMMA</DefaultValue>
        # </LiteralData>
        # <LiteralData>
        #   <ows:DataType ows:reference="xs:anyURI" xmlns:ows="http://www.opengis.net/ows/1.1" />
        #   <ows:AnyValue xmlns:ows="http://www.opengis.net/ows/1.1" />
        # </LiteralData>
        literal_data_element = element.find(literalElementName)
        if literal_data_element is not None:
            self.dataType = 'LiteralData'
            # first pass: resolve the declared data type (element text, or the
            # trailing token of the ows:reference attribute, e.g. "xs:string")
            for sub_element in literal_data_element:
                subns = getNamespace(sub_element)
                if sub_element.tag.endswith('DataType'):
                    self.dataType = sub_element.text
                    if not self.dataType:
                        reference = sub_element.get(nspath("reference", ns=subns))
                        # backward search of first non-alpha character (:, #, /, etc.)
                        pos = len(reference) - 1
                        while pos >= 0 and reference[pos].isalpha():
                            pos -= 1
                        # obtain substring after found non-alpha character position
                        self.dataType = reference[pos + 1:]
            # second pass: values are typed using the data type found above
            for sub_element in literal_data_element:
                subns = getNamespace(sub_element)
                if sub_element.tag.endswith('DefaultValue'):
                    self.defaultValue = getTypedValue(
                        self.dataType, sub_element.text)
                if sub_element.tag.endswith('AllowedValues'):
                    for value in sub_element.findall(nspath('Value', ns=subns)):
                        self.allowedValues.append(
                            getTypedValue(self.dataType, value.text))
                elif sub_element.tag.endswith('AnyValue'):
                    self.anyValue = True
    def _parseComplexData(self, element, complexDataElementName):
        """
        Method to parse a ComplexData or ComplexOutput element: populates
        supportedValues and defaultValue with ComplexData format triples.
        """
        # <ComplexData>
        #   <Default>
        #       <Format>
        #           <MimeType>text/xml</MimeType>
        #           <Encoding>UTF-8</Encoding>
        #           <Schema>http://schemas.opengis.net/gml/2.0.0/feature.xsd</Schema>
        #       </Format>
        #   </Default>
        #   <Supported>
        #       <Format>
        #           <MimeType>text/xml</MimeType>
        #           <Encoding>UTF-8</Encoding>
        #           <Schema>http://schemas.opengis.net/gml/2.0.0/feature.xsd</Schema>
        #       </Format>
        #       <Format>
        #           <MimeType>text/xml</MimeType>
        #           <Encoding>UTF-8</Encoding>
        #           <Schema>http://schemas.opengis.net/gml/2.1.1/feature.xsd</Schema>
        #       </Format>
        #   </Supported>
        # </ComplexData>
        # OR
        # <ComplexOutput defaultEncoding="UTF-8" defaultFormat="text/XML" defaultSchema="NONE">
        #     <SupportedComplexData>
        #         <Format>text/XML</Format>
        #         <Encoding>UTF-8</Encoding>
        #         <Schema>NONE</Schema>
        #     </SupportedComplexData>
        # </ComplexOutput>
        complex_data_element = element.find(complexDataElementName)
        if complex_data_element is not None:
            self.dataType = "ComplexData"
            # ComplexOutput variant: <SupportedComplexData> children
            for supported_comlexdata_element in\
                    complex_data_element.findall('SupportedComplexData'):
                self.supportedValues.append(
                    ComplexData(
                        mimeType=testXMLValue(
                            supported_comlexdata_element.find('Format')),
                        encoding=testXMLValue(
                            supported_comlexdata_element.find('Encoding')),
                        schema=testXMLValue(
                            supported_comlexdata_element.find('Schema'))
                    )
                )
            # ComplexData variant: <Supported>/<Format> children
            for format_element in\
                    complex_data_element.findall('Supported/Format'):
                self.supportedValues.append(
                    ComplexData(
                        mimeType=testXMLValue(format_element.find('MimeType')),
                        encoding=testXMLValue(format_element.find('Encoding')),
                        schema=testXMLValue(format_element.find('Schema'))
                    )
                )
            default_format_element = complex_data_element.find('Default/Format')
            if default_format_element is not None:
                self.defaultValue = ComplexData(
                    mimeType=testXMLValue(
                        default_format_element.find('MimeType')),
                    encoding=testXMLValue(
                        default_format_element.find('Encoding')),
                    schema=testXMLValue(default_format_element.find('Schema'))
                )
    def _parseBoundingBoxData(self, element, bboxElementName):
        """
        Method to parse the BoundingBoxData element: populates
        supportedValues and defaultValue with CRS name strings.
        """
        # <BoundingBoxData>
        #   <Default>
        #     <CRS>epsg:4326</CRS>
        #   </Default>
        #   <Supported>
        #     <CRS>epsg:4326</CRS>
        #   </Supported>
        # </BoundingBoxData>
        #
        # OR
        #
        # <BoundingBoxOutput>
        #   <Default>
        #     <CRS>epsg:4326</CRS>
        #   </Default>
        #   <Supported>
        #     <CRS>epsg:4326</CRS>
        #   </Supported>
        # </BoundingBoxOutput>
        bbox_data_element = element.find(bboxElementName)
        if bbox_data_element is not None:
            self.dataType = 'BoundingBoxData'
            for bbox_element in bbox_data_element.findall('Supported/CRS'):
                self.supportedValues.append(bbox_element.text)
            default_bbox_element = bbox_data_element.find('Default/CRS')
            if default_bbox_element is not None:
                self.defaultValue = default_bbox_element.text
class Input(InputOutput):
    """
    Class that represents a WPS process input.
    """

    def __init__(self, inputElement):
        # superclass parses Identifier/Title/Abstract/Metadata
        super(Input, self).__init__(inputElement)
        # occurrence bounds come either from attributes on <Input ...>
        # or from <MinimumOccurs>/<MaximumOccurs> child elements;
        # -1 means "not specified"
        self.minOccurs = -1
        min_attr = inputElement.get("minOccurs")
        if min_attr is not None:
            self.minOccurs = int(min_attr)
        min_el = inputElement.find('MinimumOccurs')
        if min_el is not None:
            self.minOccurs = int(testXMLValue(min_el))
        self.maxOccurs = -1
        max_attr = inputElement.get("maxOccurs")
        if max_attr is not None:
            self.maxOccurs = int(max_attr)
        max_el = inputElement.find('MaximumOccurs')
        if max_el is not None:
            self.maxOccurs = int(testXMLValue(max_el))
        # delegate data-type specific parsing to the superclass helpers
        self._parseLiteralData(inputElement, 'LiteralData')
        self._parseComplexData(inputElement, 'ComplexData')
        self._parseBoundingBoxData(inputElement, 'BoundingBoxData')
class Output(InputOutput):
    """
    Class that represents a WPS process output.

    Beyond the identification metadata parsed by InputOutput, an instance
    records how the output value was returned: by reference (``reference``
    URL) or embedded in the response (``data`` list of strings and/or
    BoundingBox objects).
    """

    def __init__(self, outputElement):
        # superclass initializer parses Identifier/Title/Abstract/Metadata
        super(Output, self).__init__(outputElement)
        self.reference = None   # URL of server-side output, if returned by reference
        self.mimeType = None
        self.data = []          # embedded payloads
        self.fileName = None    # set by retrieveData()/writeToDisk()
        self.filePath = None    # set by writeToDisk()
        # extract wps namespace from outputElement itself
        wpsns = getNamespace(outputElement)
        # <ns:Reference encoding="UTF-8" mimeType="text/csv"
        # href="http://cida.usgs.gov/climate/gdp/process/RetrieveResultServlet?id=1318528582026OUTPUT.601bb3d0-547f-4eab-8642-7c7d2834459e"
        # />
        referenceElement = outputElement.find(nspath('Reference', ns=wpsns))
        if referenceElement is not None:
            # the href attribute may be xlink-qualified or unqualified
            xlinkns = get_namespaces()['xlink']
            xlink_href = '{{{}}}href'.format(xlinkns)
            if xlink_href in list(referenceElement.keys()):
                self.reference = referenceElement.get(xlink_href)
            else:
                self.reference = referenceElement.get('href')
            self.mimeType = referenceElement.get('mimeType')
        # <LiteralOutput>
        self._parseLiteralData(outputElement, 'LiteralOutput')
        # <ComplexData> or <ComplexOutput>
        self._parseComplexData(outputElement, 'ComplexOutput')
        # <BoundingBoxOutput>
        self._parseBoundingBoxData(outputElement, 'BoundingBoxOutput')
        # Embedded output, e.g.:
        # <ns0:Data>
        #   <ns0:ComplexData mimeType="text/plain">
        #     7504912.93758151 -764109.175074507,...
        #   </ns0:ComplexData>
        # </ns0:Data>
        # or a <ns0:LiteralData> child, or an OWS BoundingBox:
        # <wps:Data>
        #   <ows:BoundingBox crs="EPSG:4326" dimensions="2">
        #     <ows:LowerCorner>0.0 -90.0</ows:LowerCorner>
        #     <ows:UpperCorner>180.0 90.0</ows:UpperCorner>
        #   </ows:BoundingBox>
        # </wps:Data>
        dataElement = outputElement.find(nspath('Data', ns=wpsns))
        if dataElement is not None:
            complexDataElement = dataElement.find(
                nspath('ComplexData', ns=wpsns))
            if complexDataElement is not None:
                self.dataType = "ComplexData"
                self.mimeType = complexDataElement.get('mimeType')
                if complexDataElement.text is not None and complexDataElement.text.strip() != '':
                    self.data.append(complexDataElement.text.strip())
                for child in complexDataElement:
                    self.data.append(etree.tostring(child))
            literalDataElement = dataElement.find(
                nspath('LiteralData', ns=wpsns))
            if literalDataElement is not None:
                self.dataType = literalDataElement.get('dataType')
                if literalDataElement.text is not None and literalDataElement.text.strip() != '':
                    self.data.append(literalDataElement.text.strip())
            bboxDataElement = dataElement.find(nspath('BoundingBox', ns=namespaces['ows']))
            # BUGFIX: fall back to wps:BoundingBoxData (workaround for
            # data-inputs in lineage) only when NO ows:BoundingBox was found;
            # the previous `is not None` condition discarded a successfully
            # located element.
            if bboxDataElement is None:
                bboxDataElement = dataElement.find(nspath('BoundingBoxData', ns=namespaces['wps']))
            if bboxDataElement is not None:
                self.dataType = "BoundingBoxData"
                bbox = BoundingBox(bboxDataElement)
                if bbox:
                    self.data.append(bbox)

    def retrieveData(self, username=None, password=None, headers=None, verify=True, cert=None):
        """
        Method to retrieve data from server-side reference:
        returns b'' if the reference is not known.

        :param username: credentials to access the remote WPS server
        :param password: credentials to access the remote WPS server
        :return: the raw bytes content of the referenced resource
        """
        url = self.reference
        if url is None:
            return b''
        # a) 'http://cida.usgs.gov/climate/gdp/process/RetrieveResultServlet?id=1318528582026OUTPUT.601bb3d0-547f-4eab-8642-7c7d2834459e' # noqa
        # b) 'http://rsg.pml.ac.uk/wps/wpsoutputs/outputImage-11294Bd6l2a.tif'
        log.info('Output URL=%s' % url)
        # Extract output filepath from base URL
        self.fileName = url.split('/')[-1]
        # The link is a local file.
        # Useful when running local tests during development.
        if url.startswith("file://"):
            # BUGFIX: read as binary so the return type (bytes) matches the
            # remote branches; callers compare/concatenate with bytes.
            with open(url[7:], 'rb') as f:
                return f.read()
        if '?' in url:
            spliturl = url.split('?')
            # Extract output filepath from URL query string
            self.fileName = spliturl[1].split('=')[1]
            u = openURL(spliturl[0], spliturl[
                1], method='Get', username=username, password=password,
                headers=headers, verify=verify, cert=cert)
        else:
            u = openURL(
                url, '', method='Get', username=username, password=password,
                headers=headers, verify=verify, cert=cert)
        return u.read()

    def writeToDisk(self, path=None, username=None, password=None,
                    headers=None, verify=True, cert=None):
        """
        Method to write an output of a WPS process to disk:
        it either retrieves the referenced file from the server, or write out the content of response embedded output.

        :param path: optional directory prefix for the output file, otherwise a file will be created in the local
            directory with the name assigned by the server,
        :param username: credentials to access the remote WPS server
        :param password: credentials to access the remote WPS server
        """
        # Check if ExecuteResponse contains reference to server-side output
        content = self.retrieveData(username, password, headers=headers, verify=verify, cert=cert)
        # ExecuteResponse contains embedded output
        # BUGFIX: compare against b'' -- retrieveData() returns bytes, so the
        # old comparison with '' could never be true under Python 3.
        if content == b'' and len(self.data) > 0:
            self.fileName = self.identifier
            for data in self.data:
                # embedded payloads are text; normalise to bytes before
                # concatenating (the old str+bytes mix raised TypeError)
                content = content + (data.encode() if isinstance(data, str) else data)
        # write out content
        if content != b'':
            if not self.fileName:
                self.fileName = self.identifier
            # tolerate the documented "optional" path (None -> current dir)
            self.filePath = (path or '') + self.fileName
            # context manager guarantees the handle is closed on error
            with open(self.filePath, 'wb') as out:
                out.write(content)
            log.info('Output written to file: %s' % self.filePath)
class WPSException:
    """
    Class representing an exception raised by a WPS.

    Holds the exceptionCode / locator attributes and the text of the
    optional ExceptionText child element.
    """

    def __init__(self, root):
        # both attributes are optional in an ows:Exception element
        self.code = root.attrib.get("exceptionCode")
        self.locator = root.attrib.get("locator")
        text_el = root.find(nspath('ExceptionText', ns=getNamespace(root)))
        self.text = text_el.text if text_el is not None else ""
class Metadata(object):
    """Initialize an OWS Metadata construct: exposes the xlink href, title
    and role attributes as ``url``, ``title`` and ``role`` members."""

    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        self.url = None
        self.title = None
        self.role = None
        if elem is None:
            return
        # map each xlink attribute onto the corresponding member
        for attr_name, member in (('href', 'url'), ('title', 'title'), ('role', 'role')):
            raw_value = elem.attrib.get(nspath(attr_name, XLINK_NAMESPACE))
            if raw_value is not None:
                setattr(self, member, testXMLValue(raw_value, True))
class Process(object):
    """
    Class that represents a WPS process.
    """
    def __init__(self, elem, verbose=False):
        """ Initialization method extracts all available metadata from an XML document (passed in as etree object)

        :param elem: either a <ProcessDescription> or a <Process> element.
        :param verbose: when True, dump() the parsed metadata for debugging.
        """
        # <ns0:ProcessDescriptions service="WPS" version="1.0.0"
        #   xsi:schemaLocation="http://www.opengis.net/wps/1.0.0 http://schemas.opengis.net/wps/1.0.0/wpsDescribeProcess_response.xsd" # noqa
        #   xml:lang="en-US" xmlns:ns0="http://www.opengis.net/wps/1.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> # noqa
        # OR:
        # <ns0:Process ns0:processVersion="1.0.0">
        self._root = elem
        self.verbose = verbose
        wpsns = getNamespace(elem)
        # helper: tri-state boolean attribute ("true"/"false"/absent -> None)
        def get_bool_attribute(elem, attribute):
            property = elem.get(attribute, '').lower()
            if property == 'true':
                value = True
            elif property == 'false':
                value = False
            else:
                value = None
            return value
        # <ProcessDescription statusSupported="true" storeSupported="true" ns0:processVersion="1.0.0">
        self.processVersion = elem.get(nspath('processVersion', ns=wpsns))
        self.statusSupported = get_bool_attribute(elem, "statusSupported")
        self.storeSupported = get_bool_attribute(elem, "storeSupported")
        self.identifier = None
        self.title = None
        self.abstract = None
        self.metadata = []
        for child in elem:
            # this element's namespace
            # TODO: ns not used
            # ns = getNamespace(child)
            # <ows:Identifier xmlns:ows="http://www.opengis.net/ows/1.1">
            #   gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm
            # </ows:Identifier>
            if child.tag.endswith('Identifier'):
                self.identifier = testXMLValue(child)
            # <ows:Title xmlns:ows="http://www.opengis.net/ows/1.1">
            #   Feature Weighted Grid Statistics
            # </ows:Title>
            elif child.tag.endswith('Title'):
                self.title = testXMLValue(child)
            # <ows:Abstract xmlns:ows="http://www.opengis.net/ows/1.1">
            #   This algorithm generates area weighted statistics of a gridded dataset for
            #   a set of vector polygon features. Using the bounding-box that encloses
            #   the feature data and the time range, if provided, a subset of the gridded dataset
            #   is requested from the remote gridded data server.
            #   Polygon representations are generated for cells in the retrieved grid.
            #   The polygon grid-cell representations are then projected to the feature data
            #   coordinate reference system. The grid-cells are used to calculate per grid-cell
            #   feature coverage fractions. Area-weighted statistics are then calculated for each feature
            #   using the grid values and fractions as weights. If the gridded dataset has a time range
            #   the last step is repeated for each time step within the time range or all time steps
            #   if a time range was not supplied.
            # </ows:Abstract>
            elif child.tag.endswith('Abstract'):
                self.abstract = testXMLValue(child)
            # <ows:Metadata xlink:title="Documentation" xlink:href="http://emu.readthedocs.org/en/latest/"/>
            elif child.tag.endswith('Metadata'):
                self.metadata.append(Metadata(child))
        if self.verbose is True:
            dump(self)
        # <DataInputs>  (only present in DescribeProcess responses)
        self.dataInputs = []
        for inputElement in elem.findall('DataInputs/Input'):
            self.dataInputs.append(Input(inputElement))
            if self.verbose is True:
                dump(self.dataInputs[-1], prefix='\tInput: ')
        # <ProcessOutputs>
        self.processOutputs = []
        for outputElement in elem.findall('ProcessOutputs/Output'):
            self.processOutputs.append(Output(outputElement))
            if self.verbose is True:
                dump(self.processOutputs[-1], prefix='\tOutput: ')
    def __str__(self):
        return "WPS Process: {}, title={}".format(self.identifier or '', self.title or '')
    def __repr__(self):
        return "<owslib.wps.Process {}>".format(self.identifier or '')
class BoundingBoxDataInput(object):
    """
    Data input class for ``wps:BoundingBoxData``.

    :param list data: Coordinates of lower and upper corner. Example [10, 50, 20, 60]
        with lower corner=[10, 50] and upper corner=[20, 60].
    :param str crs: Name of coordinate reference system. Default: "epsg:4326".
    """

    def __init__(self, data, crs=None, dimensions=2):
        if isinstance(data, list):
            self.data = data
        else:
            # convenience: also accept a comma-separated string of numbers
            self.data = [float(token) for token in data.split(',')]
        # first two values form the lower corner, next two the upper corner
        self.lower_corner = tuple(self.data[:2])
        self.upper_corner = tuple(self.data[2:4])
        self.dimensions = dimensions
        self.crs = crs if crs else 'epsg:4326'

    def get_xml(self):
        """
        Serialize this bounding box as a ``wps:Data`` XML element, suitable
        for insertion into the WPS Execute request sent to the server.
        """
        # <wps:Data>
        #   <wps:BoundingBoxData crs="EPSG:4326" dimensions="2">
        #     <ows:LowerCorner>51.9 7.0</ows:LowerCorner>
        #     <ows:UpperCorner>53.0 8.0</ows:UpperCorner>
        #   </wps:BoundingBoxData>
        # </wps:Data>
        data_el = etree.Element(nspath_eval('wps:Data', namespaces))
        bbox_attrs = dict()
        if self.crs:
            bbox_attrs['crs'] = self.crs
        if self.dimensions:
            bbox_attrs['dimensions'] = str(self.dimensions)
        bbox_el = etree.SubElement(
            data_el, nspath_eval('wps:BoundingBoxData', namespaces), attrib=bbox_attrs)
        lower_el = etree.SubElement(
            bbox_el, nspath_eval('ows:LowerCorner', namespaces))
        lower_el.text = "{0} {1}".format(*self.lower_corner)
        upper_el = etree.SubElement(
            bbox_el, nspath_eval('ows:UpperCorner', namespaces))
        upper_el.text = "{0} {1}".format(*self.upper_corner)
        return data_el
class ComplexDataInput(IComplexDataInput, ComplexData):
    """ComplexData input carrying either an inline value or a URL reference,
    together with an optional (mimeType, encoding, schema) format triple."""

    def __init__(self, value, mimeType=None, encoding=None, schema=None):
        super(ComplexDataInput, self).__init__(
            mimeType=mimeType, encoding=encoding, schema=schema)
        self.value = value

    def getXml(self):
        """Return the XML serialization appropriate for this input's value."""
        # URL values are serialized as wps:Reference, anything else inline
        if is_reference(self.value):
            return self.complexDataAsReference()
        return self.complexDataRaw()

    def _format_attrib(self, attrib):
        # helper: copy the optional format triple into an attribute dict
        if self.encoding:
            attrib['encoding'] = self.encoding
        if self.schema:
            attrib['schema'] = self.schema
        if self.mimeType:
            attrib['mimeType'] = self.mimeType
        return attrib

    def complexDataAsReference(self):
        """
        <wps:Reference xlink:href="http://somewhere/test.xml"/>
        """
        attrib = self._format_attrib(
            {nspath_eval("xlink:href", namespaces): self.value})
        return etree.Element(nspath_eval('wps:Reference', namespaces), attrib)

    def complexDataRaw(self):
        '''
        <wps:Data>
            <wps:ComplexData mimeType="text/xml" encoding="UTF-8"
                schema="http://schemas.opengis.net/gml/3.1.1/base/feature.xsd">
            </wps:ComplexData>
        </wps:Data>
        '''
        data_el = etree.Element(nspath_eval('wps:Data', namespaces))
        complex_el = etree.SubElement(
            data_el, nspath_eval('wps:ComplexData', namespaces),
            attrib=self._format_attrib(dict()))
        complex_el.text = self.value
        return data_el
class FeatureCollection(IComplexDataInput):
    '''
    Base class to represent a Feature Collection used as input to a WPS request.
    The method getXml() is invoked by the WPS execute() method to build the WPS request.
    All subclasses must implement the getXml() method to provide their specific XML.

    Implements IComplexDataInput.
    '''

    def __init__(self):
        # no state of its own; subclasses hold the collection details
        pass

    def getXml(self):
        # abstract: concrete subclasses must supply the serialization
        raise NotImplementedError
class WFSFeatureCollection(FeatureCollection):
    '''
    FeatureCollection specified by a WFS query.
    All subclasses must implement the getQuery() method to provide the specific query portion of the XML.
    '''
    def __init__(self, wfsUrl, wfsQuery, wfsMethod=None):
        '''
        wfsUrl: the WFS service URL
                example: wfsUrl = "http://igsarm-cida-gdp2.er.usgs.gov:8082/geoserver/wfs"
        wfsQuery : a WFS query instance
        wfsMethod: optional HTTP method attribute written onto the
                   wps:Reference element (omitted when None)
        '''
        self.url = wfsUrl
        self.query = wfsQuery
        self.method = wfsMethod
    # <wps:Reference xlink:href="http://igsarm-cida-gdp2.er.usgs.gov:8082/geoserver/wfs">
    #   <wps:Body>
    #     <wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" xmlns:ogc="http://www.opengis.net/ogc" xmlns:gml="http://www.opengis.net/gml" service="WFS" version="1.1.0" outputFormat="text/xml; subtype=gml/3.1.1" xsi:schemaLocation="http://www.opengis.net/wfs ../wfs/1.1.0/WFS.xsd"> # noqa
    #        .......
    #     </wfs:GetFeature>
    #   </wps:Body>
    # </wps:Reference>
    def getXml(self):
        """Serialize as a wps:Reference wrapping a wfs:GetFeature request
        whose query portion is delegated to the wrapped WFS query object."""
        root = etree.Element(nspath_eval('wps:Reference', namespaces),
                             attrib={nspath_eval("xlink:href", namespaces): self.url})
        if self.method:
            root.attrib['method'] = self.method
        bodyElement = etree.SubElement(
            root, nspath_eval('wps:Body', namespaces))
        getFeatureElement = etree.SubElement(
            bodyElement, nspath_eval('wfs:GetFeature', namespaces),
            attrib={"service": "WFS",
                    "version": "1.1.0",
                    "outputFormat": "text/xml; subtype=gml/3.1.1",
                    nspath_eval("xsi:schemaLocation", namespaces): "%s %s" % (namespaces['wfs'], WFS_SCHEMA_LOCATION)})
        # <wfs:Query typeName="sample:CONUS_States">
        #   <wfs:PropertyName>the_geom</wfs:PropertyName>
        #   <wfs:PropertyName>STATE</wfs:PropertyName>
        #   <ogc:Filter>
        #     <ogc:GmlObjectId gml:id="CONUS_States.508"/>
        #   </ogc:Filter>
        # </wfs:Query>
        getFeatureElement.append(self.query.getXml())
        return root
class WFSQuery(IComplexDataInput):
    '''
    Class representing a WFS query, for insertion into a WFSFeatureCollection instance.

    Implements IComplexDataInput.

    :param typeName: the WFS feature type name, e.g. "sample:CONUS_States".
    :param propertyNames: optional list of property names to select.
    :param filters: optional list of GML object ids used to build an ogc:Filter.
    '''
    def __init__(self, typeName, propertyNames=None, filters=None):
        # BUGFIX: default to None instead of mutable [] defaults -- a shared
        # default list would be mutated across ALL WFSQuery instances
        # (classic Python mutable-default pitfall).
        self.typeName = typeName
        self.propertyNames = propertyNames if propertyNames is not None else []
        self.filters = filters if filters is not None else []
    def getXml(self):
        """Serialize this query as a wfs:Query element."""
        # <wfs:Query typeName="sample:CONUS_States">
        #   <wfs:PropertyName>the_geom</wfs:PropertyName>
        #   <wfs:PropertyName>STATE</wfs:PropertyName>
        #   <ogc:Filter>
        #     <ogc:GmlObjectId gml:id="CONUS_States.508"/>
        #   </ogc:Filter>
        # </wfs:Query>
        queryElement = etree.Element(
            nspath_eval('wfs:Query', namespaces), attrib={"typeName": self.typeName})
        for propertyName in self.propertyNames:
            propertyNameElement = etree.SubElement(
                queryElement, nspath_eval('wfs:PropertyName', namespaces))
            propertyNameElement.text = propertyName
        if len(self.filters) > 0:
            filterElement = etree.SubElement(
                queryElement, nspath_eval('ogc:Filter', namespaces))
            for gml_id in self.filters:  # renamed: do not shadow builtin `filter`
                # gmlObjectIdElement
                etree.SubElement(
                    filterElement, nspath_eval('ogc:GmlObjectId', namespaces),
                    attrib={nspath_eval('gml:id', namespaces): gml_id})
        return queryElement
class GMLMultiPolygonFeatureCollection(FeatureCollection):
'''
Class that represents a FeatureCollection defined as a GML multi-polygon.
'''
    def __init__(self, polygons):
        '''
        Initializer accepts an array of polygons, where each polygon is an array of (lat,lon) tuples.
        Example: polygons = [ [(-102.8184, 39.5273), (-102.8184, 37.418), (-101.2363, 37.418), (-101.2363, 39.5273), (-102.8184, 39.5273)], # noqa
                              [(-92.8184, 39.5273), (-92.8184, 37.418), (-91.2363, 37.418), (-91.2363, 39.5273), (-92.8184, 39.5273)] ]
        '''
        # store the raw polygon list; serialized to GML by getXml()
        self.polygons = polygons
def getXml(self):
'''
<wps:Data>
<wps:ComplexData mimeType="text/xml" encoding="UTF-8"
schema="http://schemas.opengis.net/gml/3.1.1/base/feature.xsd">
<gml:featureMembers xmlns:ogc="http://www.opengis.net/ogc"
xmlns:draw="gov.usgs.cida.gdp.draw" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:ows="http://www.opengis.net/ows" xmlns:gml="http://www.opengis.net/gml"
xmlns:xlink="http://www.w3.org/1999/xlink"
xsi:schemaLocation="gov.usgs.cida.gdp.draw http://cida.usgs.gov/climate/derivative/xsd/draw.xsd"> # noqa
<gml:box gml:id="box.1">
<gml:the_geom>
<gml:MultiPolygon srsDimension="2"
srsName="http://www.opengis.net/gml/srs/epsg.xml#4326">
<gml:polygonMember>
<gml:Polygon>
<gml:exterior>
<gml:LinearRing>
<gml:posList>-102.8184 39.5273 -102.8184 37.418 -101.2363 37.418 -101.2363 39.5273 -102.8184 39.5273</gml:posList>
</gml:LinearRing>
</gml:exterior>
</gml:Polygon>
</gml:polygonMember>
</gml:MultiPolygon>
</gml:the_geom>
<gml:ID>0</gml:ID>
</gml:box>
</gml:featureMembers>
</wps:ComplexData>
</wps:Data>
'''
dataElement = etree.Element(nspath_eval('wps:Data', namespaces))
complexDataElement = etree.SubElement(
dataElement, nspath_eval('wps:ComplexData', namespaces),
attrib={"mimeType": "text/xml", "schema": GML_SCHEMA_LOCATION})
featureMembersElement = etree.SubElement(
complexDataElement, nspath_eval('gml:featureMembers', namespaces),
attrib={nspath_eval("xsi:schemaLocation", namespaces): "%s %s" % (DRAW_NAMESPACE, DRAW_SCHEMA_LOCATION)})
boxElement = etree.SubElement(featureMembersElement, nspath_eval(
'gml:box', namespaces), attrib={nspath_eval("gml:id", namespaces): "box.1"})
geomElement = etree.SubElement(
boxElement, nspath_eval('gml:the_geom', namespaces))
multiPolygonElement = etree.SubElement(
geomElement, nspath_eval('gml:MultiPolygon', namespaces),
attrib={"srsDimension": "2", "srsName": "http://www.opengis.net/gml/srs/epsg.xml#4326"})
for polygon in self.polygons:
polygonMemberElement = etree.SubElement(
multiPolygonElement, nspath_eval('gml:polygonMember', namespaces))
polygonElement = etree.SubElement(
polygonMemberElement, nspath_eval('gml:Polygon', namespaces))
exteriorElement = etree.SubElement(
polygonElement, nspath_eval('gml:exterior', namespaces))
linearRingElement = etree.SubElement(
exteriorElement, nspath_eval('gml:LinearRing', namespaces))
posListElement = etree.SubElement(
linearRingElement, nspath_eval('gml:posList', namespaces))
posListElement.text = ' '.join(
["%s %s" % (x, y) for x, y in polygon[:]])
idElement = etree.SubElement(
boxElement, nspath_eval('gml:ID', namespaces))
idElement.text = "0"
return dataElement
def monitorExecution(execution, sleepSecs=3, download=False, filepath=None):
    '''
    Convenience method to monitor the status of a WPS execution till it completes
    (successfully or not), and write the output to file after a successful job completion.

    :param execution: WPSExecution instance
    :param int sleepSecs: number of seconds to sleep in between check status invocations
    :param download: True to download the output when the process terminates, False otherwise
    :param filepath: optional path to output file (if download=True), otherwise filepath
        will be inferred from response document
    '''
    # Poll the server until it reports a terminal state.
    while not execution.isComplete():
        execution.checkStatus(sleepSecs=sleepSecs)
        # Lazy %-style logging args: the message is only formatted when the
        # record is actually emitted.
        log.info('Execution status: %s', execution.status)

    if execution.isSucceded():  # (sic) spelling is the owslib API name
        if download:
            execution.getOutput(filepath=filepath)
        else:
            # No download requested: just report where each output lives.
            for output in execution.processOutputs:
                if output.reference is not None:
                    log.info('Output URL=%s', output.reference)
    else:
        for ex in execution.errors:
            log.error('Error: code=%s, locator=%s, text=%s',
                      ex.code, ex.locator, ex.text)
def printValue(value):
    '''
    Utility method to format a value for printing.
    '''
    # Plain values pass through unchanged.
    if not isinstance(value, ComplexData):
        return value
    # ComplexData type: summarize its metadata fields.
    return "mimeType=%s, encoding=%s, schema=%s" % (
        value.mimeType, value.encoding, value.schema)
def printInputOutput(value, indent=''):
    '''
    Utility method to inspect an input/output element.

    :param value: owslib Input or Output instance to describe
    :param indent: string prefixed to each printed line
    '''
    # Fields common to both Input and Output. The redundant double
    # parentheses (a 2to3 conversion artifact) are removed: print((x))
    # is identical to print(x).
    print('{} identifier={}, title={}, abstract={}, data type={}'.format(
        indent, value.identifier, value.title, value.abstract, value.dataType))
    for val in value.allowedValues:
        print('{} Allowed Value: {}'.format(indent, printValue(val)))
    if value.anyValue:
        print(' Any value allowed')
    for val in value.supportedValues:
        print('{} Supported Value: {}'.format(indent, printValue(val)))
    print('{} Default Value: {} '.format(indent, printValue(value.defaultValue)))
    # Input-only fields
    if isinstance(value, Input):
        print('{} minOccurs={}, maxOccurs={}'.format(
            indent, value.minOccurs, value.maxOccurs))
    # Output-only fields
    if isinstance(value, Output):
        print('{} reference={}, mimeType={}'.format(
            indent, value.reference, value.mimeType))
        for datum in value.data:
            print('{} Data Value: {}'.format(indent, printValue(datum)))
class Languages(object):
    """Initialize a WPS Languages construct"""

    def __init__(self, infoset):
        self._root = infoset
        # Single default language plus the list of supported languages,
        # parsed from the Default/Supported children of the infoset.
        self.default = None
        self.supported = []
        for child in self._root:
            if child.tag.endswith('Default'):
                self.default = testXMLValue(child[0])
            elif child.tag.endswith('Supported'):
                self.supported.extend(testXMLValue(lang) for lang in child)

    def __repr__(self):
        return "<owslib.wps.Languages default='{}' supported={}>".format(
            self.default, self.supported)
| bsd-3-clause |
RyokoAkizuki/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/actions/gyptest-default.py | 243 | 2407 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple actions when using the default build target.
"""
import TestGyp
# Generate build files in an isolated workspace, then relocate the sources so
# the generated files' relative paths get exercised.
test = TestGyp.TestGyp(workdir='workarea_default')
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
# Some gyp files use an action that mentions an output but never
# writes it as a means to making the action run on every build. That
# doesn't mesh well with ninja's semantics. TODO(evan): figure out
# how to work always-run actions in to ninja.
# Android also can't do this as it doesn't have order-only dependencies.
if test.format in ['ninja', 'android']:
  test.build('actions.gyp', test.ALL, chdir='relocate/src')
else:
  # Test that an "always run" action increases a counter on multiple
  # invocations, and that a dependent action updates in step.
  test.build('actions.gyp', chdir='relocate/src')
  test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '1')
  test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '1')
  test.build('actions.gyp', chdir='relocate/src')
  test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
  test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
  # The "always run" action only counts to 2, but the dependent target
  # will count forever if it's allowed to run. This verifies that the
  # dependent target only runs when the "always run" action generates
  # new output, not just because the "always run" ran.
  test.build('actions.gyp', test.ALL, chdir='relocate/src')
  test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
  test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
# Verify the generated programs produce the expected output.
expect = """\
Hello from program.c
Hello from make-prog1.py
Hello from make-prog2.py
"""
# xcode places built executables under the subdirectory's own build dir.
if test.format == 'xcode':
  chdir = 'relocate/src/subdir1'
else:
  chdir = 'relocate/src'
test.run_built_executable('program', chdir=chdir, stdout=expect)
test.must_match('relocate/src/subdir2/file.out', "Hello from make-file.py\n")
expect = "Hello from generate_main.py\n"
if test.format == 'xcode':
  chdir = 'relocate/src/subdir3'
else:
  chdir = 'relocate/src'
test.run_built_executable('null_input', chdir=chdir, stdout=expect)
test.pass_test()
| gpl-3.0 |
Superchicken1/SambaFlow | python/traffic-prediction/src/models/complete_vector/NN.py | 1 | 1277 | from keras.layers import Dense
from keras.models import Sequential
from sklearn import preprocessing
from src.misc.evaluation import mape
import numpy as np
import pandas as pd
# Load pre-split feature (X) and target (Y) matrices; column 0 is the index.
x_train = pd.read_csv('train_X.csv', index_col=0)
x_test = pd.read_csv('test_X.csv', index_col=0)
y_train = pd.read_csv('train_Y.csv', index_col=0)
y_test = pd.read_csv('test_Y.csv', index_col=0)
# Network layer sizes are taken from the data's column counts.
x_dim = len(x_train.columns)
y_dim = len(y_train.columns)
x_train = x_train.as_matrix()
x_test = x_test.as_matrix()
y_train = y_train.as_matrix()
y_test = y_test.as_matrix()
# Fit the scaler on train+test combined so both splits share one [0, 1] range.
# NOTE(review): fitting on the test set leaks test statistics into scaling —
# confirm this is intentional.
min_max_scaler = preprocessing.MinMaxScaler()
min_max_scaler.fit(np.concatenate((x_train, x_test)))
X_train_scale = min_max_scaler.transform(x_train)
X_test_scale = min_max_scaler.transform(x_test)
# Three-layer fully-connected network.
# NOTE(review): 'input_dim'/'output_dim' are legacy Keras 1.x keyword
# arguments — confirm the pinned Keras version still accepts them.
model = Sequential()
model.add(Dense(input_dim=x_dim, output_dim=100, activation='relu'))
model.add(Dense(input_dim=100, output_dim=200,activation='relu'))
model.add(Dense(input_dim=200, output_dim=y_dim,activation='relu'))
model.compile(loss='mean_absolute_percentage_error', optimizer='rmsprop')
model.fit(X_train_scale, y_train,
batch_size=1, epochs=50, verbose=2,
validation_data=(X_test_scale, y_test), shuffle=False)
# Evaluate on the scaled test set.
y = model.predict(X_test_scale, batch_size=1)
# NOTE(review): this rebinds the name 'mape' from the imported function to
# its own result — it works once, but the function is no longer callable
# afterwards; a distinct variable name would be safer.
mape = mape(y, y_test)
print(mape) | apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.