repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
richpolis/siveinpy | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/mbcharsetprober.py | 2924 | 3268 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
    """Common logic for multi-byte charset probers.

    Couples a coding state machine (which validates byte sequences for
    the target encoding) with a character distribution analyzer (which
    scores how typical the decoded characters look).  Subclasses supply
    both collaborators and the charset name.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        self._mDistributionAnalyzer = None
        self._mCodingSM = None
        self._mLastChar = [0, 0]

    def reset(self):
        """Reset the prober together with its state machine and analyzer."""
        CharSetProber.reset(self)
        if self._mCodingSM:
            self._mCodingSM.reset()
        if self._mDistributionAnalyzer:
            self._mDistributionAnalyzer.reset()
        self._mLastChar = [0, 0]

    def get_charset_name(self):
        # Abstract: concrete probers return their charset name.
        pass

    def feed(self, aBuf):
        """Feed a buffer of bytes and return the updated detection state."""
        for i, cur_byte in enumerate(aBuf):
            coding_state = self._mCodingSM.next_state(cur_byte)
            if coding_state == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            if coding_state == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            if coding_state == constants.eStart:
                char_len = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # First byte of this buffer pairs with the last byte
                    # carried over from the previous feed().
                    self._mLastChar[1] = aBuf[0]
                    self._mDistributionAnalyzer.feed(self._mLastChar, char_len)
                else:
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     char_len)
        # Remember the trailing byte so a character split across buffers
        # can still be analyzed on the next feed().
        self._mLastChar[0] = aBuf[-1]
        if self.get_state() == constants.eDetecting:
            if (self._mDistributionAnalyzer.got_enough_data()
                    and self.get_confidence() > constants.SHORTCUT_THRESHOLD):
                # Confident enough to shortcut the rest of detection.
                self._mState = constants.eFoundIt
        return self.get_state()

    def get_confidence(self):
        """Delegate confidence scoring to the distribution analyzer."""
        return self._mDistributionAnalyzer.get_confidence()
| mit |
bestvibes/neo4j-social-network | env/lib/python2.7/encodings/hex_codec.py | 528 | 2309 | """ Python 'hex_codec' Codec - 2-digit hex content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs, binascii
### Codec APIs
def hex_encode(input, errors='strict'):
    """Encode *input* as 2-digit hex.

    Returns a ``(output object, length consumed)`` tuple.  The *errors*
    argument defines the error handling to apply; 'strict' is the only
    handling this codec supports.
    """
    assert errors == 'strict'
    encoded = binascii.b2a_hex(input)
    return (encoded, len(input))
def hex_decode(input, errors='strict'):
    """Decode 2-digit-hex *input*.

    Returns a ``(output object, length consumed)`` tuple.  *input* must
    be an object exposing the read-buffer interface (strings, buffer
    objects and memory-mapped files all qualify).  Only 'strict' error
    handling is supported.
    """
    assert errors == 'strict'
    decoded = binascii.a2b_hex(input)
    return (decoded, len(input))
class Codec(codecs.Codec):
    """Stateless codec interface: delegates to hex_encode/hex_decode."""
    def encode(self, input,errors='strict'):
        return hex_encode(input,errors)
    def decode(self, input,errors='strict'):
        return hex_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental hex encoder.

    Hex encoding is stateless (each byte maps to exactly two digits),
    so every chunk can be encoded independently of *final*.
    """
    def encode(self, input, final=False):
        assert self.errors == 'strict'
        hex_out = binascii.b2a_hex(input)
        return hex_out
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental hex decoder.

    Stateless like the encoder; each chunk is decoded on its own and
    *final* carries no meaning.
    """
    def decode(self, input, final=False):
        assert self.errors == 'strict'
        raw = binascii.a2b_hex(input)
        return raw
class StreamWriter(Codec,codecs.StreamWriter):
    # The stream API needs no extra state: Codec supplies encode/decode.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Same as StreamWriter: pure mix-in of Codec and the stream machinery.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry used by the encodings-package
    search function to register the 'hex' codec."""
    return codecs.CodecInfo(
        name='hex',
        encode=hex_encode,
        decode=hex_decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
| mit |
DarkPrince304/MozDef | bot/mozdefbot.py | 6 | 14859 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Jeff Bryner jbryner@mozilla.com
"""mozdef bot using KitnIRC."""
import json
import kitnirc.client
import kitnirc.modular
import kombu
import logging
import netaddr
import os
import pygeoip
import pytz
import random
import select
import sys
import threading
from configlib import getConfig, OptionParser
from datetime import datetime
from dateutil.parser import parse
from kombu import Connection, Queue, Exchange
from kombu.mixins import ConsumerMixin
logger = logging.getLogger()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
greetz = ["mozdef bot in da house",
"mozdef here..what's up",
"mozdef has joined the room..no one panic",
"mozdef bot here..nice to see everyone"]
panics = ["don't panic",
".. a towel has immense psychological value",
"..but in fact the message was this: 'So Long, and Thanks for All the Fish.'",
"42",
"What I need..is a strong drink and a peer group --Douglas Adams",
"Eddies in the space-time continuum.",
"segmentation fault..SEP"
]
if os.path.isfile('quotes.txt'):
quotes = open('quotes.txt').readlines()
else:
quotes = ['nothing to say..add a quotes.txt file!']
colors = {'red': '\x034\x02',
'normal': '\x03\x02',
'blue': '\x032\x02',
'green': '\x033\x02',
'yellow': '\x038\x02',
}
keywords = {'INFORMATIONAL': colors['green'],
'INFO': colors['green'],
'WARNING': colors['yellow'],
'CRITICAL': colors['red'],
}
def colorify(data):
    """Wrap the first occurrence of each known severity keyword in its
    IRC color code, terminated by the 'normal' reset sequence."""
    reset = colors['normal']
    for keyword, color_code in keywords.items():
        data = data.replace(keyword, color_code + keyword + reset, 1)
    return data
def run_async(func):
    """Function decorator: make *func* run in a separate thread
    (asynchronously).

    The wrapped callable starts a ``threading.Thread`` targeting the
    original function and returns the Thread object, so callers may
    ``join()`` it or simply fire and forget::

        @run_async
        def task1():
            do_something

        t1 = task1()
        t1.join()

    From: http://code.activestate.com/recipes/576684-simple-threading-decorator/
    """
    from threading import Thread
    from functools import wraps

    @wraps(func)
    def _background(*args, **kwargs):
        worker = Thread(target=func, args=args, kwargs=kwargs)
        worker.start()
        return worker

    return _background
def toUTC(suspectedDate, localTimeZone=None):
    """Make a UTC date out of almost anything.

    suspectedDate: a date string (parsed fuzzily) or a datetime object.
    localTimeZone: timezone name assumed for naive datetimes; defaults
        to options.defaultTimeZone.
    Returns a timezone-aware datetime normalized to UTC, or None when
    the input type could not be interpreted.
    """
    utc = pytz.UTC
    if localTimeZone is None:
        localTimeZone = options.defaultTimeZone
    objDate = None
    if isinstance(suspectedDate, str):
        objDate = parse(suspectedDate, fuzzy=True)
    elif isinstance(suspectedDate, datetime):
        objDate = suspectedDate
    # Bug fix: the original dereferenced objDate.tzinfo before checking
    # for None, so any unsupported input type raised AttributeError
    # instead of returning None.
    if objDate is not None:
        if objDate.tzinfo is None:
            # Naive timestamp: interpret it in the configured local zone.
            objDate = pytz.timezone(localTimeZone).localize(objDate)
        objDate = utc.normalize(objDate)
    return objDate
def getQuote():
    """Return a random line from the quotes list, attributed to Mos Def."""
    selected = random.choice(quotes).strip()
    return '{0} --Mos Def'.format(selected)
def isIP(ip):
    """Return True if *ip* parses as an IP address or CIDR network."""
    try:
        netaddr.IPNetwork(ip)
        return True
    except Exception:
        # Narrowed from a bare except: the bare clause also swallowed
        # SystemExit/KeyboardInterrupt.
        return False
def ipLocation(ip):
    """Best-effort GeoIP lookup.

    Returns 'Country' (plus '/metro_code' for US addresses), or '' when
    the GeoIP database or the record is unavailable.
    """
    location = ""
    try:
        gi = pygeoip.GeoIP('GeoLiteCity.dat', pygeoip.MEMORY_CACHE)
        geoDict = gi.record_by_addr(str(netaddr.IPNetwork(ip)[0]))
        if geoDict is not None:
            location = geoDict['country_name']
            # Bug fix: the original wrote `in ('US')`, which is a plain
            # string, so the membership test matched any substring of
            # "US" rather than the exact country code.
            if geoDict['country_code'] == 'US':
                if geoDict['metro_code']:
                    location = location + '/{0}'.format(geoDict['metro_code'])
    except Exception:
        location = ""
    return location
def formatAlert(jsonDictIn):
    """Render an alert dict as a colorized one-line IRC message:
    '<severity>: <category> <summary>'."""
    severity = jsonDictIn.get('severity', 'INFO')
    summary = jsonDictIn.get('summary', '')
    category = jsonDictIn.get('category', '')
    colored_category = colors['blue'] + category + colors['normal']
    return colorify('{0}: {1} {2}'.format(severity, colored_category, summary))
class mozdefBot():
    """IRC bot that joins the configured channels, answers a few
    ``!`` commands and relays mozdef alerts read from the message queue."""

    def __init__(self):
        # Logging initialization
        self.log_handler = logging.StreamHandler()
        self.log_formatter = logging.Formatter("%(asctime)s %(message)s")
        self.log_handler.setFormatter(self.log_formatter)
        self.root_logger = logging.getLogger()
        self.root_logger.addHandler(self.log_handler)
        self.root_logger.setLevel(logging.INFO)

        self.client = kitnirc.client.Client(options.host, options.port)
        self.controller = kitnirc.modular.Controller(self.client, options.configfile)
        self.controller.load_config()
        self.controller.start()
        self.client.root_logger = self.root_logger
        self.client.connect(
            nick=options.nick,
            username=options.username or options.nick,
            realname=options.realname or options.username or options.nick,
            password=options.password,
            ssl=True
        )
        # set by the alertConsumer once it starts; used for shutdown
        self.mqConsumer = None

    def run(self):
        """Register the IRC event handlers, then hand control to the
        kitnirc client loop until interrupted or a fatal error occurs."""
        try:
            @self.client.handle('WELCOME')
            def join_channels(client, *params):
                if not options.join:
                    return
                for chan in options.join.split(","):
                    if chan in options.channelkeys.keys():
                        client.join(chan, options.channelkeys[chan])
                    else:
                        client.join(chan)
                # start the mq consumer (runs in its own thread)
                consumeAlerts(self)

            @self.client.handle('LINE')
            def line_handler(client, *params):
                try:
                    # Bug fix: the original logged an undefined name
                    # 'line' (guaranteed NameError); kitnirc supplies the
                    # raw IRC line as the first positional parameter.
                    self.root_logger.debug(
                        'linegot:' + str(params[0] if params else ''))
                except AttributeError:
                    # catch error in kitnirc: chan.remove(actor) where
                    # channel object has no attribute remove
                    pass

            @self.client.handle('PRIVMSG')
            def priv_handler(client, actor, recipient, message):
                self.root_logger.debug(
                    'privmsggot:' + message + ' from ' + actor)
                if "!help" in message:
                    self.client.msg(
                        recipient, "Help on it's way...try these:")
                    self.client.msg(
                        recipient, "!quote --get a quote from my buddy Mos Def")
                    self.client.msg(recipient, "!panic --panic (or not )")
                    self.client.msg(
                        recipient, "!ipinfo --do a geoip lookup on an ip address")
                if "!quote" in message:
                    self.client.msg(recipient, getQuote())
                if "!panic" in message:
                    self.client.msg(recipient, random.choice(panics))
                if "!ipinfo" in message:
                    for i in message.split():
                        if isIP(i):
                            ip = netaddr.IPNetwork(i)[0]
                            if (not ip.is_loopback() and not ip.is_private() and not ip.is_reserved()):
                                self.client.msg(
                                    recipient, "{0} location: {1}".format(i, ipLocation(i)))
                            else:
                                self.client.msg(
                                    recipient, "{0}: hrm..loopback? private ip?".format(i))

            @self.client.handle('JOIN')
            def join_handler(client, user, channel, *params):
                self.root_logger.debug('%r' % channel)
                if user.nick == options.nick:
                    # greet the channel when we are the one joining
                    self.client.msg(channel, colorify(random.choice(greetz)))

            self.client.run()
        except KeyboardInterrupt:
            # clean shutdown on ctrl-c: drop IRC and stop the MQ consumer
            self.client.disconnect()
            if self.mqConsumer:
                try:
                    self.mqConsumer.should_stop = True
                except Exception:
                    # consumer may already be gone; best-effort stop
                    pass
        except Exception as e:
            self.client.root_logger.error('bot error..quitting {0}'.format(e))
            self.client.disconnect()
            if self.mqConsumer:
                try:
                    self.mqConsumer.should_stop = True
                except Exception:
                    pass
class alertConsumer(ConsumerMixin):
    '''read in alerts and hand back to the
    kitnirc class for publishing
    '''

    def __init__(self, mqAlertsConnection, alertQueue, alertExchange, ircBot):
        self.connection = mqAlertsConnection  # default connection for the kombu mixin
        self.alertsConnection = mqAlertsConnection
        self.alertQueue = alertQueue
        self.alertExchange = alertExchange
        self.ircBot = ircBot
        # register ourselves with the bot so it can stop us on shutdown
        ircBot.mqConsumer = self

    def get_consumers(self, Consumer, channel):
        # Single consumer on the alert queue; only json payloads accepted.
        consumer = Consumer(
            self.alertQueue,
            callbacks=[self.on_message],
            accept=['json'])
        consumer.qos(prefetch_count=options.prefetch)
        return [consumer]

    def on_message(self, body, message):
        """Validate the message body, pick the target IRC channel and
        relay the formatted alert.  Valid messages are acked."""
        try:
            # just to be safe..check what we were sent.
            if isinstance(body, dict):
                bodyDict = body
            elif isinstance(body, str) or isinstance(body, unicode):
                try:
                    bodyDict = json.loads(body)   # lets assume it's json
                except ValueError as e:
                    # not json..ack but log the message
                    # NOTE(review): despite the comment above, this path
                    # returns WITHOUT calling message.ack() -- confirm
                    # whether unparseable messages should be acked.
                    logger.exception(
                        "alertworker exception: unknown body type received %r" % body)
                    return
            else:
                # neither dict nor string: drop it (also not acked)
                logger.exception(
                    "alertworker exception: unknown body type received %r" % body)
                return
            # process valid message
            # see where we send this alert: the alert may name its own
            # channel, but only channels we actually joined are honored
            ircchannel = options.alertircchannel
            if 'ircchannel' in bodyDict.keys():
                if bodyDict['ircchannel'] in options.join.split(","):
                    ircchannel = bodyDict['ircchannel']
            self.ircBot.client.msg(ircchannel, formatAlert(bodyDict))
            message.ack()
        except ValueError as e:
            logger.exception(
                "alertworker exception while processing events queue %r" % e)
@run_async
def consumeAlerts(ircBot):
    """Declare the alert exchange/queue and run a kombu consumer that
    feeds alerts to *ircBot*.  Runs in a background thread (@run_async)."""
    # connect and declare the message queue/kombu objects.
    # server/exchange/queue
    mqConnString = 'amqp://{0}:{1}@{2}:{3}//'.format(options.mquser,
                                                     options.mqpassword,
                                                     options.mqalertserver,
                                                     options.mqport)
    mqAlertConn = Connection(mqConnString)
    # Exchange for alerts we pass to plugins
    alertExchange = Exchange(name=options.alertExchange,
                             type='topic',
                             durable=True,
                             delivery_mode=1)
    alertExchange(mqAlertConn).declare()
    # Queue for the exchange
    alertQueue = Queue(options.queueName,
                       exchange=alertExchange,
                       routing_key=options.alerttopic,
                       durable=False,
                       no_ack=(not options.mqack))
    alertQueue(mqAlertConn).declare()
    # consume our alerts: blocks this thread until stopped.
    alertConsumer(mqAlertConn, alertQueue, alertExchange, ircBot).run()
def initConfig():
    """Populate the global ``options`` object from the config file,
    falling back to a sane default for every setting."""
    cfg = options.configfile

    # default timezone assumed when parsing naive time strings
    options.defaultTimeZone = getConfig('defaulttimezone', 'US/Pacific', cfg)

    # irc options
    options.host = getConfig('host', 'irc.somewhere.com', cfg)
    options.nick = getConfig('nick', 'mozdefnick', cfg)
    options.port = getConfig('port', 6697, cfg)
    options.username = getConfig('username', 'username', cfg)
    options.realname = getConfig('realname', 'realname', cfg)
    options.password = getConfig('password', '', cfg)
    options.join = getConfig('join', '#mzdf', cfg)
    options.alertircchannel = getConfig('alertircchannel', '', cfg)
    options.channelkeys = json.loads(getConfig(
        'channelkeys', '{"#somechannel": "somekey"}', cfg))

    # message queue options
    options.mqalertserver = getConfig('mqalertserver', 'localhost', cfg)  # server hostname
    options.alertExchange = getConfig('alertexchange', 'alerts', cfg)     # queue exchange name
    options.queueName = getConfig('alertqueuename', 'alertBot', cfg)      # queue name
    options.alerttopic = getConfig('alerttopic', 'mozdef.*', cfg)         # queue topic
    options.prefetch = getConfig('prefetch', 50, cfg)  # messages fetched at once
    options.mquser = getConfig('mquser', 'guest', cfg)
    options.mqpassword = getConfig('mqpassword', 'guest', cfg)
    options.mqport = getConfig('mqport', 5672, cfg)
    # mqack=True sets persistent delivery, False sets transient delivery
    options.mqack = getConfig('mqack', True, cfg)

    # alerts default to the first channel we join
    if options.alertircchannel == '':
        options.alertircchannel = options.join.split(",")[0]
if __name__ == "__main__":
    # log to stderr in addition to any handlers the bot installs
    sh = logging.StreamHandler(sys.stderr)
    sh.setFormatter(formatter)
    logger.addHandler(sh)

    parser = OptionParser()
    parser.add_option(
        "-c", dest='configfile',
        default=sys.argv[0].replace('.py', '.conf'),
        help="configuration file to use")
    (options, args) = parser.parse_args()
    initConfig()

    # run the IRC class
    # which in turn starts the mq consumer
    theBot = mozdefBot()
    theBot.run()

# vim: set ts=4 sts=4 sw=4 et:
| mpl-2.0 |
andela-bojengwa/talk | venv/lib/python2.7/site-packages/pip/_vendor/progress/bar.py | 404 | 2707 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import unicode_literals
from . import Progress
from .helpers import WritelnMixin
class Bar(WritelnMixin, Progress):
    """Classic progress bar: '<message> |#####     | <index>/<max>'."""
    width = 32
    message = ''
    suffix = '%(index)d/%(max)d'
    bar_prefix = ' |'
    bar_suffix = '| '
    empty_fill = ' '
    fill = '#'
    hide_cursor = True

    def update(self):
        """Redraw the bar line to reflect the current progress."""
        filled = int(self.width * self.progress)
        pieces = [
            self.message % self,
            self.bar_prefix,
            self.fill * filled,
            self.empty_fill * (self.width - filled),
            self.bar_suffix,
            self.suffix % self,
        ]
        self.writeln(''.join(pieces))
class ChargingBar(Bar):
    # Minimalist variant: percent suffix, no pipes, dot/block fill glyphs.
    suffix = '%(percent)d%%'
    bar_prefix = ' '
    bar_suffix = ' '
    empty_fill = '∙'
    fill = '█'

class FillingSquaresBar(ChargingBar):
    # Squares that fill in as progress advances.
    empty_fill = '▢'
    fill = '▣'

class FillingCirclesBar(ChargingBar):
    # Circles that fill in as progress advances.
    empty_fill = '◯'
    fill = '◉'
class IncrementalBar(Bar):
    """Bar with sub-character resolution: the boundary cell advances
    through a sequence of partial-fill glyphs."""
    phases = (' ', '▏', '▎', '▍', '▌', '▋', '▊', '▉', '█')

    def update(self):
        nphases = len(self.phases)
        expanded = int(nphases * self.width * self.progress)
        filled = int(self.width * self.progress)
        # which partial glyph the boundary cell shows (0 => none)
        phase = expanded - (filled * nphases)
        edge = self.phases[phase] if phase > 0 else ''
        empty_count = max(0, self.width - filled - len(edge))
        pieces = [
            self.message % self,
            self.bar_prefix,
            self.phases[-1] * filled,
            edge,
            self.empty_fill * empty_count,
            self.bar_suffix,
            self.suffix % self,
        ]
        self.writeln(''.join(pieces))
class ShadyBar(IncrementalBar):
    # Incremental bar drawn with shading block characters.
    phases = (' ', '░', '▒', '▓', '█')
| mit |
atacai/server-tools | auth_admin_passkey/__openerp__.py | 26 | 1572 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Admin Passkey module for OpenERP
# Copyright (C) 2013-2014 GRAP (http://www.grap.coop)
# @author Sylvain LE GAL (https://twitter.com/legalsylvain)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Odoo/OpenERP addon manifest for the Admin Passkey module
    # (declarative metadata only; no executable logic).
    'name': 'Authentification - Admin Passkey',
    'version': '2.1.1',
    'category': 'base',
    'author': "GRAP,Odoo Community Association (OCA)",
    'website': 'http://www.grap.coop',
    'license': 'AGPL-3',
    # modules that must be installed before this one
    'depends': [
        'mail',
    ],
    # XML data files loaded at install/update time
    'data': [
        'data/ir_config_parameter.xml',
        'view/res_config_view.xml',
    ],
    'demo': [],
    'js': [],
    'css': [],
    'qweb': [],
    'images': [],
    'post_load': '',
    'application': False,
    'installable': True,
    'auto_install': False,
}
| agpl-3.0 |
rahul67/hue | desktop/core/src/desktop/lib/wsgiserver.py | 4 | 73699 | #!/usr/bin/env python
# This file stolen and edited from cherrypy, so we should not assign
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# TODO(todd) run diff and contribute changes back to cherrypy?
"""
A high-speed, production ready, thread pooled, generic WSGI server.
Simplest example on how to use this module directly
(without using CherryPy's application machinery):
from cherrypy import wsgiserver
def my_crazy_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return ['Hello world!\n']
server = wsgiserver.CherryPyWSGIServer(
('0.0.0.0', 8070), my_crazy_app,
server_name='www.cherrypy.example')
The CherryPy WSGI server can serve as many WSGI applications
as you want in one instance by using a WSGIPathInfoDispatcher:
d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
Want SSL support? Just set these attributes:
server.ssl_certificate = <filename>
server.ssl_private_key = <filename>
if __name__ == '__main__':
try:
server.start()
except KeyboardInterrupt:
server.stop()
This won't call the CherryPy engine (application side) at all, only the
WSGI server, which is independent from the rest of CherryPy. Don't
let the name "CherryPyWSGIServer" throw you; the name merely reflects
its origin, not its coupling.
For those of you wanting to understand internals of this module, here's the
basic call flow. The server's listening thread runs a very tight loop,
sticking incoming connections onto a Queue:
server = CherryPyWSGIServer(...)
server.start()
while True:
tick()
# This blocks until a request comes in:
child = socket.accept()
conn = HTTPConnection(child, ...)
server.requests.put(conn)
Worker threads are kept in a pool and poll the Queue, popping off and then
handling each connection in turn. Each connection can consist of an arbitrary
number of requests and their responses, so we run a nested loop:
while True:
conn = server.requests.get()
conn.communicate()
-> while True:
req = HTTPRequest(...)
req.parse_request()
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
req.rfile.readline()
req.read_headers()
req.respond()
-> response = wsgi_app(...)
try:
for chunk in response:
if chunk:
req.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if req.close_connection:
return
"""
import base64
import os
import Queue
import re
quoted_slash = re.compile("(?i)%2F")
import rfc822
import socket
try:
import cStringIO as StringIO
except ImportError:
import StringIO
_fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestring)
import sys
import threading
import time
import traceback
from urllib import unquote
from urlparse import urlparse
import warnings
try:
from OpenSSL import SSL
from OpenSSL import crypto
except ImportError:
SSL = None
import errno
import logging
LOG = logging.getLogger(__name__)
def plat_specific_errors(*errnames):
    """Return error numbers for all errors in errnames on this platform.

    The 'errno' module contains different global constants depending on
    the specific platform (OS); only the names that exist here are
    resolved to their numeric values.
    """
    available = dir(errno)
    nums = [getattr(errno, name) for name in errnames if name in available]
    # de-dupe via dict keys, matching the container the callers expect
    return dict.fromkeys(nums).keys()
# Errors meaning "interrupted system call": the operation may be retried.
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")

# Socket errors that simply mean the peer went away; connection-handling
# code treats these as a normal (if abrupt) end of the conversation.
socket_errors_to_ignore = plat_specific_errors(
    "EPIPE",
    "EBADF", "WSAEBADF",
    "ENOTSOCK", "WSAENOTSOCK",
    "ETIMEDOUT", "WSAETIMEDOUT",
    "ECONNREFUSED", "WSAECONNREFUSED",
    "ECONNRESET", "WSAECONNRESET",
    "ECONNABORTED", "WSAECONNABORTED",
    "ENETRESET", "WSAENETRESET",
    "EHOSTDOWN", "EHOSTUNREACH",
)
# NOTE(review): presumably matches the "timed out" message of socket
# timeout errors in some code paths -- confirm against the callers.
socket_errors_to_ignore.append("timed out")

# Errors a non-blocking socket raises when no data is ready yet.
socket_errors_nonblocking = plat_specific_errors(
    'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')

# HTTP headers whose repeated values may be combined into one
# comma-separated header value.
comma_separated_headers = ['ACCEPT', 'ACCEPT-CHARSET', 'ACCEPT-ENCODING',
    'ACCEPT-LANGUAGE', 'ACCEPT-RANGES', 'ALLOW', 'CACHE-CONTROL',
    'CONNECTION', 'CONTENT-ENCODING', 'CONTENT-LANGUAGE', 'EXPECT',
    'IF-MATCH', 'IF-NONE-MATCH', 'PRAGMA', 'PROXY-AUTHENTICATE', 'TE',
    'TRAILER', 'TRANSFER-ENCODING', 'UPGRADE', 'VARY', 'VIA', 'WARNING',
    'WWW-AUTHENTICATE']
class WSGIPathInfoDispatcher(object):
    """A WSGI dispatcher for dispatch based on the PATH_INFO.

    apps: a dict or list of (path_prefix, app) pairs.
    """

    def __init__(self, apps):
        try:
            apps = apps.items()
        except AttributeError:
            # already a list of (prefix, app) pairs
            pass
        # Sort so that the most specific (longest) prefixes come first.
        apps.sort()
        apps.reverse()
        # Prefixes must start, but not end, with a slash ("" instead of "/").
        self.apps = [(prefix.rstrip("/"), app) for prefix, app in apps]

    def __call__(self, environ, start_response):
        path = environ["PATH_INFO"] or "/"
        for prefix, app in self.apps:
            # self.apps is sorted longest-prefix-first, so the first
            # match is the most specific one.
            if path == prefix or path.startswith(prefix + "/"):
                new_environ = environ.copy()
                new_environ["SCRIPT_NAME"] = new_environ["SCRIPT_NAME"] + prefix
                new_environ["PATH_INFO"] = path[len(prefix):]
                return app(new_environ, start_response)
        start_response('404 Not Found', [('Content-Type', 'text/plain'),
                                         ('Content-Length', '0')])
        return ['']
class MaxSizeExceeded(Exception):
    """Raised when a wrapped stream reads more bytes than its limit."""
    pass
class SizeCheckWrapper(object):
    """Wraps a file-like object, raising MaxSizeExceeded if too large.

    Tracks the cumulative number of bytes read in ``bytes_read`` and
    checks it against ``maxlen`` after every read; a maxlen of 0 or
    None disables the limit.
    """

    def __init__(self, rfile, maxlen):
        self.rfile = rfile
        self.maxlen = maxlen
        self.bytes_read = 0

    def _check_length(self):
        if self.maxlen and self.bytes_read > self.maxlen:
            raise MaxSizeExceeded()

    def read(self, size=None):
        chunk = self.rfile.read(size)
        self.bytes_read += len(chunk)
        self._check_length()
        return chunk

    def readline(self, size=None):
        if size is not None:
            line = self.rfile.readline(size)
            self.bytes_read += len(line)
            self._check_length()
            return line
        # No size given: read the line in bounded chunks so that a
        # pathological (e.g. 100MB) line still trips the limit early.
        chunks = []
        while True:
            piece = self.rfile.readline(256)
            self.bytes_read += len(piece)
            self._check_length()
            chunks.append(piece)
            # See http://www.cherrypy.org/ticket/421
            if len(piece) < 256 or piece[-1:] == "\n":
                return ''.join(chunks)

    def readlines(self, sizehint=0):
        # Shamelessly stolen from StringIO
        lines = []
        total = 0
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines

    def close(self):
        self.rfile.close()

    def __iter__(self):
        return self

    def next(self):
        data = self.rfile.next()
        self.bytes_read += len(data)
        self._check_length()
        return data
class HTTPRequest(object):
"""An HTTP Request (and response).
A single HTTP connection may consist of multiple request/response pairs.
send: the 'send' method from the connection's socket object.
wsgi_app: the WSGI application to call.
environ: a partial WSGI environ (server and connection entries).
The caller MUST set the following entries:
* All wsgi.* entries, including .input
* SERVER_NAME and SERVER_PORT
* Any SSL_* entries
* Any custom entries like REMOTE_ADDR and REMOTE_PORT
* SERVER_SOFTWARE: the value to write in the "Server" response header.
* ACTUAL_SERVER_PROTOCOL: the value to write in the Status-Line of
the response. From RFC 2145: "An HTTP server SHOULD send a
response version equal to the highest version for which the
server is at least conditionally compliant, and whose major
version is less than or equal to the one received in the
request. An HTTP server MUST NOT send a version for which
it is not at least conditionally compliant."
outheaders: a list of header tuples to write in the response.
ready: when True, the request has been parsed and is ready to begin
generating the response. When False, signals the calling Connection
that the response should not be generated and the connection should
close.
close_connection: signals the calling Connection that the request
should close. This does not imply an error! The client and/or
server may each request that the connection be closed.
chunked_write: if True, output will be encoded with the "chunked"
transfer-coding. This value is set automatically inside
send_headers.
"""
max_request_header_size = 0
max_request_body_size = 0
def __init__(self, wfile, environ, wsgi_app):
self.rfile = environ['wsgi.input']
self.wfile = wfile
self.environ = environ.copy()
self.wsgi_app = wsgi_app
self.ready = False
self.started_response = False
self.status = ""
self.outheaders = []
self.sent_headers = False
self.close_connection = False
self.chunked_write = False
def parse_request(self):
"""Parse the next HTTP request start-line and message-headers."""
self.rfile.maxlen = self.max_request_header_size
self.rfile.bytes_read = 0
try:
self._parse_request()
except MaxSizeExceeded:
self.simple_response("413 Request Entity Too Large")
return
    def _parse_request(self):
        """Parse the Request-Line, URI, protocol version and headers.

        Sets self.ready = True on success; on any error, writes an error
        response (or just closes) and returns with self.ready still False.
        """
        # HTTP/1.1 connections are persistent by default. If a client
        # requests a page, then idles (leaves the connection open),
        # then rfile.readline() will raise socket.error("timed out").
        # Note that it does this based on the value given to settimeout(),
        # and doesn't need the client to request or acknowledge the close
        # (although your TCP stack might suffer for it: cf Apache's history
        # with FIN_WAIT_2).
        request_line = self.rfile.readline()
        if not request_line:
            # Force self.ready = False so the connection will close.
            self.ready = False
            return

        if request_line == "\r\n":
            # RFC 2616 sec 4.1: "...if the server is reading the protocol
            # stream at the beginning of a message and receives a CRLF
            # first, it should ignore the CRLF."
            # But only ignore one leading line! else we enable a DoS.
            request_line = self.rfile.readline()
            if not request_line:
                self.ready = False
                return

        environ = self.environ

        try:
            method, path, req_protocol = request_line.strip().split(" ", 2)
        except ValueError:
            self.simple_response(400, "Malformed Request-Line")
            return

        environ["REQUEST_METHOD"] = method

        # path may be an abs_path (including "http://host.domain.tld");
        scheme, location, path, params, qs, frag = urlparse(path)

        if frag:
            self.simple_response("400 Bad Request",
                                 "Illegal #fragment in Request-URI.")
            return

        if scheme:
            environ["wsgi.url_scheme"] = scheme
        if params:
            path = path + ";" + params

        environ["SCRIPT_NAME"] = ""

        # Unquote the path+params (e.g. "/this%20path" -> "this path").
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
        #
        # But note that "...a URI must be separated into its components
        # before the escaped characters within those components can be
        # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
        # Splitting on quoted_slash first keeps encoded %2F slashes from
        # turning into path separators when each atom is unquoted.
        atoms = [unquote(x) for x in quoted_slash.split(path)]
        path = "%2F".join(atoms)
        environ["PATH_INFO"] = path

        # Note that, like wsgiref and most other WSGI servers,
        # we unquote the path but not the query string.
        environ["QUERY_STRING"] = qs

        # Compare request and server HTTP protocol versions, in case our
        # server does not support the requested protocol. Limit our output
        # to min(req, server). We want the following output:
        #     request    server     actual written   supported response
        #     protocol   protocol  response protocol    feature set
        # a     1.0        1.0           1.0                1.0
        # b     1.0        1.1           1.1                1.0
        # c     1.1        1.0           1.0                1.0
        # d     1.1        1.1           1.1                1.1
        # Notice that, in (b), the response will be "HTTP/1.1" even though
        # the client only understands 1.0. RFC 2616 10.5.6 says we should
        # only return 505 if the _major_ version is different.
        # NOTE(review): positional indexing of "HTTP/x.y" assumes
        # single-digit version numbers.
        rp = int(req_protocol[5]), int(req_protocol[7])
        server_protocol = environ["ACTUAL_SERVER_PROTOCOL"]
        sp = int(server_protocol[5]), int(server_protocol[7])
        if sp[0] != rp[0]:
            self.simple_response("505 HTTP Version Not Supported")
            return
        # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
        environ["SERVER_PROTOCOL"] = req_protocol
        self.response_protocol = "HTTP/%s.%s" % min(rp, sp)

        # If the Request-URI was an absoluteURI, use its location atom.
        if location:
            environ["SERVER_NAME"] = location

        # then all the http headers
        try:
            self.read_headers()
        except ValueError, ex:
            self.simple_response("400 Bad Request", repr(ex.args))
            return

        mrbs = self.max_request_body_size
        if mrbs and int(environ.get("CONTENT_LENGTH", 0)) > mrbs:
            self.simple_response("413 Request Entity Too Large")
            return

        # Persistent connection support
        if self.response_protocol == "HTTP/1.1":
            # Both server and client are HTTP/1.1
            if environ.get("HTTP_CONNECTION", "") == "close":
                self.close_connection = True
        else:
            # Either the server or client (or both) are HTTP/1.0
            if environ.get("HTTP_CONNECTION", "") != "Keep-Alive":
                self.close_connection = True

        # Transfer-Encoding support
        te = None
        if self.response_protocol == "HTTP/1.1":
            te = environ.get("HTTP_TRANSFER_ENCODING")
            if te:
                te = [x.strip().lower() for x in te.split(",") if x.strip()]

        self.chunked_read = False

        if te:
            for enc in te:
                if enc == "chunked":
                    self.chunked_read = True
                else:
                    # Note that, even if we see "chunked", we must reject
                    # if there is an extension we don't recognize.
                    self.simple_response("501 Unimplemented")
                    self.close_connection = True
                    return

        # From PEP 333:
        # "Servers and gateways that implement HTTP 1.1 must provide
        # transparent support for HTTP 1.1's "expect/continue" mechanism.
        # This may be done in any of several ways:
        #   1. Respond to requests containing an Expect: 100-continue request
        #      with an immediate "100 Continue" response, and proceed normally.
        #   2. Proceed with the request normally, but provide the application
        #      with a wsgi.input stream that will send the "100 Continue"
        #      response if/when the application first attempts to read from
        #      the input stream. The read request must then remain blocked
        #      until the client responds.
        #   3. Wait until the client decides that the server does not support
        #      expect/continue, and sends the request body on its own.
        #      (This is suboptimal, and is not recommended.)
        #
        # We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
        # but it seems like it would be a big slowdown for such a rare case.
        if environ.get("HTTP_EXPECT", "") == "100-continue":
            self.simple_response(100)

        self.ready = True
    def read_headers(self):
        """Read header lines from the incoming stream.

        Stores headers in self.environ under HTTP_* keys, folding repeated
        comma-separated headers into one value, then moves Content-Type and
        Content-Length to their unprefixed CGI names. Raises ValueError on
        a truncated header block.
        """
        environ = self.environ

        while True:
            line = self.rfile.readline()
            if not line:
                # No more data--illegal end of headers
                raise ValueError("Illegal end of headers.")

            if line == '\r\n':
                # Normal end of headers
                break

            if line[0] in ' \t':
                # It's a continuation line.
                # NOTE(review): 'k' and 'envname' deliberately keep their
                # values from the previous iteration here, so a continuation
                # value is stored (or folded) under the previous header's
                # key. A continuation as the very first line would raise a
                # NameError.
                v = line.strip()
            else:
                k, v = line.split(":", 1)
                k, v = k.strip().upper(), v.strip()
                envname = "HTTP_" + k.replace("-", "_")

            if k in comma_separated_headers:
                existing = environ.get(envname)
                if existing:
                    v = ", ".join((existing, v))
            environ[envname] = v

        # CGI/WSGI expects these two without the HTTP_ prefix.
        ct = environ.pop("HTTP_CONTENT_TYPE", None)
        if ct is not None:
            environ["CONTENT_TYPE"] = ct
        cl = environ.pop("HTTP_CONTENT_LENGTH", None)
        if cl is not None:
            environ["CONTENT_LENGTH"] = cl
    def decode_chunked(self):
        """Decode the 'chunked' transfer coding.

        Reads the entire chunked body into an in-memory buffer, replaces
        wsgi.input with it, sets CONTENT_LENGTH to the decoded size, and
        reads any trailer headers. Returns True on success; returns None
        (falsy) after writing a 400 response if the chunk framing is bad.
        """
        cl = 0
        data = StringIO.StringIO()
        while True:
            # Each chunk starts with "<hex size>[;extension]\r\n".
            line = self.rfile.readline().strip().split(";", 1)
            chunk_size = int(line.pop(0), 16)
            if chunk_size <= 0:
                # The zero-size chunk terminates the body.
                break
##            if line: chunk_extension = line[0]
            cl += chunk_size
            data.write(self.rfile.read(chunk_size))
            crlf = self.rfile.read(2)
            if crlf != "\r\n":
                self.simple_response("400 Bad Request",
                                     "Bad chunked transfer coding "
                                     "(expected '\\r\\n', got %r)" % crlf)
                return

        # Grab any trailer headers
        self.read_headers()

        data.seek(0)
        self.environ["wsgi.input"] = data
        # NOTE(review): str(cl) is always truthy (even "0"), so the
        # `or ""` fallback is dead code.
        self.environ["CONTENT_LENGTH"] = str(cl) or ""
        return True
def respond(self):
"""Call the appropriate WSGI app and write its iterable output."""
# Set rfile.maxlen to ensure we don't read past Content-Length.
# This will also be used to read the entire request body if errors
# are raised before the app can read the body.
if self.chunked_read:
# If chunked, Content-Length will be 0.
self.rfile.maxlen = self.max_request_body_size
else:
cl = int(self.environ.get("CONTENT_LENGTH", 0))
if self.max_request_body_size:
self.rfile.maxlen = min(cl, self.max_request_body_size)
else:
self.rfile.maxlen = cl
self.rfile.bytes_read = 0
try:
self._respond()
except MaxSizeExceeded:
if not self.sent_headers:
self.simple_response("413 Request Entity Too Large")
return
    def _respond(self):
        """Run the WSGI application and stream its output to the client."""
        if self.chunked_read:
            # Decode the chunked request body up front; on failure the
            # connection must close (decode_chunked already answered 400).
            if not self.decode_chunked():
                self.close_connection = True
                return

        response = self.wsgi_app(self.environ, self.start_response)
        try:
            for chunk in response:
                # "The start_response callable must not actually transmit
                # the response headers. Instead, it must store them for the
                # server or gateway to transmit only after the first
                # iteration of the application return value that yields
                # a NON-EMPTY string, or upon the application's first
                # invocation of the write() callable." (PEP 333)
                if chunk:
                    self.write(chunk)
        finally:
            # Always give the app iterable a chance to release resources.
            if hasattr(response, "close"):
                response.close()

        # An empty-bodied response never triggered write(); flush the
        # stored headers now.
        if (self.ready and not self.sent_headers):
            self.sent_headers = True
            self.send_headers()

        if self.chunked_write:
            # Terminate the chunked transfer-coding with a zero-size chunk.
            self.wfile.sendall("0\r\n\r\n")
    def simple_response(self, status, msg=""):
        """Write a complete, minimal response directly to the client.

        status: the Status-Line value; callers pass either an int (400, 100)
            or an "NNN Reason" string, so it is coerced with str() first.
        msg: optional plain-text body.
        """
        status = str(status)
        buf = ["%s %s\r\n" % (self.environ['ACTUAL_SERVER_PROTOCOL'], status),
               "Content-Length: %s\r\n" % len(msg),
               "Content-Type: text/plain\r\n"]

        if status[:3] == "413" and self.response_protocol == 'HTTP/1.1':
            # Request Entity Too Large
            self.close_connection = True
            buf.append("Connection: close\r\n")

        buf.append("\r\n")
        if msg:
            buf.append(msg)

        try:
            self.wfile.sendall("".join(buf))
        except socket.error, x:
            # Errors that just mean "client went away" are not worth
            # propagating; anything else is re-raised.
            if x.args[0] not in socket_errors_to_ignore:
                raise
    def start_response(self, status, headers, exc_info = None):
        """WSGI callable to begin the HTTP response.

        status: the "NNN Reason" Status-Line value from the application.
        headers: a list of (name, value) tuples to extend outheaders with.
        exc_info: sys.exc_info() triple; required for any second call.
        Returns self.write per the WSGI spec.
        """
        # "The application may call start_response more than once,
        # if and only if the exc_info argument is provided."
        if self.started_response and not exc_info:
            raise AssertionError("WSGI start_response called a second "
                                 "time with no exc_info.")

        # "if exc_info is provided, and the HTTP headers have already been
        # sent, start_response must raise an error, and should raise the
        # exc_info tuple."
        if self.sent_headers:
            try:
                raise exc_info[0], exc_info[1], exc_info[2]
            finally:
                # Drop the traceback reference to avoid a cycle (PEP 333).
                exc_info = None

        self.started_response = True
        self.status = status
        self.outheaders.extend(headers)
        return self.write
def write(self, chunk):
"""WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application).
"""
if not self.started_response:
raise AssertionError("WSGI write called before start_response.")
if not self.sent_headers:
self.sent_headers = True
self.send_headers()
if self.chunked_write and chunk:
buf = [hex(len(chunk))[2:], "\r\n", chunk, "\r\n"]
self.wfile.sendall("".join(buf))
else:
self.wfile.sendall(chunk)
def send_headers(self):
"""Assert, process, and send the HTTP response message-headers."""
hkeys = [key.lower() for key, value in self.outheaders]
status = int(self.status[:3])
if status == 413:
# Request Entity Too Large. Close conn to avoid garbage.
self.close_connection = True
elif "content-length" not in hkeys:
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body." So no point chunking.
if status < 200 or status in (204, 205, 304):
pass
else:
if (self.response_protocol == 'HTTP/1.1'
and self.environ["REQUEST_METHOD"] != 'HEAD'):
# Use the chunked transfer-coding
self.chunked_write = True
self.outheaders.append(("Transfer-Encoding", "chunked"))
else:
# Closing the conn is the only way to determine len.
self.close_connection = True
if "connection" not in hkeys:
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1 or better
if self.close_connection:
self.outheaders.append(("Connection", "close"))
else:
# Server and/or client are HTTP/1.0
if not self.close_connection:
self.outheaders.append(("Connection", "Keep-Alive"))
if (not self.close_connection) and (not self.chunked_read):
# Read any remaining request body data on the socket.
# "If an origin server receives a request that does not include an
# Expect request-header field with the "100-continue" expectation,
# the request includes a request body, and the server responds
# with a final status code before reading the entire request body
# from the transport connection, then the server SHOULD NOT close
# the transport connection until it has read the entire request,
# or until the client closes the connection. Otherwise, the client
# might not reliably receive the response message. However, this
# requirement is not be construed as preventing a server from
# defending itself against denial-of-service attacks, or from
# badly broken client implementations."
size = self.rfile.maxlen - self.rfile.bytes_read
if size > 0:
self.rfile.read(size)
if "date" not in hkeys:
self.outheaders.append(("Date", rfc822.formatdate()))
if "server" not in hkeys:
self.outheaders.append(("Server", self.environ['SERVER_SOFTWARE']))
buf = [self.environ['ACTUAL_SERVER_PROTOCOL'], " ", self.status, "\r\n"]
try:
buf += [k + ": " + v + "\r\n" for k, v in self.outheaders]
except TypeError:
if not isinstance(k, str):
raise TypeError("WSGI response header key %r is not a string.")
if not isinstance(v, str):
raise TypeError("WSGI response header value %r is not a string.")
else:
raise
buf.append("\r\n")
self.wfile.sendall("".join(buf))
class NoSSLError(Exception):
    """Raised when a client speaks plain HTTP to an HTTPS socket."""
class FatalSSLAlert(Exception):
    """Raised when the SSL implementation signals a fatal alert."""
if not _fileobject_uses_str_type:
    # On this Python, socket._fileobject buffers with a StringIO, so
    # self._rbuf below is a StringIO instance.
    class CP_fileobject(socket._fileobject):
        """Faux file object attached to a socket object."""

        def sendall(self, data):
            """Sendall for non-blocking sockets."""
            while data:
                try:
                    bytes_sent = self.send(data)
                    data = data[bytes_sent:]
                except socket.error, e:
                    # EAGAIN-style errors just mean "try again".
                    if e.args[0] not in socket_errors_nonblocking:
                        raise

        def send(self, data):
            # Single (possibly partial) send on the underlying socket.
            return self._sock.send(data)

        def flush(self):
            # Push any buffered writes out to the socket.
            if self._wbuf:
                buffer = "".join(self._wbuf)
                self._wbuf = []
                self.sendall(buffer)

        def recv(self, size):
            # recv that retries on non-blocking errors and EINTR.
            while True:
                try:
                    return self._sock.recv(size)
                except socket.error, e:
                    if (e.args[0] not in socket_errors_nonblocking
                        and e.args[0] not in socket_error_eintr):
                        raise

        def read(self, size=-1):
            """Read up to `size` bytes; a negative size reads to EOF."""
            # Use max, disallow tiny reads in a loop as they are very inefficient.
            # We never leave read() with any leftover data from a new recv() call
            # in our internal buffer.
            rbufsize = max(self._rbufsize, self.default_bufsize)
            # Our use of StringIO rather than lists of string objects returned by
            # recv() minimizes memory usage and fragmentation that occurs when
            # rbufsize is large compared to the typical return value of recv().
            buf = self._rbuf
            buf.seek(0, 2)  # seek end
            if size < 0:
                # Read until EOF
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(rbufsize)
                    if not data:
                        break
                    buf.write(data)
                return buf.getvalue()
            else:
                # Read until size bytes or EOF seen, whichever comes first
                buf_len = buf.tell()
                if buf_len >= size:
                    # Already have size bytes in our buffer?  Extract and return.
                    buf.seek(0)
                    rv = buf.read(size)
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return rv

                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    left = size - buf_len
                    # recv() will malloc the amount of memory given as its
                    # parameter even though it often returns much less data
                    # than that.  The returned data string is short lived
                    # as we copy it into a StringIO and free it.  This avoids
                    # fragmentation issues on many platforms.
                    data = self.recv(left)
                    if not data:
                        break
                    n = len(data)
                    if n == size and not buf_len:
                        # Shortcut.  Avoid buffer data copies when:
                        # - We have no data in our buffer.
                        # AND
                        # - Our call to recv returned exactly the
                        #   number of bytes we were asked to read.
                        return data
                    if n == left:
                        buf.write(data)
                        del data  # explicit free
                        break
                    # NOTE: (HUE-2893) This was backported from CherryPy PR
                    # #14, which fixes uploading chunked files with SSL.
                    elif n > left:
                        # Could happen with SSL transport. Defer the
                        # extra data read to the next call.
                        buf.write(data[:left])
                        self._rbuf.write(data[left:])
                        del data
                        break
                    buf.write(data)
                    buf_len += n
                    del data  # explicit free
                #assert buf_len == buf.tell()
                return buf.getvalue()

        def readline(self, size=-1):
            """Read one line, up to `size` bytes if size is non-negative."""
            buf = self._rbuf
            buf.seek(0, 2)  # seek end
            if buf.tell() > 0:
                # check if we already have it in our buffer
                buf.seek(0)
                bline = buf.readline(size)
                if bline.endswith('\n') or len(bline) == size:
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return bline
                del bline
            if size < 0:
                # Read until \n or EOF, whichever comes first
                if self._rbufsize <= 1:
                    # Speed up unbuffered case
                    buf.seek(0)
                    buffers = [buf.read()]
                    self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                    data = None
                    recv = self.recv
                    while data != "\n":
                        data = recv(1)
                        if not data:
                            break
                        buffers.append(data)
                    return "".join(buffers)

                buf.seek(0, 2)  # seek end
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    nl = data.find('\n')
                    if nl >= 0:
                        nl += 1
                        buf.write(data[:nl])
                        self._rbuf.write(data[nl:])
                        del data
                        break
                    buf.write(data)
                return buf.getvalue()
            else:
                # Read until size bytes or \n or EOF seen, whichever comes first
                buf.seek(0, 2)  # seek end
                buf_len = buf.tell()
                if buf_len >= size:
                    buf.seek(0)
                    rv = buf.read(size)
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return rv
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    left = size - buf_len
                    # did we just receive a newline?
                    nl = data.find('\n', 0, left)
                    if nl >= 0:
                        nl += 1
                        # save the excess data to _rbuf
                        self._rbuf.write(data[nl:])
                        if buf_len:
                            buf.write(data[:nl])
                            break
                        else:
                            # Shortcut.  Avoid data copy through buf when returning
                            # a substring of our first recv().
                            return data[:nl]
                    n = len(data)
                    if n == size and not buf_len:
                        # Shortcut.  Avoid data copy through buf when
                        # returning exactly all of our first recv().
                        return data
                    if n >= left:
                        buf.write(data[:left])
                        self._rbuf.write(data[left:])
                        break
                    buf.write(data)
                    buf_len += n
                #assert buf_len == buf.tell()
                return buf.getvalue()
else:
    # On this Python, socket._fileobject buffers with a plain str, so
    # self._rbuf below is a string.
    class CP_fileobject(socket._fileobject):
        """Faux file object attached to a socket object."""

        def sendall(self, data):
            """Sendall for non-blocking sockets."""
            while data:
                try:
                    bytes_sent = self.send(data)
                    data = data[bytes_sent:]
                except socket.error, e:
                    # EAGAIN-style errors just mean "try again".
                    if e.args[0] not in socket_errors_nonblocking:
                        raise

        def send(self, data):
            # Single (possibly partial) send on the underlying socket.
            return self._sock.send(data)

        def flush(self):
            # Push any buffered writes out to the socket.
            if self._wbuf:
                buffer = "".join(self._wbuf)
                self._wbuf = []
                self.sendall(buffer)

        def recv(self, size):
            # recv that retries on non-blocking errors and EINTR.
            while True:
                try:
                    return self._sock.recv(size)
                except socket.error, e:
                    if (e.args[0] not in socket_errors_nonblocking
                        and e.args[0] not in socket_error_eintr):
                        raise

        def read(self, size=-1):
            """Read up to `size` bytes; a negative size reads to EOF."""
            if size < 0:
                # Read until EOF
                buffers = [self._rbuf]
                self._rbuf = ""
                if self._rbufsize <= 1:
                    recv_size = self.default_bufsize
                else:
                    recv_size = self._rbufsize

                while True:
                    data = self.recv(recv_size)
                    if not data:
                        break
                    buffers.append(data)

                return "".join(buffers)
            else:
                # Read until size bytes or EOF seen, whichever comes first
                data = self._rbuf
                buf_len = len(data)
                if buf_len >= size:
                    # Already buffered; split the buffer and return.
                    self._rbuf = data[size:]
                    return data[:size]

                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    left = size - buf_len
                    recv_size = max(self._rbufsize, left)
                    data = self.recv(recv_size)
                    if not data:
                        break
                    buffers.append(data)
                    n = len(data)
                    if n >= left:
                        # Keep the excess for the next call.
                        self._rbuf = data[left:]
                        buffers[-1] = data[:left]
                        break
                    buf_len += n
                return "".join(buffers)

        def readline(self, size=-1):
            """Read one line, up to `size` bytes if size is non-negative."""
            data = self._rbuf
            if size < 0:
                # Read until \n or EOF, whichever comes first
                if self._rbufsize <= 1:
                    # Speed up unbuffered case
                    assert data == ""
                    buffers = []
                    while data != "\n":
                        data = self.recv(1)
                        if not data:
                            break
                        buffers.append(data)
                    return "".join(buffers)
                nl = data.find('\n')
                if nl >= 0:
                    # Line already complete in the buffer.
                    nl += 1
                    self._rbuf = data[nl:]
                    return data[:nl]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    buffers.append(data)
                    nl = data.find('\n')
                    if nl >= 0:
                        nl += 1
                        self._rbuf = data[nl:]
                        buffers[-1] = data[:nl]
                        break
                return "".join(buffers)
            else:
                # Read until size bytes or \n or EOF seen, whichever comes first
                nl = data.find('\n', 0, size)
                if nl >= 0:
                    nl += 1
                    self._rbuf = data[nl:]
                    return data[:nl]
                buf_len = len(data)
                if buf_len >= size:
                    self._rbuf = data[size:]
                    return data[:size]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    buffers.append(data)
                    left = size - buf_len
                    nl = data.find('\n', 0, left)
                    if nl >= 0:
                        nl += 1
                        self._rbuf = data[nl:]
                        buffers[-1] = data[:nl]
                        break
                    n = len(data)
                    if n >= left:
                        self._rbuf = data[left:]
                        buffers[-1] = data[:left]
                        break
                    buf_len += n
                return "".join(buffers)
class SSL_fileobject(CP_fileobject):
    """SSL file object attached to a socket object."""

    # Give up on an SSL retry loop after this many seconds.
    ssl_timeout = 3
    # Delay between retries when OpenSSL asks for the call to be re-run.
    ssl_retry = .01

    def _safe_call(self, is_reader, call, *args, **kwargs):
        """Wrap the given call with SSL error-trapping.

        is_reader: if False EOF errors will be raised. If True, EOF errors
        will return "" (to emulate normal sockets).
        """
        start = time.time()
        while True:
            try:
                return call(*args, **kwargs)
            except SSL.WantReadError:
                # Sleep and try again. This is dangerous, because it means
                # the rest of the stack has no way of differentiating
                # between a "new handshake" error and "client dropped".
                # Note this isn't an endless loop: there's a timeout below.
                time.sleep(self.ssl_retry)
            except SSL.WantWriteError:
                time.sleep(self.ssl_retry)
            except SSL.SysCallError, e:
                if is_reader and e.args == (-1, 'Unexpected EOF'):
                    return ""

                errnum = e.args[0]
                if is_reader and errnum in socket_errors_to_ignore:
                    return ""
                raise socket.error(errnum)
            except SSL.Error, e:
                if is_reader and e.args == (-1, 'Unexpected EOF'):
                    return ""

                thirdarg = None
                try:
                    thirdarg = e.args[0][0][2]
                except IndexError:
                    pass

                if thirdarg == 'http request':
                    # The client is talking HTTP to an HTTPS server.
                    raise NoSSLError()
                raise FatalSSLAlert(*e.args)

            # Only reached when a Want{Read,Write}Error branch slept.
            if time.time() - start > self.ssl_timeout:
                raise socket.timeout("timed out")

    def recv(self, *args, **kwargs):
        # Drain every decrypted byte OpenSSL has pending, so the buffered
        # layer above sees all data already processed by the SSL record
        # layer.
        buf = []
        r = super(SSL_fileobject, self).recv
        while True:
            data = self._safe_call(True, r, *args, **kwargs)
            buf.append(data)
            p = self._sock.pending()
            if not p:
                return "".join(buf)

    def sendall(self, *args, **kwargs):
        return self._safe_call(False, super(SSL_fileobject, self).sendall, *args, **kwargs)

    def send(self, *args, **kwargs):
        return self._safe_call(False, super(SSL_fileobject, self).send, *args, **kwargs)
class HTTPConnection(object):
    """An HTTP connection (active socket).

    socket: the raw socket object (usually TCP) for this connection.
    wsgi_app: the WSGI application for this server/connection.
    environ: a WSGI environ template. This will be copied for each request.
    rfile: a fileobject for reading from the socket.
    send: a function for writing (+ flush) to the socket.
    """

    # Buffer size for the read side; -1 means the platform default.
    rbufsize = -1
    RequestHandlerClass = HTTPRequest
    environ = {"wsgi.version": (1, 0),
               "wsgi.url_scheme": "http",
               "wsgi.multithread": True,
               "wsgi.multiprocess": False,
               "wsgi.run_once": False,
               "wsgi.errors": sys.stderr,
               }

    def __init__(self, sock, wsgi_app, environ):
        self.socket = sock
        self.wsgi_app = wsgi_app

        # Copy the class environ into self.
        self.environ = self.environ.copy()
        self.environ.update(environ)

        if SSL and isinstance(sock, SSL.ConnectionType):
            timeout = sock.gettimeout()
            self.rfile = SSL_fileobject(sock, "rb", self.rbufsize)
            self.rfile.ssl_timeout = timeout
            self.wfile = SSL_fileobject(sock, "wb", -1)
            self.wfile.ssl_timeout = timeout
        else:
            self.rfile = CP_fileobject(sock, "rb", self.rbufsize)
            self.wfile = CP_fileobject(sock, "wb", -1)

        # Wrap wsgi.input but not HTTPConnection.rfile itself.
        # We're also not setting maxlen yet; we'll do that separately
        # for headers and body for each iteration of self.communicate
        # (if maxlen is 0 the wrapper doesn't check length).
        self.environ["wsgi.input"] = SizeCheckWrapper(self.rfile, 0)

    def communicate(self):
        """Read each request and respond appropriately."""
        try:
            while True:
                # (re)set req to None so that if something goes wrong in
                # the RequestHandlerClass constructor, the error doesn't
                # get written to the previous request.
                req = None
                req = self.RequestHandlerClass(self.wfile, self.environ,
                                               self.wsgi_app)

                # This order of operations should guarantee correct pipelining.
                req.parse_request()
                if not req.ready:
                    return

                req.respond()
                if req.close_connection:
                    return

        except socket.error, e:
            errnum = e.args[0]
            if errnum == 'timed out':
                if req and not req.sent_headers:
                    req.simple_response("408 Request Timeout")
            elif errnum not in socket_errors_to_ignore:
                if req and not req.sent_headers:
                    req.simple_response("500 Internal Server Error",
                                        format_exc())
            return
        except (KeyboardInterrupt, SystemExit):
            raise
        except FatalSSLAlert, e:
            # Close the connection.
            return
        except NoSSLError:
            if req and not req.sent_headers:
                # Unwrap our wfile
                req.wfile = CP_fileobject(self.socket._sock, "wb", -1)
                req.simple_response("400 Bad Request",
                    "The client sent a plain HTTP request, but "
                    "this server only speaks HTTPS on this port.")
                self.linger = True
        except Exception, e:
            if req and not req.sent_headers:
                req.simple_response("500 Internal Server Error", format_exc())

    # When True, close() skips shutting the socket down so the client has
    # a chance to read our entire response (set in the NoSSLError path).
    linger = False

    def close(self):
        """Close the socket underlying this connection."""
        self.rfile.close()

        if not self.linger:
            # Python's socket module does NOT call close on the kernel socket
            # when you call socket.close(). We do so manually here because we
            # want this server to send a FIN TCP segment immediately. Note this
            # must be called *before* calling socket.close(), because the latter
            # drops its reference to the kernel socket.
            self.socket._sock.close()
            self.socket.close()
        else:
            # On the other hand, sometimes we want to hang around for a bit
            # to make sure the client has a chance to read our entire
            # response. Skipping the close() calls here delays the FIN
            # packet until the socket object is garbage-collected later.
            # Someday, perhaps, we'll do the full lingering_close that
            # Apache does, but not today.
            pass
def format_exc(limit=None):
    """Like print_exc() but return a string. Backport for Python 2.3."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    try:
        lines = traceback.format_exception(exc_type, exc_value, exc_tb, limit)
        return ''.join(lines)
    finally:
        # Break the frame/traceback reference cycle.
        exc_type = exc_value = exc_tb = None
# Sentinel placed on the request Queue to tell a WorkerThread to exit.
_SHUTDOWNREQUEST = None
class WorkerThread(threading.Thread):
    """Thread which continuously polls a Queue for Connection objects.

    server: the HTTP Server which spawned this thread, and which owns the
        Queue and is placing active connections into it.
    ready: a simple flag for the calling server to know when this thread
        has begun polling the Queue.

    Due to the timing issues of polling a Queue, a WorkerThread does not
    check its own 'ready' flag after it has started. To stop the thread,
    it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
    (one for each running WorkerThread).
    """

    # The Connection currently being served, or None when idle.
    conn = None

    def __init__(self, server):
        self.ready = False
        self.server = server
        threading.Thread.__init__(self)

    def run(self):
        """Serve queued connections until a _SHUTDOWNREQUEST arrives."""
        try:
            self.ready = True
            while True:
                try:
                    conn = self.server.requests.get()
                    if conn is _SHUTDOWNREQUEST:
                        return

                    self.conn = conn
                    try:
                        conn.communicate()
                    finally:
                        # Always release the socket, even on error.
                        conn.close()
                        self.conn = None
                except Exception, ex:
                    # Log and keep polling; one bad connection must not
                    # kill the worker.
                    LOG.exception('WSGI (%s) error: %s' % (self, ex))
        except (KeyboardInterrupt, SystemExit), exc:
            self.server.interrupt = exc
            return
class ThreadPool(object):
    """A Request Queue for the CherryPyWSGIServer which pools threads.

    ThreadPool objects must provide min, get(), put(obj), start()
    and stop(timeout) attributes.
    """

    def __init__(self, server, min=10, max=-1):
        # server: the owning CherryPyWSGIServer.
        # min: initial/minimum number of worker threads.
        # max: upper bound on workers; <= 0 means unlimited.
        self.server = server
        self.min = min
        self.max = max
        self._threads = []
        self._queue = Queue.Queue()
        self.get = self._queue.get

    def start(self):
        """Start the pool of threads."""
        for i in xrange(self.min):
            self._threads.append(WorkerThread(self.server))
        for worker in self._threads:
            worker.setName("CP WSGIServer " + worker.getName())
            worker.start()
        for worker in self._threads:
            # Busy-wait until each worker is actually polling the queue.
            while not worker.ready:
                time.sleep(.1)

    def _get_idle(self):
        """Number of worker threads which are idle. Read-only."""
        return len([t for t in self._threads if t.conn is None])
    idle = property(_get_idle, doc=_get_idle.__doc__)

    def put(self, obj):
        # Queue a connection (or the _SHUTDOWNREQUEST sentinel).
        self._queue.put(obj)
        if obj is _SHUTDOWNREQUEST:
            return

    def grow(self, amount):
        """Spawn new worker threads (not above self.max)."""
        for i in xrange(amount):
            if self.max > 0 and len(self._threads) >= self.max:
                break
            worker = WorkerThread(self.server)
            worker.setName("CP WSGIServer " + worker.getName())
            self._threads.append(worker)
            worker.start()

    def shrink(self, amount):
        """Kill off worker threads (not below self.min)."""
        # Grow/shrink the pool if necessary.
        # Remove any dead threads from our list
        # NOTE(review): this mutates _threads while iterating it, which
        # can skip entries; dead threads are then caught on a later pass.
        for t in self._threads:
            if not t.isAlive():
                self._threads.remove(t)
                amount -= 1

        if amount > 0:
            for i in xrange(min(amount, len(self._threads) - self.min)):
                # Put a number of shutdown requests on the queue equal
                # to 'amount'. Once each of those is processed by a worker,
                # that worker will terminate and be culled from our list
                # in self.put.
                # NOTE(review): as written, self.put does not cull threads;
                # terminated workers are removed by the dead-thread sweep
                # above on the next shrink() call.
                self._queue.put(_SHUTDOWNREQUEST)

    def stop(self, timeout=5):
        """Shut down all workers, joining each for up to `timeout` seconds."""
        # Must shut down threads here so the code that calls
        # this method can know when all threads are stopped.
        for worker in self._threads:
            self._queue.put(_SHUTDOWNREQUEST)

        # Don't join currentThread (when stop is called inside a request).
        current = threading.currentThread()
        while self._threads:
            worker = self._threads.pop()
            if worker is not current and worker.isAlive():
                try:
                    if timeout is None or timeout < 0:
                        worker.join()
                    else:
                        worker.join(timeout)
                        if worker.isAlive():
                            # We exhausted the timeout.
                            # Forcibly shut down the socket.
                            c = worker.conn
                            if c and not c.rfile.closed:
                                if SSL and isinstance(c.socket, SSL.ConnectionType):
                                    # pyOpenSSL.socket.shutdown takes no args
                                    c.socket.shutdown()
                                else:
                                    c.socket.shutdown(socket.SHUT_RD)
                            worker.join()
                except (AssertionError,
                        # Ignore repeated Ctrl-C.
                        # See http://www.cherrypy.org/ticket/691.
                        KeyboardInterrupt), exc1:
                    pass
class SSLConnection:
    """A thread-safe wrapper for an SSL.Connection.

    *args: the arguments to create the wrapped SSL.Connection(*args).
    """

    def __init__(self, *args):
        self._ssl_conn = SSL.Connection(*args)
        self._lock = threading.RLock()

    # Generate a lock-serialized proxy for each SSL.Connection method,
    # since pyOpenSSL connections are not safe to share across threads.
    # (Python 2 `exec` statement; runs in the class body at class
    # creation time, so each generated def becomes a method.)
    for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read',
              'renegotiate', 'bind', 'listen', 'connect', 'accept',
              'setblocking', 'fileno', 'shutdown', 'close', 'get_cipher_list',
              'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
              'makefile', 'get_app_data', 'set_app_data', 'state_string',
              'sock_shutdown', 'get_peer_certificate', 'want_read',
              'want_write', 'set_connect_state', 'set_accept_state',
              'connect_ex', 'sendall', 'settimeout'):
        exec """def %s(self, *args):
    self._lock.acquire()
    try:
        return self._ssl_conn.%s(*args)
    finally:
        self._lock.release()
""" % (f, f)
# Choose a platform-appropriate prevent_socket_inheritance at import time:
# POSIX (fcntl), Windows (ctypes/kernel32), or a no-op fallback.
try:
    import fcntl
except ImportError:
    try:
        from ctypes import windll, WinError
    except ImportError:
        def prevent_socket_inheritance(sock):
            """Dummy function, since neither fcntl nor ctypes are available."""
            pass
    else:
        def prevent_socket_inheritance(sock):
            """Mark the given socket fd as non-inheritable (Windows)."""
            if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
                raise WinError()
else:
    def prevent_socket_inheritance(sock):
        """Mark the given socket fd as non-inheritable (POSIX)."""
        fd = sock.fileno()
        old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
class CherryPyWSGIServer(object):
"""An HTTP server for WSGI.
bind_addr: The interface on which to listen for connections.
For TCP sockets, a (host, port) tuple. Host values may be any IPv4
or IPv6 address, or any valid hostname. The string 'localhost' is a
synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
The string '0.0.0.0' is a special IPv4 entry meaning "any active
interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
IPv6. The empty string or None are not allowed.
For UNIX sockets, supply the filename as a string.
wsgi_app: the WSGI 'application callable'; multiple WSGI applications
may be passed as (path_prefix, app) pairs.
numthreads: the number of worker threads to create (default 10).
server_name: the string to set for WSGI's SERVER_NAME environ entry.
Defaults to socket.gethostname().
max: the maximum number of queued requests (defaults to -1 = no limit).
request_queue_size: the 'backlog' argument to socket.listen();
specifies the maximum number of queued connections (default 5).
timeout: the timeout in seconds for accepted connections (default 10).
nodelay: if True (the default since 3.1), sets the TCP_NODELAY socket
option.
protocol: the version string to write in the Status-Line of all
HTTP responses. For example, "HTTP/1.1" (the default). This
also limits the supported features used in the response.
SSL/HTTPS
---------
The OpenSSL module must be importable for SSL functionality.
You can obtain it from http://pyopenssl.sourceforge.net/
ssl_certificate: the filename of the server SSL certificate.
ssl_privatekey: the filename of the server's private key file.
If either of these is None (both are None by default), this server
will not use SSL. If both are given and are valid, they will be read
on server start and used in the SSL context for the listening socket.
"""
protocol = "HTTP/1.1"
_bind_addr = "127.0.0.1"
version = "CherryPy/3.1.2"
ready = False
_interrupt = None
nodelay = True
ConnectionClass = HTTPConnection
environ = {}
# Paths to certificate and private key files
ssl_certificate = None
ssl_private_key = None
ssl_cipher_list = "DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2"
ssl_password_cb = None
    def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
                 max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
        """Create the worker thread pool and record server options.

        ``wsgi_app`` may be a single WSGI callable, or (deprecated) a
        sequence of (path_prefix, app) pairs which gets wrapped in a
        WSGIPathInfoDispatcher.
        """
        # ``numthreads or 1`` guards against a 0/None thread count.
        self.requests = ThreadPool(self, min=numthreads or 1, max=max)

        if callable(wsgi_app):
            # We've been handed a single wsgi_app, in CP-2.1 style.
            # Assume it's mounted at "".
            self.wsgi_app = wsgi_app
        else:
            # We've been handed a list of (path_prefix, wsgi_app) tuples,
            # so that the server can call different wsgi_apps, and also
            # correctly set SCRIPT_NAME.
            warnings.warn("The ability to pass multiple apps is deprecated "
                          "and will be removed in 3.2. You should explicitly "
                          "include a WSGIPathInfoDispatcher instead.",
                          DeprecationWarning)
            self.wsgi_app = WSGIPathInfoDispatcher(wsgi_app)

        # Goes through the bind_addr property, which validates the value.
        self.bind_addr = bind_addr
        if not server_name:
            server_name = socket.gethostname()
        self.server_name = server_name
        self.request_queue_size = request_queue_size
        self.timeout = timeout
        self.shutdown_timeout = shutdown_timeout
    def _get_numthreads(self):
        # The thread pool's minimum size doubles as the configured count.
        return self.requests.min

    def _set_numthreads(self, value):
        self.requests.min = value

    # Expose the pool's minimum size as the ``numthreads`` attribute.
    numthreads = property(_get_numthreads, _set_numthreads)
    def __str__(self):
        # "module.ClassName(bind_addr)"; module/class are looked up
        # dynamically so subclasses render with their own names.
        return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
                              self.bind_addr)
    def _get_bind_addr(self):
        return self._bind_addr

    def _set_bind_addr(self, value):
        # Reject ('', port) and (None, port) outright; see the table below
        # for why neither gives predictable wildcard binding.
        if isinstance(value, tuple) and value[0] in ('', None):
            # Despite the socket module docs, using '' does not
            # allow AI_PASSIVE to work. Passing None instead
            # returns '0.0.0.0' like we want. In other words:
            #     host    AI_PASSIVE  result
            #      ''         Y       192.168.x.y
            #      ''         N       192.168.x.y
            #     None        Y       0.0.0.0
            #     None        N       127.0.0.1
            # But since you can get the same effect with an explicit
            # '0.0.0.0', we deny both the empty string and None as values.
            raise ValueError("Host values of '' or None are not allowed. "
                             "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
                             "to listen on all active interfaces.")
        self._bind_addr = value

    bind_addr = property(_get_bind_addr, _set_bind_addr,
        doc="""The interface on which to listen for connections.
        For TCP sockets, a (host, port) tuple. Host values may be any IPv4
        or IPv6 address, or any valid hostname. The string 'localhost' is a
        synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
        The string '0.0.0.0' is a special IPv4 entry meaning "any active
        interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
        IPv6. The empty string or None are not allowed.
        For UNIX sockets, supply the filename as a string.""")
    def start(self):
        """Run the server forever.

        Binds the socket, then enters the accept loop; does not return
        until the loop exits.
        """
        self.bind_server()
        self.listen_and_loop()
def bind_server(self):
# We don't have to trap KeyboardInterrupt or SystemExit here,
# because cherrpy.server already does so, calling self.stop() for us.
# If you're using this server with another framework, you should
# trap those exceptions in whatever code block calls start().
self._interrupt = None
# Select the appropriate socket
if isinstance(self.bind_addr, basestring):
# AF_UNIX socket
# So we can reuse the socket...
try:
os.unlink(self.bind_addr)
except IOError:
pass
# So everyone can access the socket...
try:
os.chmod(self.bind_addr, 0777)
except IOError:
pass
info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
else:
# AF_INET or AF_INET6 socket
# Get the correct address family for our host (allows IPv6 addresses)
host, port = self.bind_addr
try:
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
except socket.gaierror:
# Probably a DNS issue. Assume IPv4.
info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", self.bind_addr)]
self.socket = None
msg = "No socket could be created"
for res in info:
af, socktype, proto, canonname, sa = res
try:
self._bind(af, socktype, proto)
except socket.error, msg:
if self.socket:
self.socket.close()
self.socket = None
continue
break
if not self.socket:
raise socket.error, msg
    def listen_and_loop(self):
        """
        Listen on the socket, and then loop forever accepting and handling
        connections.

        Exits only when ``self.ready`` is cleared (via stop()) or when an
        ``interrupt`` exception has been set, which is then re-raised here.
        """
        # Timeout so KeyboardInterrupt can be caught on Win32
        self.socket.settimeout(1)
        self.socket.listen(self.request_queue_size)

        # Create worker threads
        self.requests.start()

        self.ready = True
        while self.ready:
            self.tick()
            if self.interrupt:
                while self.interrupt is True:
                    # Wait for self.stop() to complete. See _set_interrupt.
                    time.sleep(0.1)
                if self.interrupt:
                    # A real exception was stored; surface it to the caller.
                    raise self.interrupt
def _bind(self, family, type, proto=0):
"""Create (or recreate) the actual socket object."""
self.socket = socket.socket(family, type, proto)
prevent_socket_inheritance(self.socket)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if self.nodelay:
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self.ssl_certificate and self.ssl_private_key:
if SSL is None:
raise ImportError("You must install pyOpenSSL to use HTTPS.")
# See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
ctx = SSL.Context(SSL.SSLv23_METHOD)
if self.ssl_password_cb is not None:
ctx.set_passwd_cb(self.ssl_password_cb)
ctx.set_cipher_list(self.ssl_cipher_list)
try:
ctx.use_privatekey_file(self.ssl_private_key)
ctx.use_certificate_file(self.ssl_certificate)
except Exception, ex:
logging.exception('SSL key and certificate could not be found or have a problem')
raise ex
ctx.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
self.socket = SSLConnection(ctx, self.socket)
self.populate_ssl_environ()
# If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
# activate dual-stack. See http://www.cherrypy.org/ticket/871.
if (not isinstance(self.bind_addr, basestring)
and self.bind_addr[0] == '::' and family == socket.AF_INET6):
try:
self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except (AttributeError, socket.error):
# Apparently, the socket option is not available in
# this machine's TCP stack
pass
self.socket.bind(self.bind_addr)
    def tick(self):
        """Accept a new connection and put it on the Queue.

        One iteration of the serve loop: accept (with the 1s timeout set
        in listen_and_loop), build the per-request environ, wrap the
        socket in ConnectionClass and hand it to the worker pool.
        Recoverable socket errors simply return so the loop retries.
        """
        try:
            s, addr = self.socket.accept()
            prevent_socket_inheritance(s)
            if not self.ready:
                # stop() was called while we were blocked in accept().
                return
            if hasattr(s, 'settimeout'):
                s.settimeout(self.timeout)

            environ = self.environ.copy()
            # SERVER_SOFTWARE is common for IIS. It's also helpful for
            # us to pass a default value for the "Server" response header.
            if environ.get("SERVER_SOFTWARE") is None:
                environ["SERVER_SOFTWARE"] = "%s WSGI Server" % self.version
            # set a non-standard environ entry so the WSGI app can know what
            # the *real* server protocol is (and what features to support).
            # See http://www.faqs.org/rfcs/rfc2145.html.
            environ["ACTUAL_SERVER_PROTOCOL"] = self.protocol
            environ["SERVER_NAME"] = self.server_name

            if isinstance(self.bind_addr, basestring):
                # AF_UNIX. This isn't really allowed by WSGI, which doesn't
                # address unix domain sockets. But it's better than nothing.
                environ["SERVER_PORT"] = ""
            else:
                environ["SERVER_PORT"] = str(self.bind_addr[1])
                # optional values
                # Until we do DNS lookups, omit REMOTE_HOST
                environ["REMOTE_ADDR"] = addr[0]
                environ["REMOTE_PORT"] = str(addr[1])

            conn = self.ConnectionClass(s, self.wsgi_app, environ)
            self.requests.put(conn)
        except socket.timeout:
            # The only reason for the timeout in start() is so we can
            # notice keyboard interrupts on Win32, which don't interrupt
            # accept() by default
            return
        except socket.error, x:
            if x.args[0] in socket_error_eintr:
                # I *think* this is right. EINTR should occur when a signal
                # is received during the accept() call; all docs say retry
                # the call, and I *think* I'm reading it right that Python
                # will then go ahead and poll for and handle the signal
                # elsewhere. See http://www.cherrypy.org/ticket/707.
                return
            if x.args[0] in socket_errors_nonblocking:
                # Just try again. See http://www.cherrypy.org/ticket/479.
                return
            if x.args[0] in socket_errors_to_ignore:
                # Our socket was closed.
                # See http://www.cherrypy.org/ticket/686.
                return
            raise
    def _get_interrupt(self):
        return self._interrupt

    def _set_interrupt(self, interrupt):
        # Temporarily mark the interrupt as True so listen_and_loop can
        # spin-wait until stop() has fully completed, then store the real
        # exception for it to re-raise.
        self._interrupt = True
        self.stop()
        self._interrupt = interrupt

    interrupt = property(_get_interrupt, _set_interrupt,
                         doc="Set this to an Exception instance to "
                             "interrupt the server.")
    def stop(self):
        """Gracefully shutdown a server that is serving forever.

        Clears ``ready``, pokes our own listening socket so a blocked
        accept() returns, closes it, then drains the worker pool within
        ``shutdown_timeout`` seconds.
        """
        self.ready = False

        sock = getattr(self, "socket", None)
        if sock:
            if not isinstance(self.bind_addr, basestring):
                # Touch our own socket to make accept() return immediately.
                try:
                    host, port = sock.getsockname()[:2]
                except socket.error, x:
                    if x.args[0] not in socket_errors_to_ignore:
                        raise
                else:
                    # Note that we're explicitly NOT using AI_PASSIVE,
                    # here, because we want an actual IP to touch.
                    # localhost won't work if we've bound to a public IP,
                    # but it will if we bound to '0.0.0.0' (INADDR_ANY).
                    for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                                  socket.SOCK_STREAM):
                        af, socktype, proto, canonname, sa = res
                        s = None
                        try:
                            s = socket.socket(af, socktype, proto)
                            # See http://groups.google.com/group/cherrypy-users/
                            #        browse_frm/thread/bbfe5eb39c904fe0
                            s.settimeout(1.0)
                            s.connect((host, port))
                            s.close()
                        except socket.error:
                            if s:
                                s.close()
            if hasattr(sock, "close"):
                sock.close()
            self.socket = None

        self.requests.stop(self.shutdown_timeout)
def populate_ssl_environ(self):
"""Create WSGI environ entries to be merged into each request."""
cert = open(self.ssl_certificate, 'rb').read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
ssl_environ = {
"wsgi.url_scheme": "https",
"HTTPS": "on",
# pyOpenSSL doesn't provide access to any of these AFAICT
## 'SSL_PROTOCOL': 'SSLv2',
## SSL_CIPHER string The cipher specification name
## SSL_VERSION_INTERFACE string The mod_ssl program version
## SSL_VERSION_LIBRARY string The OpenSSL program version
}
# Server certificate attributes
ssl_environ.update({
'SSL_SERVER_M_VERSION': cert.get_version(),
'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
## 'SSL_SERVER_V_START': Validity of server's certificate (start time),
## 'SSL_SERVER_V_END': Validity of server's certificate (end time),
})
for prefix, dn in [("I", cert.get_issuer()),
("S", cert.get_subject())]:
# X509Name objects don't seem to have a way to get the
# complete DN string. Use str() and slice it instead,
# because str(dn) == "<X509Name object '/C=US/ST=...'>"
dnstr = str(dn)[18:-2]
wsgikey = 'SSL_SERVER_%s_DN' % prefix
ssl_environ[wsgikey] = dnstr
# The DN should be of the form: /k1=v1/k2=v2, but we must allow
# for any value to contain slashes itself (in a URL).
while dnstr:
pos = dnstr.rfind("=")
dnstr, value = dnstr[:pos], dnstr[pos + 1:]
pos = dnstr.rfind("/")
dnstr, key = dnstr[:pos], dnstr[pos + 1:]
if key and value:
wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
ssl_environ[wsgikey] = value
self.environ.update(ssl_environ)
| apache-2.0 |
jonathanwcrane/boto | boto/ec2/networkinterface.py | 150 | 13597 | # Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Elastic Network Interface
"""
from boto.exception import BotoClientError
from boto.ec2.ec2object import TaggedEC2Object
from boto.resultset import ResultSet
from boto.ec2.group import Group
class Attachment(object):
    """
    The attachment of an Elastic Network Interface to an EC2 instance,
    populated by boto's SAX-style XML parsing callbacks.

    :ivar id: The ID of the attachment.
    :ivar instance_id: The ID of the instance.
    :ivar device_index: The index of this device.
    :ivar status: The status of the device.
    :ivar attach_time: The time the device was attached.
    :ivar delete_on_termination: Whether the device will be deleted
        when the instance is terminated.
    """

    def __init__(self):
        self.id = None
        self.instance_id = None
        self.instance_owner_id = None
        self.device_index = 0
        self.status = None
        self.attach_time = None
        self.delete_on_termination = False

    def __repr__(self):
        return 'Attachment:%s' % self.id

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # Map each closing XML tag onto its attribute, converting types
        # where needed; unrecognized tags are stored verbatim.
        if name == 'attachmentId':
            self.id = value
        elif name == 'deviceIndex':
            self.device_index = int(value)
        elif name == 'deleteOnTermination':
            self.delete_on_termination = (value.lower() == 'true')
        elif name == 'instanceId':
            self.instance_id = value
        elif name == 'instanceOwnerId':
            self.instance_owner_id = value
        elif name == 'status':
            self.status = value
        elif name == 'attachTime':
            self.attach_time = value
        else:
            setattr(self, name, value)
class NetworkInterface(TaggedEC2Object):
    """
    An Elastic Network Interface.

    :ivar id: The ID of the ENI.
    :ivar subnet_id: The ID of the VPC subnet.
    :ivar vpc_id: The ID of the VPC.
    :ivar description: The description.
    :ivar owner_id: The ID of the owner of the ENI.
    :ivar requester_managed:
    :ivar status: The interface's status (available|in-use).
    :ivar mac_address: The MAC address of the interface.
    :ivar private_ip_address: The IP address of the interface within
        the subnet.
    :ivar source_dest_check: Flag to indicate whether to validate
        network traffic to or from this network interface.
    :ivar groups: List of security groups associated with the interface.
    :ivar attachment: The attachment object.
    :ivar private_ip_addresses: A list of PrivateIPAddress objects.
    """

    def __init__(self, connection=None):
        super(NetworkInterface, self).__init__(connection)
        self.id = None
        self.subnet_id = None
        self.vpc_id = None
        self.availability_zone = None
        self.description = None
        self.owner_id = None
        self.requester_managed = False
        self.status = None
        self.mac_address = None
        self.private_ip_address = None
        self.source_dest_check = None
        self.groups = []
        self.attachment = None
        self.private_ip_addresses = []

    def __repr__(self):
        return 'NetworkInterface:%s' % self.id

    def startElement(self, name, attrs, connection):
        """Delegate nested XML elements to sub-parsers.

        Returns the object that should receive subsequent SAX events for
        the element, or None to keep parsing on this object.
        """
        # Let the tag-handling base class claim the element first.
        retval = super(NetworkInterface, self).startElement(name, attrs, connection)
        if retval is not None:
            return retval
        if name == 'groupSet':
            self.groups = ResultSet([('item', Group)])
            return self.groups
        elif name == 'attachment':
            self.attachment = Attachment()
            return self.attachment
        elif name == 'privateIpAddressesSet':
            self.private_ip_addresses = ResultSet([('item', PrivateIPAddress)])
            return self.private_ip_addresses
        else:
            return None

    def endElement(self, name, value, connection):
        """Store a simple (leaf) XML value on the matching attribute."""
        if name == 'networkInterfaceId':
            self.id = value
        elif name == 'subnetId':
            self.subnet_id = value
        elif name == 'vpcId':
            self.vpc_id = value
        elif name == 'availabilityZone':
            self.availability_zone = value
        elif name == 'description':
            self.description = value
        elif name == 'ownerId':
            self.owner_id = value
        elif name == 'requesterManaged':
            if value.lower() == 'true':
                self.requester_managed = True
            else:
                self.requester_managed = False
        elif name == 'status':
            self.status = value
        elif name == 'macAddress':
            self.mac_address = value
        elif name == 'privateIpAddress':
            self.private_ip_address = value
        elif name == 'sourceDestCheck':
            if value.lower() == 'true':
                self.source_dest_check = True
            else:
                self.source_dest_check = False
        else:
            # Unknown tags are kept verbatim so no data is dropped.
            setattr(self, name, value)

    def _update(self, updated):
        # Copy all state from a freshly-fetched instance onto this one.
        self.__dict__.update(updated.__dict__)

    def update(self, validate=False, dry_run=False):
        """
        Update the data associated with this ENI by querying EC2.

        :type validate: bool
        :param validate: By default, if EC2 returns no data about the
                         ENI the update method returns quietly.  If
                         the validate param is True, however, it will
                         raise a ValueError exception if no data is
                         returned from EC2.
        """
        rs = self.connection.get_all_network_interfaces(
            [self.id],
            dry_run=dry_run
        )
        if len(rs) > 0:
            self._update(rs[0])
        elif validate:
            raise ValueError('%s is not a valid ENI ID' % self.id)
        return self.status

    def attach(self, instance_id, device_index, dry_run=False):
        """
        Attach this ENI to an EC2 instance.

        :type instance_id: str
        :param instance_id: The ID of the EC2 instance to which it will
                            be attached.

        :type device_index: int
        :param device_index: The interface nunber, N, on the instance (eg. ethN)

        :rtype: bool
        :return: True if successful
        """
        return self.connection.attach_network_interface(
            self.id,
            instance_id,
            device_index,
            dry_run=dry_run
        )

    def detach(self, force=False, dry_run=False):
        """
        Detach this ENI from an EC2 instance.

        :type force: bool
        :param force: Forces detachment if the previous detachment
                      attempt did not occur cleanly.

        :rtype: bool
        :return: True if successful
        """
        # Detach operates on the attachment ID, not the ENI ID; None if
        # the interface was never attached.
        attachment_id = getattr(self.attachment, 'id', None)

        return self.connection.detach_network_interface(
            attachment_id,
            force,
            dry_run=dry_run
        )

    def delete(self, dry_run=False):
        """Delete this ENI; returns the connection call's result."""
        return self.connection.delete_network_interface(
            self.id,
            dry_run=dry_run
        )
class PrivateIPAddress(object):
    """A private IP address assigned to a network interface.

    :ivar private_ip_address: The IP address string.
    :ivar primary: True if this is the interface's primary address.
    """

    def __init__(self, connection=None, private_ip_address=None,
                 primary=None):
        self.connection = connection
        self.private_ip_address = private_ip_address
        self.primary = primary

    def startElement(self, name, attrs, connection):
        # No nested XML elements to hand off to.
        pass

    def endElement(self, name, value, connection):
        if name == 'privateIpAddress':
            self.private_ip_address = value
        elif name == 'primary':
            # EC2 serializes booleans as the strings 'true'/'false'.
            self.primary = (value.lower() == 'true')

    def __repr__(self):
        return "PrivateIPAddress(%s, primary=%s)" % (self.private_ip_address,
                                                     self.primary)
class NetworkInterfaceCollection(list):
    """A list of NetworkInterfaceSpecification objects that knows how to
    serialize itself into EC2 query parameters for RunInstances."""

    def __init__(self, *interfaces):
        self.extend(interfaces)

    def build_list_params(self, params, prefix=''):
        """Flatten each spec into ``params`` using EC2's indexed
        ``<prefix>NetworkInterface.N.*`` naming scheme.

        :param params: dict mutated in place with the query parameters.
        :param prefix: optional string prepended to every key.
        :raises BotoClientError: if associate_public_ip_address is used
            with more than one interface or a non-zero device index.
        """
        for i, spec in enumerate(self):
            full_prefix = '%sNetworkInterface.%s.' % (prefix, i)
            if spec.network_interface_id is not None:
                params[full_prefix + 'NetworkInterfaceId'] = \
                        str(spec.network_interface_id)
            if spec.device_index is not None:
                params[full_prefix + 'DeviceIndex'] = \
                        str(spec.device_index)
            else:
                # Note: unlike the branch above, the default is stored as
                # the int 0; the associate_public_ip_address check below
                # accepts both 0 and '0'.
                params[full_prefix + 'DeviceIndex'] = 0
            if spec.subnet_id is not None:
                params[full_prefix + 'SubnetId'] = str(spec.subnet_id)
            if spec.description is not None:
                params[full_prefix + 'Description'] = str(spec.description)
            if spec.delete_on_termination is not None:
                params[full_prefix + 'DeleteOnTermination'] = \
                        'true' if spec.delete_on_termination else 'false'
            if spec.secondary_private_ip_address_count is not None:
                params[full_prefix + 'SecondaryPrivateIpAddressCount'] = \
                        str(spec.secondary_private_ip_address_count)
            if spec.private_ip_address is not None:
                params[full_prefix + 'PrivateIpAddress'] = \
                        str(spec.private_ip_address)
            if spec.groups is not None:
                for j, group_id in enumerate(spec.groups):
                    query_param_key = '%sSecurityGroupId.%s' % (full_prefix, j)
                    params[query_param_key] = str(group_id)
            if spec.private_ip_addresses is not None:
                for k, ip_addr in enumerate(spec.private_ip_addresses):
                    query_param_key_prefix = (
                        '%sPrivateIpAddresses.%s' % (full_prefix, k))
                    params[query_param_key_prefix + '.PrivateIpAddress'] = \
                            str(ip_addr.private_ip_address)
                    if ip_addr.primary is not None:
                        params[query_param_key_prefix + '.Primary'] = \
                                'true' if ip_addr.primary else 'false'

            # Associating Public IPs have special logic around them:
            #
            # * Only assignable on an device_index of ``0``
            # * Only on one interface
            # * Only if there are no other interfaces being created
            # * Only if it's a new interface (which we can't really guard
            #   against)
            #
            # More details on http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-RunInstances.html
            if spec.associate_public_ip_address is not None:
                if not params[full_prefix + 'DeviceIndex'] in (0, '0'):
                    raise BotoClientError(
                        "Only the interface with device index of 0 can " + \
                        "be provided when using " + \
                        "'associate_public_ip_address'."
                    )

                if len(self) > 1:
                    raise BotoClientError(
                        "Only one interface can be provided when using " + \
                        "'associate_public_ip_address'."
                    )

                key = full_prefix + 'AssociatePublicIpAddress'

                if spec.associate_public_ip_address:
                    params[key] = 'true'
                else:
                    params[key] = 'false'
class NetworkInterfaceSpecification(object):
    """A plain data holder describing one network interface to create
    with RunInstances; consumed by
    NetworkInterfaceCollection.build_list_params."""

    def __init__(self, network_interface_id=None, device_index=None,
                 subnet_id=None, description=None, private_ip_address=None,
                 groups=None, delete_on_termination=None,
                 private_ip_addresses=None,
                 secondary_private_ip_address_count=None,
                 associate_public_ip_address=None):
        # Store every constructor argument verbatim on the instance,
        # under the same name it was passed as.
        attrs = dict(locals())
        del attrs['self']
        self.__dict__.update(attrs)
| mit |
eviljeff/zamboni | mkt/site/tests/test_helpers.py | 2 | 7354 | # -*- coding: utf-8 -*-
from django.conf import settings
import fudge
import mock
from datetime import datetime, timedelta
from jingo import env
from nose.tools import eq_
from urlparse import urljoin
import mkt.site.tests
from mkt.site.helpers import absolutify, css, js, product_as_dict, timesince
from mkt.site.fixtures import fixture
from mkt.site.utils import urlparams
from mkt.webapps.models import Webapp
class TestCSS(mkt.site.tests.TestCase):
    """The css() helper should pass debug=True to jingo_minify when
    TEMPLATE_DEBUG is on or ?debug=true is in the querystring, and
    debug=False only in production with no override."""

    @mock.patch.object(settings, 'TEMPLATE_DEBUG', True)
    @fudge.patch('mkt.site.helpers.jingo_minify_helpers')
    def test_dev_unminified(self, fake_css):
        request = mock.Mock()
        request.GET = {}
        context = {'request': request}

        # Should be called with `debug=True`.
        fake_css.expects('css').with_args('mkt/devreg', False, True)
        css(context, 'mkt/devreg')

    @mock.patch.object(settings, 'TEMPLATE_DEBUG', False)
    @fudge.patch('mkt.site.helpers.jingo_minify_helpers')
    def test_prod_minified(self, fake_css):
        request = mock.Mock()
        request.GET = {}
        context = {'request': request}

        # Should be called with `debug=False`.
        fake_css.expects('css').with_args('mkt/devreg', False, False)
        css(context, 'mkt/devreg')

    @mock.patch.object(settings, 'TEMPLATE_DEBUG', True)
    @fudge.patch('mkt.site.helpers.jingo_minify_helpers')
    def test_dev_unminified_overridden(self, fake_css):
        request = mock.Mock()
        request.GET = {'debug': 'true'}
        context = {'request': request}

        # Should be called with `debug=True`.
        fake_css.expects('css').with_args('mkt/devreg', False, True)
        css(context, 'mkt/devreg')

    @mock.patch.object(settings, 'TEMPLATE_DEBUG', False)
    @fudge.patch('mkt.site.helpers.jingo_minify_helpers')
    def test_prod_unminified_overridden(self, fake_css):
        request = mock.Mock()
        request.GET = {'debug': 'true'}
        context = {'request': request}

        # Should be called with `debug=True`.
        fake_css.expects('css').with_args('mkt/devreg', False, True)
        css(context, 'mkt/devreg')
class TestJS(mkt.site.tests.TestCase):
    """Mirror of TestCSS for the js() helper: the debug flag forwarded to
    jingo_minify must follow TEMPLATE_DEBUG and the ?debug=true override."""

    @mock.patch.object(settings, 'TEMPLATE_DEBUG', True)
    @fudge.patch('mkt.site.helpers.jingo_minify_helpers')
    def test_dev_unminified(self, fake_js):
        request = mock.Mock()
        request.GET = {}
        context = {'request': request}

        # Should be called with `debug=True`.
        fake_js.expects('js').with_args('mkt/devreg', True, False, False)
        js(context, 'mkt/devreg')

    @mock.patch.object(settings, 'TEMPLATE_DEBUG', False)
    @fudge.patch('mkt.site.helpers.jingo_minify_helpers')
    def test_prod_minified(self, fake_js):
        request = mock.Mock()
        request.GET = {}
        context = {'request': request}

        # Should be called with `debug=False`.
        fake_js.expects('js').with_args('mkt/devreg', False, False, False)
        js(context, 'mkt/devreg')

    @mock.patch.object(settings, 'TEMPLATE_DEBUG', True)
    @fudge.patch('mkt.site.helpers.jingo_minify_helpers')
    def test_dev_unminified_overridden(self, fake_js):
        request = mock.Mock()
        request.GET = {'debug': 'true'}
        context = {'request': request}

        # Should be called with `debug=True`.
        fake_js.expects('js').with_args('mkt/devreg', True, False, False)
        js(context, 'mkt/devreg')

    @mock.patch.object(settings, 'TEMPLATE_DEBUG', False)
    @fudge.patch('mkt.site.helpers.jingo_minify_helpers')
    def test_prod_unminified_overridden(self, fake_js):
        request = mock.Mock()
        request.GET = {'debug': 'true'}
        context = {'request': request}

        # Should be called with `debug=True`.
        fake_js.expects('js').with_args('mkt/devreg', True, False, False)
        js(context, 'mkt/devreg')
class TestProductAsDict(mkt.site.tests.TestCase):
    """product_as_dict() should serialize a Webapp plus request 'src' into
    the expected dict, including token and purchase-record URLs."""

    fixtures = fixture('webapp_337141')

    def test_correct(self):
        request = mock.Mock(GET={'src': 'poop'})
        app = Webapp.objects.get(id=337141)
        data = product_as_dict(request, app)
        eq_(data['src'], 'poop')
        eq_(data['is_packaged'], False)
        eq_(data['categories'], [])
        eq_(data['name'], 'Something Something Steamcube!')
        eq_(data['id'], '337141')
        eq_(data['manifest_url'],
            'http://micropipes.com/temp/steamcube.webapp')
        # URL assertions use substring checks because the full URLs carry
        # host/signature parts that vary.
        tokenUrl = '/reviewers/app/something-something/token'
        recordUrl = '/app/something-something/purchase/record?src=poop'
        assert tokenUrl in data['tokenUrl'], (
            'Invalid Token URL. Expected %s; Got %s'
            % (tokenUrl, data['tokenUrl']))
        assert recordUrl in data['recordUrl'], (
            'Invalid Record URL. Expected %s; Got %s'
            % (recordUrl, data['recordUrl']))
def test_absolutify():
    """absolutify() joins relative paths to SITE_URL and leaves already
    absolute URLs untouched."""
    eq_(absolutify('/woo'), urljoin(settings.SITE_URL, '/woo'))
    eq_(absolutify('https://marketplace.firefox.com'),
        'https://marketplace.firefox.com')
def test_timesince():
    """timesince() humanizes a past datetime and returns '' for None."""
    month_ago = datetime.now() - timedelta(days=30)
    eq_(timesince(month_ago), u'1 month ago')
    eq_(timesince(None), u'')
def render(s, context=None):
    """Render the template string *s* through the module's jingo env.

    :param s: template source string.
    :param context: optional dict of template variables.
    """
    # BUG FIX: the original used a mutable default argument
    # (``context={}``); use None and substitute a fresh dict per call.
    return env.from_string(s).render(context or {})
@mock.patch('mkt.site.helpers.reverse')
def test_url(mock_reverse):
    """The url() template helper forwards args/kwargs to reverse() and
    drops the unsupported 'host' kwarg."""
    render('{{ url("viewname", 1, z=2) }}')
    mock_reverse.assert_called_with('viewname', args=(1,), kwargs={'z': 2})

    render('{{ url("viewname", 1, z=2, host="myhost") }}')
    mock_reverse.assert_called_with('viewname', args=(1,), kwargs={'z': 2})
def test_url_src():
    """url(..., src=...) appends the src value as a querystring param."""
    s = render('{{ url("mkt.developers.apps.edit", "a3615", src="xxx") }}')
    assert s.endswith('?src=xxx')
def test_f():
    # This makes sure there's no UnicodeEncodeError when doing the string
    # interpolation.
    eq_(render(u'{{ "foo {0}"|f("baré") }}'), u'foo baré')
def test_isotime():
    """The isotime filter renders a datetime as UTC ISO-8601 (note the
    timezone shift from the naive local input) and None as ''."""
    time = datetime(2009, 12, 25, 10, 11, 12)
    s = render('{{ d|isotime }}', {'d': time})
    eq_(s, '2009-12-25T18:11:12Z')

    s = render('{{ d|isotime }}', {'d': None})
    eq_(s, '')
def test_urlparams():
    """Exercise the urlparams filter: adding/replacing query params and
    fragments, and dropping params whose value is None."""
    url = '/developers'
    c = {'base': url,
         'base_frag': url + '#hash',
         'base_query': url + '?x=y',
         'sort': 'name', 'frag': 'frag'}

    # Adding a query.
    s = render('{{ base_frag|urlparams(sort=sort) }}', c)
    eq_(s, '%s?sort=name#hash' % url)

    # Adding a fragment.
    s = render('{{ base|urlparams(frag) }}', c)
    eq_(s, '%s#frag' % url)

    # Replacing a fragment.
    s = render('{{ base_frag|urlparams(frag) }}', c)
    eq_(s, '%s#frag' % url)

    # Adding query and fragment.
    s = render('{{ base_frag|urlparams(frag, sort=sort) }}', c)
    eq_(s, '%s?sort=name#frag' % url)

    # Adding query with existing params.
    s = render('{{ base_query|urlparams(frag, sort=sort) }}', c)
    eq_(s, '%s?sort=name&x=y#frag' % url)

    # Replacing a query param.
    s = render('{{ base_query|urlparams(frag, x="z") }}', c)
    eq_(s, '%s?x=z#frag' % url)

    # Params with value of None get dropped.
    s = render('{{ base|urlparams(sort=None) }}', c)
    eq_(s, url)

    # Removing a query
    s = render('{{ base_query|urlparams(x=None) }}', c)
    eq_(s, url)
def test_urlparams_unicode():
    """urlparams() must not blow up on URLs containing non-ASCII and
    replacement characters; only absence of an exception is asserted."""
    url = u'/xx?evil=reco\ufffd\ufffd\ufffd\u02f5'
    urlparams(url)
| bsd-3-clause |
manahl/arctic | tests/integration/tickstore/test_ts_read.py | 1 | 30190 | # -*- coding: utf-8 -*-
from datetime import datetime as dt
import numpy as np
import pandas as pd
import pytest
import six
from mock import patch, call, Mock
from numpy.testing.utils import assert_array_equal
from pandas import DatetimeIndex
from pandas.util.testing import assert_frame_equal
from pymongo import ReadPreference
from arctic._util import mongo_count
from arctic.date import DateRange, mktz, CLOSED_CLOSED, CLOSED_OPEN, OPEN_CLOSED, OPEN_OPEN
from arctic.exceptions import NoDataFoundException
def test_read(tickstore_lib):
    """Write two ticks with differing column sets, then read a column
    subset back: missing values become NaN, the two ticks share one
    chunk, and the index is localized to the default timezone."""
    data = [{'ASK': 1545.25,
             'ASKSIZE': 1002.0,
             'BID': 1545.0,
             'BIDSIZE': 55.0,
             'CUMVOL': 2187387.0,
             'DELETED_TIME': 0,
             'INSTRTYPE': 'FUT',
             'PRICE': 1545.0,
             'SIZE': 1.0,
             'TICK_STATUS': 0,
             'TRADEHIGH': 1561.75,
             'TRADELOW': 1537.25,
             'index': 1185076787070},
            {'CUMVOL': 354.0,
             'DELETED_TIME': 0,
             'PRICE': 1543.75,
             'SIZE': 354.0,
             'TRADEHIGH': 1543.75,
             'TRADELOW': 1543.75,
             'index': 1185141600600}]
    tickstore_lib.write('FEED::SYMBOL', data)

    df = tickstore_lib.read('FEED::SYMBOL', columns=['BID', 'ASK', 'PRICE'])

    assert_array_equal(df['ASK'].values, np.array([1545.25, np.nan]))
    assert_array_equal(df['BID'].values, np.array([1545, np.nan]))
    assert_array_equal(df['PRICE'].values, np.array([1545, 1543.75]))
    assert_array_equal(df.index.values.astype('object'), np.array([1185076787070000000, 1185141600600000000]))
    # Both ticks landed in a single bucket document ('c' = tick count).
    assert tickstore_lib._collection.find_one()['c'] == 2
    assert df.index.tzinfo == mktz()
def test_read_data_is_modifiable(tickstore_lib):
    """The DataFrame returned by read() must be writable (not backed by a
    read-only numpy buffer)."""
    data = [{'ASK': 1545.25,
             'ASKSIZE': 1002.0,
             'BID': 1545.0,
             'BIDSIZE': 55.0,
             'CUMVOL': 2187387.0,
             'DELETED_TIME': 0,
             'INSTRTYPE': 'FUT',
             'PRICE': 1545.0,
             'SIZE': 1.0,
             'TICK_STATUS': 0,
             'TRADEHIGH': 1561.75,
             'TRADELOW': 1537.25,
             'index': 1185076787070},
            {'CUMVOL': 354.0,
             'DELETED_TIME': 0,
             'PRICE': 1543.75,
             'SIZE': 354.0,
             'TRADEHIGH': 1543.75,
             'TRADELOW': 1543.75,
             'index': 1185141600600}]
    tickstore_lib.write('FEED::SYMBOL', data)

    df = tickstore_lib.read('FEED::SYMBOL', columns=['BID', 'ASK', 'PRICE'])
    # In-place assignment would raise if the underlying arrays were
    # read-only views onto the decompressed buffers.
    df[['BID', 'ASK', 'PRICE']] = 7
    assert np.all(df[['BID', 'ASK', 'PRICE']].values == np.array([[7, 7, 7], [7, 7, 7]]))
def test_read_allow_secondary(tickstore_lib):
    """allow_secondary=True should route the read through a collection
    configured with ReadPreference.NEAREST while returning the same data."""
    data = [{'ASK': 1545.25,
             'ASKSIZE': 1002.0,
             'BID': 1545.0,
             'BIDSIZE': 55.0,
             'CUMVOL': 2187387.0,
             'DELETED_TIME': 0,
             'INSTRTYPE': 'FUT',
             'PRICE': 1545.0,
             'SIZE': 1.0,
             'TICK_STATUS': 0,
             'TRADEHIGH': 1561.75,
             'TRADELOW': 1537.25,
             'index': 1185076787070},
            {'CUMVOL': 354.0,
             'DELETED_TIME': 0,
             'PRICE': 1543.75,
             'SIZE': 354.0,
             'TRADEHIGH': 1543.75,
             'TRADELOW': 1543.75,
             'index': 1185141600600}]
    tickstore_lib.write('FEED::SYMBOL', data)

    # Spy on pymongo and the tickstore's read-preference plumbing to
    # verify exactly how the secondary-allowed read is issued.
    with patch('pymongo.collection.Collection.find', side_effect=tickstore_lib._collection.find) as find:
        with patch('pymongo.collection.Collection.with_options', side_effect=tickstore_lib._collection.with_options) as with_options:
            with patch.object(tickstore_lib, '_read_preference', side_effect=tickstore_lib._read_preference) as read_pref:
                df = tickstore_lib.read('FEED::SYMBOL', columns=['BID', 'ASK', 'PRICE'], allow_secondary=True)
    assert read_pref.call_args_list == [call(True)]
    assert with_options.call_args_list == [call(read_preference=ReadPreference.NEAREST)]

    assert find.call_args_list == [call({'sy': 'FEED::SYMBOL'}, sort=[('s', 1)], projection={'s': 1, '_id': 0}),
                                   call({'sy': 'FEED::SYMBOL', 's': {'$lte': dt(2007, 8, 21, 3, 59, 47, 70000)}},
                                        projection={'sy': 1, 'cs.PRICE': 1, 'i': 1, 'cs.BID': 1, 's': 1, 'im': 1, 'v': 1, 'cs.ASK': 1})]

    assert_array_equal(df['ASK'].values, np.array([1545.25, np.nan]))
    assert tickstore_lib._collection.find_one()['c'] == 2
def test_read_symbol_as_column(tickstore_lib):
    """Requesting the synthetic 'SYMBOL' column should fill every row
    with the symbol the tick was stored under."""
    data = [{'ASK': 1545.25,
             'index': 1185076787070},
            {'CUMVOL': 354.0,
             'index': 1185141600600}]
    tickstore_lib.write('FEED::SYMBOL', data)

    df = tickstore_lib.read('FEED::SYMBOL', columns=['SYMBOL', 'CUMVOL'])
    assert all(df['SYMBOL'].values == ['FEED::SYMBOL'])
def test_read_multiple_symbols(tickstore_lib):
    """Reading a list of symbols merges both series by timestamp and
    labels rows via the 'SYMBOL' column."""
    data1 = [{'ASK': 1545.25,
              'ASKSIZE': 1002.0,
              'BID': 1545.0,
              'BIDSIZE': 55.0,
              'CUMVOL': 2187387.0,
              'DELETED_TIME': 0,
              'INSTRTYPE': 'FUT',
              'PRICE': 1545.0,
              'SIZE': 1.0,
              'TICK_STATUS': 0,
              'TRADEHIGH': 1561.75,
              'TRADELOW': 1537.25,
              'index': 1185076787070}, ]
    data2 = [{'CUMVOL': 354.0,
              'DELETED_TIME': 0,
              'PRICE': 1543.75,
              'SIZE': 354.0,
              'TRADEHIGH': 1543.75,
              'TRADELOW': 1543.75,
              'index': 1185141600600}]

    # Written in reverse order on purpose; read() must still interleave
    # by timestamp, not write order.
    tickstore_lib.write('BAR', data2)
    tickstore_lib.write('FOO', data1)

    df = tickstore_lib.read(['FOO', 'BAR'], columns=['BID', 'ASK', 'PRICE'])

    assert all(df['SYMBOL'].values == ['FOO', 'BAR'])
    assert_array_equal(df['ASK'].values, np.array([1545.25, np.nan]))
    assert_array_equal(df['BID'].values, np.array([1545, np.nan]))
    assert_array_equal(df['PRICE'].values, np.array([1545, 1543.75]))
    assert_array_equal(df.index.values.astype('object'), np.array([1185076787070000000, 1185141600600000000]))
    # Each symbol's single tick lives in its own one-tick bucket.
    assert tickstore_lib._collection.find_one()['c'] == 1
@pytest.mark.parametrize('chunk_size', [1, 100])
def test_read_all_cols_all_dtypes(tickstore_lib, chunk_size):
    """Round-trip ticks containing float/string/int columns (including columns
    missing from one of the two rows) and compare against the written data,
    at both a 1-row and a 100-row chunk size."""
    data = [{'f': 0.1,
             'of': 0.2,
             's': 's',
             'os': 'os',
             'l': 1,
             'ol': 2,
             'index': dt(1970, 1, 1, tzinfo=mktz('UTC')),
             },
            {'f': 0.3,
             'nf': 0.4,
             's': 't',
             'ns': 'ns',
             'l': 3,
             'nl': 4,
             'index': dt(1970, 1, 1, 0, 0, 1, tzinfo=mktz('UTC')),
             },
            ]
    tickstore_lib._chunk_size = chunk_size
    tickstore_lib.write('sym', data)
    df = tickstore_lib.read('sym', columns=None)

    assert df.index.tzinfo == mktz()

    # The below is probably more trouble than it's worth, but we *should*
    # be able to roundtrip data and get the same answer...

    # Ints become floats
    data[0]['l'] = float(data[0]['l'])
    # Treat missing strings as None
    data[0]['ns'] = None
    data[1]['os'] = None
    index = DatetimeIndex([dt(1970, 1, 1, tzinfo=mktz('UTC')),
                           dt(1970, 1, 1, 0, 0, 1, tzinfo=mktz('UTC'))],
                          )
    # Normalise the returned (local-tz) index back to UTC before comparing.
    df.index = df.index.tz_convert(mktz('UTC'))
    expected = pd.DataFrame(data, index=index)
    expected = expected[df.columns]
    assert_frame_equal(expected, df, check_names=False)
# Shared fixture: five consecutive daily ticks (2013-01-01 .. 2013-01-05).
# Column 'a' appears only in the first row, 'c' only in the later rows, so
# date-range reads can assert NaN-filling of missing columns.
DUMMY_DATA = [
    {'a': 1.,
     'b': 2.,
     'index': dt(2013, 1, 1, tzinfo=mktz('Europe/London'))
     },
    {'b': 3.,
     'c': 4.,
     'index': dt(2013, 1, 2, tzinfo=mktz('Europe/London'))
     },
    {'b': 5.,
     'c': 6.,
     'index': dt(2013, 1, 3, tzinfo=mktz('Europe/London'))
     },
    {'b': 7.,
     'c': 8.,
     'index': dt(2013, 1, 4, tzinfo=mktz('Europe/London'))
     },
    {'b': 9.,
     'c': 10.,
     'index': dt(2013, 1, 5, tzinfo=mktz('Europe/London'))
     },
]
def test_date_range(tickstore_lib):
    """Date-range reads return the right rows, touch the minimum number of
    chunks, and honour the four interval open/closed combinations."""
    tickstore_lib.write('SYM', DUMMY_DATA)
    df = tickstore_lib.read('SYM', date_range=DateRange(20130101, 20130103), columns=None)
    assert_array_equal(df['a'].values, np.array([1, np.nan, np.nan]))
    assert_array_equal(df['b'].values, np.array([2., 3., 5.]))
    assert_array_equal(df['c'].values, np.array([np.nan, 4., 6.]))

    tickstore_lib.delete('SYM')

    # Chunk every 3 symbols and lets have some fun
    tickstore_lib._chunk_size = 3
    tickstore_lib.write('SYM', DUMMY_DATA)

    # Spy on the driver-level find() so each mongo_count below can re-run the
    # exact query the read issued and count how many chunks it matched.
    with patch('pymongo.collection.Collection.find', side_effect=tickstore_lib._collection.find) as f:
        df = tickstore_lib.read('SYM', date_range=DateRange(20130101, 20130103), columns=None)
        assert_array_equal(df['b'].values, np.array([2., 3., 5.]))
        assert mongo_count(tickstore_lib._collection, filter=f.call_args_list[-1][0][0]) == 1
        df = tickstore_lib.read('SYM', date_range=DateRange(20130102, 20130103), columns=None)
        assert_array_equal(df['b'].values, np.array([3., 5.]))
        assert mongo_count(tickstore_lib._collection, filter=f.call_args_list[-1][0][0]) == 1
        df = tickstore_lib.read('SYM', date_range=DateRange(20130103, 20130103), columns=None)
        assert_array_equal(df['b'].values, np.array([5.]))
        assert mongo_count(tickstore_lib._collection, filter=f.call_args_list[-1][0][0]) == 1

        # Ranges crossing the chunk boundary (after 2013-01-03) hit two chunks.
        df = tickstore_lib.read('SYM', date_range=DateRange(20130102, 20130104), columns=None)
        assert_array_equal(df['b'].values, np.array([3., 5., 7.]))
        assert mongo_count(tickstore_lib._collection, filter=f.call_args_list[-1][0][0]) == 2
        df = tickstore_lib.read('SYM', date_range=DateRange(20130102, 20130105), columns=None)
        assert_array_equal(df['b'].values, np.array([3., 5., 7., 9.]))
        assert mongo_count(tickstore_lib._collection, filter=f.call_args_list[-1][0][0]) == 2
        df = tickstore_lib.read('SYM', date_range=DateRange(20130103, 20130104), columns=None)
        assert_array_equal(df['b'].values, np.array([5., 7.]))
        assert mongo_count(tickstore_lib._collection, filter=f.call_args_list[-1][0][0]) == 2
        df = tickstore_lib.read('SYM', date_range=DateRange(20130103, 20130105), columns=None)
        assert_array_equal(df['b'].values, np.array([5., 7., 9.]))
        assert mongo_count(tickstore_lib._collection, filter=f.call_args_list[-1][0][0]) == 2
        df = tickstore_lib.read('SYM', date_range=DateRange(20130104, 20130105), columns=None)
        assert_array_equal(df['b'].values, np.array([7., 9.]))
        assert mongo_count(tickstore_lib._collection, filter=f.call_args_list[-1][0][0]) == 1

        # Test the different open-closed behaviours
        df = tickstore_lib.read('SYM', date_range=DateRange(20130104, 20130105, CLOSED_CLOSED), columns=None)
        assert_array_equal(df['b'].values, np.array([7., 9.]))
        df = tickstore_lib.read('SYM', date_range=DateRange(20130104, 20130105, CLOSED_OPEN), columns=None)
        assert_array_equal(df['b'].values, np.array([7.]))
        df = tickstore_lib.read('SYM', date_range=DateRange(20130104, 20130105, OPEN_CLOSED), columns=None)
        assert_array_equal(df['b'].values, np.array([9.]))
        df = tickstore_lib.read('SYM', date_range=DateRange(20130104, 20130105, OPEN_OPEN), columns=None)
        assert_array_equal(df['b'].values, np.array([]))
def test_date_range_end_not_in_range(tickstore_lib):
    """A query whose end lands before the second chunk must fetch only one chunk."""
    ticks = [
        {'a': 1., 'b': 2., 'index': dt(2013, 1, 1, tzinfo=mktz('Europe/London'))},
        {'b': 3., 'c': 4., 'index': dt(2013, 1, 2, 10, 1, tzinfo=mktz('Europe/London'))},
    ]
    tickstore_lib._chunk_size = 1
    tickstore_lib.write('SYM', ticks)

    with patch.object(tickstore_lib._collection, 'find', side_effect=tickstore_lib._collection.find) as spy:
        result = tickstore_lib.read('SYM', date_range=DateRange(20130101, dt(2013, 1, 2, 9, 0)), columns=None)
        assert_array_equal(result['b'].values, np.array([2.]))
        # Re-run the issued query and confirm it matched a single chunk.
        assert mongo_count(tickstore_lib._collection, filter=spy.call_args_list[-1][0][0]) == 1
@pytest.mark.parametrize('tz_name', ['UTC',
                                     'Europe/London',  # Sometimes ahead of UTC
                                     'America/New_York',  # Behind UTC
                                     ])
def test_date_range_default_timezone(tickstore_lib, tz_name):
    """
    We assume naive datetimes are user-local
    """
    DUMMY_DATA = [
        {'a': 1.,
         'b': 2.,
         'index': dt(2013, 1, 1, tzinfo=mktz(tz_name))
         },
        # Half-way through the year
        {'b': 3.,
         'c': 4.,
         'index': dt(2013, 7, 1, tzinfo=mktz(tz_name))
         },
    ]
    # tzlocal is mocked so the naive DateRange bounds below resolve in tz_name.
    with patch('tzlocal.get_localzone', return_value=Mock(zone=tz_name)):
        tickstore_lib._chunk_size = 1
        tickstore_lib.write('SYM', DUMMY_DATA)
        df = tickstore_lib.read('SYM', date_range=DateRange(20130101, 20130701), columns=None)
        assert df.index.tzinfo == mktz()
        assert len(df) == 2
        assert df.index[1] == dt(2013, 7, 1, tzinfo=mktz(tz_name))

        # Single-day ranges at each end of the data pick up exactly one row.
        df = tickstore_lib.read('SYM', date_range=DateRange(20130101, 20130101), columns=None)
        assert len(df) == 1
        assert df.index.tzinfo == mktz()

        df = tickstore_lib.read('SYM', date_range=DateRange(20130701, 20130701), columns=None)
        assert len(df) == 1
        assert df.index.tzinfo == mktz()
def test_date_range_no_bounds(tickstore_lib):
    """Open-ended DateRanges: a missing start reads from the beginning; a
    missing end defaults to roughly one month past the start (cases 2.x)."""
    DUMMY_DATA = [
        {'a': 1.,
         'b': 2.,
         'index': dt(2013, 1, 1, tzinfo=mktz('Europe/London'))
         },
        {'a': 3.,
         'b': 4.,
         'index': dt(2013, 1, 30, tzinfo=mktz('Europe/London'))
         },
        {'b': 5.,
         'c': 6.,
         'index': dt(2013, 2, 2, 10, 1, tzinfo=mktz('Europe/London'))
         },
    ]
    tickstore_lib._chunk_size = 1
    tickstore_lib.write('SYM', DUMMY_DATA)

    # 1) No start, no end
    df = tickstore_lib.read('SYM', columns=None)
    assert_array_equal(df['b'].values, np.array([2., 4.]))

    # 1.2) Start before the real start
    df = tickstore_lib.read('SYM', date_range=DateRange(20121231), columns=None)
    assert_array_equal(df['b'].values, np.array([2., 4.]))

    # 2.1) Only go one month out
    df = tickstore_lib.read('SYM', date_range=DateRange(20130101), columns=None)
    assert_array_equal(df['b'].values, np.array([2., 4.]))

    # 2.2) Only go one month out
    df = tickstore_lib.read('SYM', date_range=DateRange(20130102), columns=None)
    assert_array_equal(df['b'].values, np.array([4.]))

    # 3) No start
    df = tickstore_lib.read('SYM', date_range=DateRange(end=20130102), columns=None)
    assert_array_equal(df['b'].values, np.array([2.]))

    # 4) Outside bounds
    df = tickstore_lib.read('SYM', date_range=DateRange(end=20131212), columns=None)
    assert_array_equal(df['b'].values, np.array([2., 4., 5.]))
def test_date_range_BST(tickstore_lib):
    """During British Summer Time, London-local and UTC bounds differ by an
    hour, so the same wall-clock range selects different rows per zone."""
    DUMMY_DATA = [
        {'a': 1.,
         'b': 2.,
         'index': dt(2013, 6, 1, 12, 00, tzinfo=mktz('Europe/London'))
         },
        {'a': 3.,
         'b': 4.,
         'index': dt(2013, 6, 1, 13, 00, tzinfo=mktz('Europe/London'))
         },
    ]
    tickstore_lib._chunk_size = 1
    tickstore_lib.write('SYM', DUMMY_DATA)

    df = tickstore_lib.read('SYM', columns=None)
    assert_array_equal(df['b'].values, np.array([2., 4.]))

    # NOTE(review): naive-bounds case left disabled by the original author —
    # presumably it depends on the runner's local timezone; confirm before enabling.
#    df = tickstore_lib.read('SYM', columns=None, date_range=DateRange(dt(2013, 6, 1, 12),
#                                                                      dt(2013, 6, 1, 13)))
#    assert_array_equal(df['b'].values, np.array([2., 4.]))

    df = tickstore_lib.read('SYM', columns=None, date_range=DateRange(dt(2013, 6, 1, 12, tzinfo=mktz('Europe/London')),
                                                                      dt(2013, 6, 1, 13, tzinfo=mktz('Europe/London'))))
    assert_array_equal(df['b'].values, np.array([2., 4.]))

    # 12:00 UTC is 13:00 London during BST, so only the second tick matches.
    df = tickstore_lib.read('SYM', columns=None, date_range=DateRange(dt(2013, 6, 1, 12, tzinfo=mktz('UTC')),
                                                                      dt(2013, 6, 1, 13, tzinfo=mktz('UTC'))))
    assert_array_equal(df['b'].values, np.array([4., ]))
def test_read_no_data(tickstore_lib):
    """Reading a symbol that was never written raises NoDataFoundException."""
    empty_range = DateRange(20131212, 20131212)
    with pytest.raises(NoDataFoundException):
        tickstore_lib.read('missing_sym', empty_range)
def test_write_no_tz(tickstore_lib):
    """Ticks carrying a naive (timezone-less) index are rejected at write time."""
    naive_tick = {'a': 1., 'b': 2., 'index': dt(2013, 6, 1, 12, 00)}
    with pytest.raises(ValueError):
        tickstore_lib.write('SYM', [naive_tick])
def test_read_out_of_order(tickstore_lib):
    """A DataFrame written with a non-monotonic index is still fully readable."""
    utc = mktz('UTC')
    frame = pd.DataFrame(
        [{'A': 120, 'D': 1}, {'A': 122, 'B': 2.0}, {'A': 3, 'B': 3.0, 'D': 1}],
        index=[dt(2013, 6, 1, 12, 00, tzinfo=utc),
               dt(2013, 6, 1, 11, 00, tzinfo=utc),  # deliberately out of order
               dt(2013, 6, 1, 13, 00, tzinfo=utc)])
    tickstore_lib._chunk_size = 3
    tickstore_lib.write('SYM', frame)
    tickstore_lib.read('SYM', columns=None)

    whole_day = DateRange(dt(2013, 6, 1, tzinfo=utc), dt(2013, 6, 2, tzinfo=utc))
    until_noon = DateRange(dt(2013, 6, 1, tzinfo=utc), dt(2013, 6, 1, 12, tzinfo=utc))
    assert len(tickstore_lib.read('SYM', columns=None, date_range=whole_day)) == 3
    assert len(tickstore_lib.read('SYM', columns=None, date_range=until_noon)) == 2
def test_read_chunk_boundaries(tickstore_lib):
    """A range starting inside a chunk returns only rows within the range,
    not everything in the partially-overlapped chunk, for one symbol or many."""
    SYM1_DATA = [
        {'a': 1.,
         'b': 2.,
         'index': dt(2013, 6, 1, 12, 00, tzinfo=mktz('UTC'))
         },
        {'a': 3.,
         'b': 4.,
         'index': dt(2013, 6, 1, 13, 00, tzinfo=mktz('UTC'))
         },
        # Chunk boundary here
        {'a': 5.,
         'b': 6.,
         'index': dt(2013, 6, 1, 14, 00, tzinfo=mktz('UTC'))
         }
    ]
    SYM2_DATA = [
        {'a': 7.,
         'b': 8.,
         'index': dt(2013, 6, 1, 12, 30, tzinfo=mktz('UTC'))
         },
        {'a': 9.,
         'b': 10.,
         'index': dt(2013, 6, 1, 13, 30, tzinfo=mktz('UTC'))
         },
        # Chunk boundary here
        {'a': 11.,
         'b': 12.,
         'index': dt(2013, 6, 1, 14, 30, tzinfo=mktz('UTC'))
         }
    ]
    # Two rows per chunk -> the third tick of each symbol starts a new chunk.
    tickstore_lib._chunk_size = 2
    tickstore_lib.write('SYM1', SYM1_DATA)
    tickstore_lib.write('SYM2', SYM2_DATA)

    # Range starts at 12:45, mid-way through each symbol's first chunk.
    assert len(tickstore_lib.read('SYM1', columns=None, date_range=DateRange(dt(2013, 6, 1, 12, 45, tzinfo=mktz('UTC')), dt(2013, 6, 1, 15, 00, tzinfo=mktz('UTC'))))) == 2
    assert len(tickstore_lib.read('SYM2', columns=None, date_range=DateRange(dt(2013, 6, 1, 12, 45, tzinfo=mktz('UTC')), dt(2013, 6, 1, 15, 00, tzinfo=mktz('UTC'))))) == 2

    assert len(tickstore_lib.read(['SYM1', 'SYM2'], columns=None, date_range=DateRange(dt(2013, 6, 1, 12, 45, tzinfo=mktz('UTC')), dt(2013, 6, 1, 15, 00, tzinfo=mktz('UTC'))))) == 4
def test_read_spanning_chunks(tickstore_lib):
    """The generated mongo query's $gte bound comes from the latest chunk
    start (across all symbols) that can still overlap the requested range."""
    SYM1_DATA = [
        {'a': 1.,
         'b': 2.,
         'index': dt(2013, 6, 1, 11, 00, tzinfo=mktz('UTC'))
         },
        {'a': 3.,
         'b': 4.,
         'index': dt(2013, 6, 1, 12, 00, tzinfo=mktz('UTC'))
         },
        # Chunk boundary here
        {'a': 5.,
         'b': 6.,
         'index': dt(2013, 6, 1, 14, 00, tzinfo=mktz('UTC'))
         }
    ]
    SYM2_DATA = [
        {'a': 7.,
         'b': 8.,
         'index': dt(2013, 6, 1, 12, 30, tzinfo=mktz('UTC'))
         },
        {'a': 9.,
         'b': 10.,
         'index': dt(2013, 6, 1, 13, 30, tzinfo=mktz('UTC'))
         },
        # Chunk boundary here
        {'a': 11.,
         'b': 12.,
         'index': dt(2013, 6, 1, 14, 30, tzinfo=mktz('UTC'))
         }
    ]
    tickstore_lib._chunk_size = 2
    tickstore_lib.write('SYM1', SYM1_DATA)
    tickstore_lib.write('SYM2', SYM2_DATA)

    # Even though the latest chunk that's the closest to the start point for SYM1 starts at 11:00, it ends before the start point,
    # so we want to ignore it and start from SYM2 (12:30) instead.
    assert tickstore_lib._mongo_date_range_query(
        ['SYM1', 'SYM2'],
        date_range=DateRange(dt(2013, 6, 1, 12, 45, tzinfo=mktz('UTC')),
                             dt(2013, 6, 1, 15, 00, tzinfo=mktz('UTC')))) == \
        {'s': {'$gte': dt(2013, 6, 1, 12, 30, tzinfo=mktz('UTC')), '$lte': dt(2013, 6, 1, 15, 0, tzinfo=mktz('UTC'))}}
def test_read_inside_range(tickstore_lib):
    """When no chunk spans the requested start, the query keeps the caller's
    start bound rather than widening to an earlier, irrelevant chunk."""
    SYM1_DATA = [
        {'a': 1.,
         'b': 2.,
         'index': dt(2013, 6, 1, 0, 00, tzinfo=mktz('UTC'))
         },
        {'a': 3.,
         'b': 4.,
         'index': dt(2013, 6, 1, 1, 00, tzinfo=mktz('UTC'))
         },
        # Chunk boundary here
        {'a': 5.,
         'b': 6.,
         'index': dt(2013, 6, 1, 14, 00, tzinfo=mktz('UTC'))
         }
    ]
    SYM2_DATA = [
        {'a': 7.,
         'b': 8.,
         'index': dt(2013, 6, 1, 12, 30, tzinfo=mktz('UTC'))
         },
        {'a': 9.,
         'b': 10.,
         'index': dt(2013, 6, 1, 13, 30, tzinfo=mktz('UTC'))
         },
        # Chunk boundary here
        {'a': 11.,
         'b': 12.,
         'index': dt(2013, 6, 1, 14, 30, tzinfo=mktz('UTC'))
         }
    ]
    tickstore_lib._chunk_size = 2
    tickstore_lib.write('SYM1', SYM1_DATA)
    tickstore_lib.write('SYM2', SYM2_DATA)

    # If there are no chunks spanning the range, we still cap the start range so that we don't
    # fetch SYM1's 0am--1am chunk
    assert tickstore_lib._mongo_date_range_query(
        ['SYM1', 'SYM2'],
        date_range=DateRange(dt(2013, 6, 1, 10, 0, tzinfo=mktz('UTC')),
                             dt(2013, 6, 1, 15, 0, tzinfo=mktz('UTC')))) == \
        {'s': {'$gte': dt(2013, 6, 1, 10, 0, tzinfo=mktz('UTC')), '$lte': dt(2013, 6, 1, 15, 0, tzinfo=mktz('UTC'))}}
def test_read_longs(tickstore_lib):
    """Integer columns round-trip; values missing from a row read back as NaN."""
    ticks = [
        {'a': 1, 'index': dt(2013, 6, 1, 12, 00, tzinfo=mktz('Europe/London'))},
        {'b': 4, 'index': dt(2013, 6, 1, 13, 00, tzinfo=mktz('Europe/London'))},
    ]
    tickstore_lib._chunk_size = 3
    tickstore_lib.write('SYM', ticks)
    tickstore_lib.read('SYM', columns=None)

    result = tickstore_lib.read('SYM', columns=None,
                                date_range=DateRange(dt(2013, 6, 1), dt(2013, 6, 2)))
    assert result['a'][0] == 1
    assert np.isnan(result['b'][0])
def test_read_with_image(tickstore_lib):
    """When include_images=True, the chunk's stored snapshot ('im') is emitted
    as an extra leading row at the image's own timestamp, restricted to the
    requested columns; without it only the tick updates are returned."""
    DUMMY_DATA = [
        {'a': 1.,
         'index': dt(2013, 1, 1, 11, 00, tzinfo=mktz('Europe/London'))
         },
        {
         'b': 4.,
         'index': dt(2013, 1, 1, 12, 00, tzinfo=mktz('Europe/London'))
         },
    ]
    # Add an image
    tickstore_lib.write('SYM', DUMMY_DATA)
    # Inject an image document directly: values {'a': 37, 'c': 2} as of 10:00,
    # i.e. one hour before the first real tick.
    tickstore_lib._collection.update_one({},
                                         {'$set':
                                          {'im': {'i':
                                                  {'a': 37.,
                                                   'c': 2.,
                                                   },
                                                  't': dt(2013, 1, 1, 10, tzinfo=mktz('Europe/London'))
                                                  }
                                           }
                                          }
                                         )

    dr = DateRange(dt(2013, 1, 1), dt(2013, 1, 2))
    # tickstore_lib.read('SYM', columns=None)
    df = tickstore_lib.read('SYM', columns=None, date_range=dr)
    assert df['a'][0] == 1

    # Read with the image as well - all columns
    df = tickstore_lib.read('SYM', columns=None, date_range=dr, include_images=True)
    assert set(df.columns) == set(('a', 'b', 'c'))
    assert_array_equal(df['a'].values, np.array([37, 1, np.nan]))
    assert_array_equal(df['b'].values, np.array([np.nan, np.nan, 4]))
    assert_array_equal(df['c'].values, np.array([2, np.nan, np.nan]))
    assert df.index[0] == dt(2013, 1, 1, 10, tzinfo=mktz('Europe/London'))
    assert df.index[1] == dt(2013, 1, 1, 11, tzinfo=mktz('Europe/London'))
    assert df.index[2] == dt(2013, 1, 1, 12, tzinfo=mktz('Europe/London'))

    # Read just columns from the updates
    df = tickstore_lib.read('SYM', columns=('a', 'b'), date_range=dr, include_images=True)
    assert set(df.columns) == set(('a', 'b'))
    assert_array_equal(df['a'].values, np.array([37, 1, np.nan]))
    assert_array_equal(df['b'].values, np.array([np.nan, np.nan, 4]))
    assert df.index[0] == dt(2013, 1, 1, 10, tzinfo=mktz('Europe/London'))
    assert df.index[1] == dt(2013, 1, 1, 11, tzinfo=mktz('Europe/London'))
    assert df.index[2] == dt(2013, 1, 1, 12, tzinfo=mktz('Europe/London'))

    # Read one column from the updates
    df = tickstore_lib.read('SYM', columns=('a',), date_range=dr, include_images=True)
    assert set(df.columns) == set(('a',))
    assert_array_equal(df['a'].values, np.array([37, 1]))
    assert df.index[0] == dt(2013, 1, 1, 10, tzinfo=mktz('Europe/London'))
    assert df.index[1] == dt(2013, 1, 1, 11, tzinfo=mktz('Europe/London'))

    # Read just the image column
    df = tickstore_lib.read('SYM', columns=['c'], date_range=dr, include_images=True)
    assert set(df.columns) == set(['c'])
    assert_array_equal(df['c'].values, np.array([2]))
    assert df.index[0] == dt(2013, 1, 1, 10, tzinfo=mktz('Europe/London'))
def test_read_with_metadata(tickstore_lib):
    """Metadata passed at write time is returned verbatim by read_metadata."""
    metadata = {'metadata': 'important data'}
    tick = {'index': dt(2013, 6, 1, 13, 00, tzinfo=mktz('Europe/London')),
            'price': 100.50, 'ticker': 'QQQ'}
    tickstore_lib.write('test', [tick], metadata=metadata)
    assert tickstore_lib.read_metadata('test') == metadata
def test_read_strings(tickstore_lib):
    """ASCII string columns survive a write/read round trip unchanged."""
    index = pd.Index(data=[dt(2016, 1, 1, 00, tzinfo=mktz('UTC')),
                           dt(2016, 1, 2, 00, tzinfo=mktz('UTC')),
                           dt(2016, 1, 3, 00, tzinfo=mktz('UTC'))], name='date')
    frame = pd.DataFrame(data={'data': ['A', 'B', 'C']}, index=index)
    tickstore_lib.write('test', frame)
    round_tripped = tickstore_lib.read('test')
    assert all(round_tripped['data'].values == frame['data'].values)
def test_read_utf8_strings(tickstore_lib):
    """UTF-8 encoded byte strings come back as decoded unicode on read."""
    data = ['一', '二', '三']  # Chinese character [one, two , three]
    if six.PY2:
        # Source literals are already bytes on PY2.
        utf8_data, unicode_data = data, [s.decode('utf8') for s in data]
    else:
        utf8_data, unicode_data = [s.encode('utf8') for s in data], data

    index = pd.Index(data=[dt(2016, 1, 1, 00, tzinfo=mktz('UTC')),
                           dt(2016, 1, 2, 00, tzinfo=mktz('UTC')),
                           dt(2016, 1, 3, 00, tzinfo=mktz('UTC'))], name='date')
    tickstore_lib.write('test', pd.DataFrame(data={'data': utf8_data}, index=index))
    round_tripped = tickstore_lib.read('test')
    assert all(round_tripped['data'].values == np.array(unicode_data))
def test_read_unicode_strings(tickstore_lib):
    """Unicode string columns survive a write/read round trip unchanged."""
    index = pd.Index(data=[dt(2016, 1, 1, 00, tzinfo=mktz('UTC')),
                           dt(2016, 1, 2, 00, tzinfo=mktz('UTC')),
                           dt(2016, 1, 3, 00, tzinfo=mktz('UTC'))], name='date')
    frame = pd.DataFrame(data={'data': [u'一', u'二', u'三']},  # Chinese character [one, two , three]
                         index=index)
    tickstore_lib.write('test', frame)
    round_tripped = tickstore_lib.read('test')
    assert all(round_tripped['data'].values == frame['data'].values)
def test_objects_fail(tickstore_lib):
    """Columns of arbitrary Python objects are rejected with a clear error."""
    class Fake(object):
        def __init__(self, val):
            self.val = val

        def fake(self):
            return self.val

    index = pd.Index(data=[dt(2016, 1, 1, 00, tzinfo=mktz('UTC')),
                           dt(2016, 1, 2, 00, tzinfo=mktz('UTC'))], name='date')
    frame = pd.DataFrame(data={'data': [Fake(1), Fake(2)]}, index=index)
    with pytest.raises(Exception) as excinfo:
        tickstore_lib.write('test', frame)
    assert 'Casting object column to string failed' in str(excinfo.value)
| lgpl-2.1 |
stratosgear/bravado | tests/requests_client/RequestsFutureAdapter/build_timeout_test.py | 4 | 2521 | from mock import Mock
import pytest
from requests.sessions import Session
from bravado.requests_client import RequestsFutureAdapter
@pytest.fixture
def request():
    # Minimal request dict accepted by RequestsFutureAdapter.
    # NOTE(review): this fixture shadows pytest's built-in `request` fixture;
    # renaming it would require touching every test signature — confirm intent.
    return dict(url='http://foo.com')
@pytest.fixture
def session():
    # Spec'd mock so attribute typos against requests.Session are caught.
    return Mock(spec=Session)
def test_no_timeouts(session, request):
    """No service-level timeout and no result timeout -> None."""
    future = RequestsFutureAdapter(session, request, {})
    assert future.build_timeout(result_timeout=None) is None
def test_service_timeout_and_result_timeout_None(session, request):
    """Service-level timeout alone wins when no result timeout is given."""
    future = RequestsFutureAdapter(session, request, {'timeout': 1})
    assert future.build_timeout(result_timeout=None) == 1
def test_no_service_timeout_and_result_timeout_not_None(session, request):
    """Result timeout alone wins when no service-level timeout is configured."""
    future = RequestsFutureAdapter(session, request, {})
    assert future.build_timeout(result_timeout=1) == 1
def test_service_timeout_lt_result_timeout(session, request):
    """The larger of the two timeouts is used (result timeout here)."""
    future = RequestsFutureAdapter(session, request, {'timeout': 10})
    assert future.build_timeout(result_timeout=11) == 11
def test_service_timeout_gt_result_timeout(session, request):
    """The larger of the two timeouts is used (service timeout here)."""
    future = RequestsFutureAdapter(session, request, {'timeout': 11})
    assert future.build_timeout(result_timeout=10) == 11
def test_service_timeout_None_result_timeout_not_None(session, request):
    """An explicit None service timeout defers to the result timeout."""
    future = RequestsFutureAdapter(session, request, {'timeout': None})
    assert future.build_timeout(result_timeout=10) == 10
def test_service_timeout_not_None_result_timeout_None(session, request):
    """A None result timeout defers to the configured service timeout."""
    future = RequestsFutureAdapter(session, request, {'timeout': 10})
    assert future.build_timeout(result_timeout=None) == 10
def test_both_timeouts_the_same(session, request):
    """Equal timeouts collapse to that single value."""
    future = RequestsFutureAdapter(session, request, {'timeout': 10})
    assert future.build_timeout(result_timeout=10) == 10
def test_connect_timeout_and_idle_timeout(session, request):
    """With a connect timeout configured, a (connect, read) tuple is built."""
    future = RequestsFutureAdapter(session, request, {'connect_timeout': 1, 'timeout': 11})
    assert future.build_timeout(result_timeout=None) == (1, 11)
def test_connect_timeout_only(session, request):
    """A connect timeout with no read timeout yields (connect, None)."""
    future = RequestsFutureAdapter(session, request, {'connect_timeout': 1})
    assert future.build_timeout(result_timeout=None) == (1, None)
| bsd-3-clause |
rhertzog/django | tests/humanize_tests/tests.py | 10 | 13154 | from __future__ import unicode_literals
import datetime
from decimal import Decimal
from unittest import skipIf
from django.contrib.humanize.templatetags import humanize
from django.template import Context, Template, defaultfilters
from django.test import SimpleTestCase, modify_settings, override_settings
from django.utils import translation
from django.utils.html import escape
from django.utils.timezone import get_fixed_timezone, utc
from django.utils.translation import ugettext as _
try:
    import pytz
except ImportError:
    # pytz is optional; the test that requires it is skipped via @skipIf.
    pytz = None
# Mock out datetime in some tests so they don't fail occasionally when they
# run too slow. Use a fixed datetime for datetime.now(). DST change in
# America/Chicago (the default time zone) happened on March 11th in 2012.
now = datetime.datetime(2012, 3, 9, 22, 30)
class MockDateTime(datetime.datetime):
    """datetime subclass whose now() is pinned to the module-level ``now``."""

    @classmethod
    def now(cls, tz=None):
        """Return the fixed ``now``, shifted into ``tz`` when one is given."""
        if tz is None or tz.utcoffset(now) is None:
            return now
        # Equivalent to now.replace(tzinfo=utc) converted into tz.
        return now.replace(tzinfo=tz) + tz.utcoffset(now)
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.humanize'})
class HumanizeTests(SimpleTestCase):
    """Tests for the django.contrib.humanize template filters."""

    def humanize_tester(self, test_list, result_list, method, normalize_result_func=escape):
        """Render each value in ``test_list`` through the humanize filter named
        ``method`` and assert it matches the corresponding ``result_list`` entry
        (normalized through ``normalize_result_func``, HTML-escape by default)."""
        for test_content, result in zip(test_list, result_list):
            t = Template('{%% load humanize %%}{{ test_content|%s }}' % method)
            rendered = t.render(Context(locals())).strip()
            self.assertEqual(rendered, normalize_result_func(result),
                             msg="%s test failed, produced '%s', should've produced '%s'" % (method, rendered, result))

    def test_ordinal(self):
        """ordinal converts integers to English ordinals; non-numbers pass through."""
        test_list = ('1', '2', '3', '4', '11', '12',
                     '13', '101', '102', '103', '111',
                     'something else', None)
        result_list = ('1st', '2nd', '3rd', '4th', '11th',
                       '12th', '13th', '101st', '102nd', '103rd',
                       '111th', 'something else', None)
        with translation.override('en'):
            self.humanize_tester(test_list, result_list, 'ordinal')

    def test_i18n_html_ordinal(self):
        """Allow html in output on i18n strings"""
        test_list = ('1', '2', '3', '4', '11', '12',
                     '13', '101', '102', '103', '111',
                     'something else', None)
        result_list = ('1<sup>er</sup>', '2<sup>e</sup>', '3<sup>e</sup>', '4<sup>e</sup>',
                       '11<sup>e</sup>', '12<sup>e</sup>', '13<sup>e</sup>', '101<sup>er</sup>',
                       '102<sup>e</sup>', '103<sup>e</sup>', '111<sup>e</sup>', 'something else',
                       'None')
        with translation.override('fr-fr'):
            # Identity normalizer: the <sup> markup must NOT be escaped.
            self.humanize_tester(test_list, result_list, 'ordinal', lambda x: x)

    def test_intcomma(self):
        """intcomma groups thousands with commas for ints, floats and strings."""
        test_list = (
            100, 1000, 10123, 10311, 1000000, 1234567.25, '100', '1000',
            '10123', '10311', '1000000', '1234567.1234567',
            Decimal('1234567.1234567'), None,
        )
        result_list = (
            '100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.25',
            '100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.1234567',
            '1,234,567.1234567', None,
        )
        with translation.override('en'):
            self.humanize_tester(test_list, result_list, 'intcomma')

    def test_l10n_intcomma(self):
        """intcomma under USE_L10N but without USE_THOUSAND_SEPARATOR behaves
        like the plain English formatting."""
        test_list = (
            100, 1000, 10123, 10311, 1000000, 1234567.25, '100', '1000',
            '10123', '10311', '1000000', '1234567.1234567',
            Decimal('1234567.1234567'), None,
        )
        result_list = (
            '100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.25',
            '100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.1234567',
            '1,234,567.1234567', None,
        )
        with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=False):
            with translation.override('en'):
                self.humanize_tester(test_list, result_list, 'intcomma')

    def test_intcomma_without_number_grouping(self):
        # Regression for #17414
        with translation.override('ja'), self.settings(USE_L10N=True):
            self.humanize_tester([100], ['100'], 'intcomma')

    def test_intword(self):
        """intword converts large integers to friendly English text."""
        test_list = (
            '100', '1000000', '1200000', '1290000', '1000000000', '2000000000',
            '6000000000000', '1300000000000000', '3500000000000000000000',
            '8100000000000000000000000000000000', None,
        )
        result_list = (
            '100', '1.0 million', '1.2 million', '1.3 million', '1.0 billion',
            '2.0 billion', '6.0 trillion', '1.3 quadrillion', '3.5 sextillion',
            '8.1 decillion', None,
        )
        with translation.override('en'):
            self.humanize_tester(test_list, result_list, 'intword')

    def test_i18n_intcomma(self):
        """intcomma uses locale-aware separators when USE_THOUSAND_SEPARATOR is on."""
        test_list = (100, 1000, 10123, 10311, 1000000, 1234567.25,
                     '100', '1000', '10123', '10311', '1000000', None)
        result_list = ('100', '1.000', '10.123', '10.311', '1.000.000', '1.234.567,25',
                       '100', '1.000', '10.123', '10.311', '1.000.000', None)
        with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True):
            with translation.override('de'):
                self.humanize_tester(test_list, result_list, 'intcomma')

    def test_i18n_intword(self):
        """intword output is translated, including plural forms (German here)."""
        test_list = (
            '100', '1000000', '1200000', '1290000', '1000000000', '2000000000',
            '6000000000000',
        )
        result_list = (
            '100', '1,0 Million', '1,2 Millionen', '1,3 Millionen',
            '1,0 Milliarde', '2,0 Milliarden', '6,0 Billionen',
        )
        with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True):
            with translation.override('de'):
                self.humanize_tester(test_list, result_list, 'intword')

    def test_apnumber(self):
        """apnumber spells out 1-9 per AP style; 10 and above stay numeric."""
        test_list = [str(x) for x in range(1, 11)]
        test_list.append(None)
        result_list = ('one', 'two', 'three', 'four', 'five', 'six',
                       'seven', 'eight', 'nine', '10', None)
        with translation.override('en'):
            self.humanize_tester(test_list, result_list, 'apnumber')

    def test_naturalday(self):
        """naturalday yields today/yesterday/tomorrow, else a formatted date."""
        today = datetime.date.today()
        yesterday = today - datetime.timedelta(days=1)
        tomorrow = today + datetime.timedelta(days=1)
        someday = today - datetime.timedelta(days=10)
        notdate = "I'm not a date value"
        test_list = (today, yesterday, tomorrow, someday, notdate, None)
        someday_result = defaultfilters.date(someday)
        result_list = (_('today'), _('yesterday'), _('tomorrow'),
                       someday_result, "I'm not a date value", None)
        self.humanize_tester(test_list, result_list, 'naturalday')

    def test_naturalday_tz(self):
        """naturalday respects the datetime's timezone when classifying the day."""
        today = datetime.date.today()
        tz_one = get_fixed_timezone(-720)
        tz_two = get_fixed_timezone(720)
        # Can be today or yesterday
        date_one = datetime.datetime(today.year, today.month, today.day, tzinfo=tz_one)
        naturalday_one = humanize.naturalday(date_one)
        # Can be today or tomorrow
        date_two = datetime.datetime(today.year, today.month, today.day, tzinfo=tz_two)
        naturalday_two = humanize.naturalday(date_two)
        # As 24h of difference they will never be the same
        self.assertNotEqual(naturalday_one, naturalday_two)

    @skipIf(pytz is None, "this test requires pytz")
    def test_naturalday_uses_localtime(self):
        # Regression for #18504
        # This is 2012-03-08T19:30:00-06:00 in America/Chicago
        dt = datetime.datetime(2012, 3, 9, 1, 30, tzinfo=utc)
        orig_humanize_datetime, humanize.datetime = humanize.datetime, MockDateTime
        try:
            with override_settings(TIME_ZONE="America/Chicago", USE_TZ=True):
                with translation.override('en'):
                    self.humanize_tester([dt], ['yesterday'], 'naturalday')
        finally:
            humanize.datetime = orig_humanize_datetime

    def test_naturaltime(self):
        """naturaltime renders relative deltas in both directions; timezone-aware
        and unresolvable-offset datetimes equal to now render as 'now'."""
        class naive(datetime.tzinfo):
            def utcoffset(self, dt):
                return None
        test_list = [
            now,
            now - datetime.timedelta(seconds=1),
            now - datetime.timedelta(seconds=30),
            now - datetime.timedelta(minutes=1, seconds=30),
            now - datetime.timedelta(minutes=2),
            now - datetime.timedelta(hours=1, minutes=30, seconds=30),
            now - datetime.timedelta(hours=23, minutes=50, seconds=50),
            now - datetime.timedelta(days=1),
            now - datetime.timedelta(days=500),
            now + datetime.timedelta(seconds=1),
            now + datetime.timedelta(seconds=30),
            now + datetime.timedelta(minutes=1, seconds=30),
            now + datetime.timedelta(minutes=2),
            now + datetime.timedelta(hours=1, minutes=30, seconds=30),
            now + datetime.timedelta(hours=23, minutes=50, seconds=50),
            now + datetime.timedelta(days=1),
            now + datetime.timedelta(days=2, hours=6),
            now + datetime.timedelta(days=500),
            now.replace(tzinfo=naive()),
            now.replace(tzinfo=utc),
        ]
        result_list = [
            'now',
            'a second ago',
            '30\xa0seconds ago',
            'a minute ago',
            '2\xa0minutes ago',
            'an hour ago',
            '23\xa0hours ago',
            '1\xa0day ago',
            '1\xa0year, 4\xa0months ago',
            'a second from now',
            '30\xa0seconds from now',
            'a minute from now',
            '2\xa0minutes from now',
            'an hour from now',
            '23\xa0hours from now',
            '1\xa0day from now',
            '2\xa0days, 6\xa0hours from now',
            '1\xa0year, 4\xa0months from now',
            'now',
            'now',
        ]
        # Because of the DST change, 2 days and 6 hours after the chosen
        # date in naive arithmetic is only 2 days and 5 hours after in
        # aware arithmetic.
        result_list_with_tz_support = result_list[:]
        assert result_list_with_tz_support[-4] == '2\xa0days, 6\xa0hours from now'
        # Fixed: this was previously a no-op comparison (`==`) instead of an
        # assignment, so the tz-aware expectation was never actually adjusted.
        result_list_with_tz_support[-4] = '2\xa0days, 5\xa0hours from now'

        orig_humanize_datetime, humanize.datetime = humanize.datetime, MockDateTime
        try:
            with translation.override('en'):
                self.humanize_tester(test_list, result_list, 'naturaltime')
                with override_settings(USE_TZ=True):
                    self.humanize_tester(
                        test_list, result_list_with_tz_support, 'naturaltime')
        finally:
            humanize.datetime = orig_humanize_datetime

    def test_naturaltime_as_documented(self):
        """
        #23340 -- Verify the documented behavior of humanize.naturaltime.
        """
        time_format = '%d %b %Y %H:%M:%S'
        documented_now = datetime.datetime.strptime('17 Feb 2007 16:30:00', time_format)

        test_data = (
            ('17 Feb 2007 16:30:00', 'now'),
            ('17 Feb 2007 16:29:31', '29 seconds ago'),
            ('17 Feb 2007 16:29:00', 'a minute ago'),
            ('17 Feb 2007 16:25:35', '4 minutes ago'),
            ('17 Feb 2007 15:30:29', '59 minutes ago'),
            ('17 Feb 2007 15:30:01', '59 minutes ago'),
            ('17 Feb 2007 15:30:00', 'an hour ago'),
            ('17 Feb 2007 13:31:29', '2 hours ago'),
            ('16 Feb 2007 13:31:29', '1 day, 2 hours ago'),
            ('16 Feb 2007 13:30:01', '1 day, 2 hours ago'),
            ('16 Feb 2007 13:30:00', '1 day, 3 hours ago'),
            ('17 Feb 2007 16:30:30', '30 seconds from now'),
            ('17 Feb 2007 16:30:29', '29 seconds from now'),
            ('17 Feb 2007 16:31:00', 'a minute from now'),
            ('17 Feb 2007 16:34:35', '4 minutes from now'),
            ('17 Feb 2007 17:30:29', 'an hour from now'),
            ('17 Feb 2007 18:31:29', '2 hours from now'),
            ('18 Feb 2007 16:31:29', '1 day from now'),
            ('26 Feb 2007 18:31:29', '1 week, 2 days from now'),
        )

        class DocumentedMockDateTime(datetime.datetime):
            @classmethod
            def now(cls, tz=None):
                if tz is None or tz.utcoffset(documented_now) is None:
                    return documented_now
                else:
                    # NOTE(review): uses the module-level `now` for the offset,
                    # not `documented_now` — presumably fine for fixed-offset
                    # zones; confirm if variable-offset tzs are ever passed.
                    return documented_now.replace(tzinfo=tz) + tz.utcoffset(now)

        orig_humanize_datetime = humanize.datetime
        humanize.datetime = DocumentedMockDateTime
        try:
            for test_time_string, expected_natural_time in test_data:
                test_time = datetime.datetime.strptime(test_time_string, time_format)
                natural_time = humanize.naturaltime(test_time).replace('\xa0', ' ')
                self.assertEqual(expected_natural_time, natural_time)
        finally:
            humanize.datetime = orig_humanize_datetime
| bsd-3-clause |
melviso/beatle | beatle/model/py/ArgsArgument.py | 2 | 1370 | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 22 22:31:28 2013
@author: mel
"""
from beatle.tran import TransactionalMethod, TransactionalMoveObject
from Argument import Argument
class ArgsArgument(Argument):
    """Tree-model node representing a Python '*args' variadic argument
    (fixed name 'args', rendered in the tree as '*args')."""
    # This node acts as a context container for its children.
    context_container = True
    #visual methods
    @TransactionalMethod('move argument {0}')
    def drop(self, to):
        """Drop this element onto another container (undoable move).
        Returns True when the move transaction was created, False otherwise."""
        target = to.inner_argument_container
        if not target or to.project != self.project:
            return False  # reject drops on non-containers or across projects
        index = 0  # trick for insert as first child
        TransactionalMoveObject(
            object=self, origin=self.parent, target=target, index=index)
        return True
    def __init__(self, **kwargs):
        """Initialization. Forces name='args' and an empty default, then
        invalidates the container's cached source/header timestamps so the
        generated code is refreshed."""
        kwargs['name'] = 'args'
        kwargs['default'] = ''
        super(ArgsArgument, self).__init__(**kwargs)
        container = self.outer_class or self.outer_module
        container._lastSrcTime = None
        container._lastHdrTime = None
    @property
    def bitmap_index(self):
        """Index of the tree image for this node type."""
        from beatle.app import resources as rc
        return rc.GetBitmapIndex('py_args')
    @property
    def label(self):
        """Tree label: the argument name prefixed with '*'."""
        return '*{self._name}'.format(self=self)
| gpl-2.0 |
dkarakats/edx-platform | common/djangoapps/student/migrations/0006_expand_meta_field.py | 188 | 9246 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: widen ``UserProfile.meta``.

    Converts the ``meta`` column of ``auth_userprofile`` from a
    255-character ``CharField`` to an unbounded ``TextField``; the
    ``backwards`` method restores the original column type on rollback.
    """

    def forwards(self, orm):
        """Apply the migration (CharField -> TextField)."""
        # Changing field 'UserProfile.meta'
        db.alter_column('auth_userprofile', 'meta', self.gf('django.db.models.fields.TextField')())

    def backwards(self, orm):
        """Revert the migration (TextField -> CharField(255))."""
        # Changing field 'UserProfile.meta'
        db.alter_column('auth_userprofile', 'meta', self.gf('django.db.models.fields.CharField')(max_length=255))

    # Frozen ORM snapshot auto-generated by South at the time this
    # migration was created; it is NOT the live model definition and
    # should not be edited by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
            'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
            'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
            'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'student.pendingemailchange': {
            'Meta': {'object_name': 'PendingEmailChange'},
            'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.pendingnamechange': {
            'Meta': {'object_name': 'PendingNameChange'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.registration': {
            'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
            'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.userprofile': {
            'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
            'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
        },
        'student.usertestgroup': {
            'Meta': {'object_name': 'UserTestGroup'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
        }
    }

    # Only the 'student' app's frozen models are authoritative here.
    complete_apps = ['student']
| agpl-3.0 |
byt3smith/CIRTKit | modules/reversing/viper/peepdf/aespython/cfb_mode.py | 42 | 2268 | #!/usr/bin/env python
"""
CFB Mode of operation
Running this file as __main__ will result in a self-test of the algorithm.
Algorithm per NIST SP 800-38A http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
Copyright (c) 2010, Adam Newman http://www.caller9.com/
Licensed under the MIT license http://www.opensource.org/licenses/mit-license.php
"""
__author__ = "Adam Newman"
class CFBMode:
    """Cipher Feedback (CFB) mode of operation.

    Wraps a block cipher object (anything exposing ``cipher_block``) and
    keeps the feedback register between successive block operations, per
    NIST SP 800-38A.  Blocks are lists of integers (one per byte).
    """

    def __init__(self, block_cipher, block_size):
        self._block_cipher = block_cipher
        self._block_size = block_size
        # Feedback register, initialised to an all-zero IV.
        self._iv = [0] * block_size

    def set_iv(self, iv):
        """Load a new IV; silently ignored unless it matches the block size."""
        if len(iv) == self._block_size:
            self._iv = iv

    def encrypt_block(self, plaintext):
        """Encrypt one block; the ciphertext becomes the next register value."""
        keystream = self._block_cipher.cipher_block(self._iv)
        ciphertext = [p ^ k for p, k in zip(plaintext, keystream)]
        self._iv = ciphertext
        return ciphertext

    def decrypt_block(self, ciphertext):
        """Decrypt one block; the ciphertext becomes the next register value."""
        keystream = self._block_cipher.cipher_block(self._iv)
        plaintext = [k ^ c for k, c in zip(keystream, ciphertext)]
        self._iv = ciphertext
        return plaintext
import unittest
class TestEncryptionMode(unittest.TestCase):
    """Self-test of CFBMode against the known-answer vectors in test_keys."""

    def test_mode(self):
        # Self test.  These helper modules live next to this file and are
        # only needed when the self-test actually runs, hence local imports.
        import key_expander
        import aes_cipher
        import test_keys

        test_data = test_keys.TestKeys()
        test_expander = key_expander.KeyExpander(256)
        test_expanded_key = test_expander.expand(test_data.test_mode_key)
        test_cipher = aes_cipher.AESCipher(test_expanded_key)
        test_cfb = CFBMode(test_cipher, 16)

        # Encrypt four chained blocks and count matching byte positions;
        # all 16 positions must match the reference ciphertext.
        # NOTE: assertEqual is used instead of the deprecated assertEquals
        # alias (removed in Python 3.12).
        test_cfb.set_iv(test_data.test_mode_iv)
        for k in range(4):
            matches = [i for i, j in zip(test_data.test_cfb_ciphertext[k],
                                         test_cfb.encrypt_block(test_data.test_mode_plaintext[k]))
                       if i == j]
            self.assertEqual(len(matches), 16,
                             msg='CFB encrypt test block' + str(k))

        # Reset the IV and verify decryption recovers the plaintext.
        test_cfb.set_iv(test_data.test_mode_iv)
        for k in range(4):
            matches = [i for i, j in zip(test_data.test_mode_plaintext[k],
                                         test_cfb.decrypt_block(test_data.test_cfb_ciphertext[k]))
                       if i == j]
            self.assertEqual(len(matches), 16,
                             msg='CFB decrypt test block' + str(k))


if __name__ == "__main__":
    unittest.main()
| mit |
xuegang/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/package/gphdfs/__init__.py | 21 | 15985 | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tinctest
import socket
import re
from tinctest.lib.gpfdist import gpfdist, GpfdistError
from tinctest.lib import local_path, run_shell_command
from tinctest.main import TINCException
from lib.phd_rpm_util import PHDRpmUtil
from lib.cdh_rpm_util import CDHRpmUtil
from lib.apache_tar_util import ApacheTarUtil
from lib.kerberos_util import KerberosUtil
class HadoopIntegrationException(TINCException):
    """Exception class for hadoop integration test.

    Raised for unrecoverable setup failures (e.g. missing environment
    variables or failed shell commands) during Hadoop/GPDB integration.
    """
    pass
class HadoopIntegration(object):
    """Integrates Hadoop and GPDB.

    Sets up a (optionally Kerberos-secured) Hadoop cluster, wires the
    gphdfs connector into an existing GPDB installation, and creates the
    test data needed by the gphdfs SQL test suites.
    """

    def __init__(self, hadoop_type, gphdfs_connector, hadoop_artifact_url, hadoop_install_dir, hadoop_data_dir, template_conf_dir, secure_hadoop, node_list):
        """Store configuration, resolve hostname/domain and pick a free gpfdist port.

        Args:
            hadoop_type(str): one of 'phd', 'cdh' or 'apache'.
            gphdfs_connector(str): gphdfs connector jar name (without extension).
            hadoop_artifact_url(str): URL of the Hadoop artifacts.
            hadoop_install_dir(str): Hadoop installation directory.
            hadoop_data_dir(str): Hadoop data directory.
            template_conf_dir(str): directory holding template configuration files.
            secure_hadoop(bool): whether to set up a Kerberos-secured cluster.
            node_list(list): hosts taking part in the cluster.
        """
        self.hadoop_type = hadoop_type
        self.gphdfs_connector = gphdfs_connector
        self.hadoop_artifact_url = hadoop_artifact_url
        self.hadoop_install_dir = hadoop_install_dir
        self.hadoop_data_dir = hadoop_data_dir
        self.template_conf_dir = template_conf_dir
        self.secure_hadoop = secure_hadoop
        self.node_list = node_list
        self.cur_dir = os.path.abspath(os.path.dirname(__file__))
        self.gpfdistport = '8080'
        # Fall back to localhost/localdomain.com when the hostname cannot
        # be split into host and domain parts.
        (host, domain) = self._get_host_and_domain()
        self.hostname = host or 'localhost'
        self.domain = domain or 'localdomain.com'
        self.gpfdistport = self._get_gpfdistport(self.gpfdistport)

    def _get_gpfdistport(self, gpfdistport):
        """Return the first port number >= gpfdistport not listed by netstat.

        A grep return code of 0 means the port shows up in the netstat
        output (i.e. it is in use), so keep probing the next port.
        """
        result = 0
        while result == 0:
            cmd_str = "netstat -a| grep %s" % gpfdistport
            res = {'rc': 0, 'stderr': '', 'stdout': ''}
            run_shell_command(cmd_str, 'Grep Netstat', res)
            result = res['rc']
            if result == 0:
                gpfdistport = str(int(gpfdistport) + 1)
            else:
                return gpfdistport

    def _get_host_and_domain(self):
        """Return (hostname, domain) parsed from the `hostname` command.

        Either element may be '' when the command output is empty or the
        hostname contains no dot.
        """
        hostname = ''
        domain = ''
        res = {'rc': 0, 'stderr': '', 'stdout': ''}
        run_shell_command('hostname', 'Get hostname command', res)
        result = res['stdout']
        if len(result) > 0:
            hostname = result.split('\n')[0]
        if hostname.find('.') >= 0:
            # Split "host.domain.tld" at the first dot.
            domain = hostname[hostname.find('.') + 1:]
            hostname = hostname[: hostname.find('.')]
        return (hostname, domain)

    def _create_test_jars(self, export_env, java_classpath):
        """Compile the helper java classes and bundle them into maptest.jar.

        Raises:
            HadoopIntegrationException: when compilation or jar creation fails.
        """
        cmd_str = "%s cd %s; javac -cp %s javaclasses/*.java" % (export_env, self.cur_dir, java_classpath)
        if not run_shell_command(cmd_str, "Compiling java classes"):
            raise HadoopIntegrationException("Error while compiling java classes!")
        cmd_str = "cd %s; jar cf maptest.jar javaclasses/*.class" % self.cur_dir
        if not run_shell_command(cmd_str, "Creating jar file"):
            raise HadoopIntegrationException("Error while creating the jar!")

    def _create_java_cmd_string(self, export_env, java_classpath):
        """Build the java invocation string used by the SQL tests.

        NOTE(review): relies on self.fqdn, which is set in integrate().
        """
        envvar = '-Dhdfshost=' + self.fqdn + ' -Ddatanodeport=8020 -Djobtrackerhost=' + self.fqdn + ' -Djobtrackerport=8020 '
        java_cmd = "%s java -cp %s:%s/maptest.jar %s" % (export_env, java_classpath, self.cur_dir, envvar)
        return java_cmd

    def _create_test_data(self, datasize, large_datasize, test_data_types):
        """
        Creates the test data required for the sqls to run.

        Generates per-type text files with lib/create_data.py, uploads
        them to HDFS, then starts a gpfdist process serving the data dir.
        """
        data_dir = self.cur_dir + '/tmp/text'
        run_shell_command('mkdir -p %s' % data_dir)
        for data_type in test_data_types:
            data_type_file = data_dir + "/" + data_type + ".txt"
            cmd_str = "python %s/lib/create_data.py %s %s > %s" % (self.cur_dir, datasize, data_type, data_type_file)
            run_shell_command(cmd_str, "Create data for type -> %s" % data_type)
        cmd_str = "python %s/lib/create_data.py %s regression > %s/tmp/random_with_seed_1.largetxt" % (self.cur_dir, large_datasize, self.cur_dir)
        run_shell_command(cmd_str, "Create regress test data")
        cmd_str = "python %s/lib/create_data.py %s all > %s/all_20.txt" % (self.cur_dir, str(int(datasize) * 20), data_dir)
        run_shell_command(cmd_str, "Create regress test data for datasize * 20")
        cmd_str = "python %s/lib/create_data.py %s all > %s/all_100.txt" % (self.cur_dir, str(int(datasize) * 100), data_dir)
        run_shell_command(cmd_str, "Create regress test data for datasize * 100")
        # create test data for typemismatch test
        run_shell_command("sed 's/bigint/text/g' %s/bigint.txt > %s/bigint_text.txt" % (data_dir, data_dir), "create test data for typemismatch test")
        # copy composite file into data_dir
        run_shell_command("cp %s/sql/data/compositeType.txt %s" % (self.cur_dir, data_dir), "Copy composite file")
        # put test data files in HDFS
        self.hadoop_util.put_file_in_hdfs("%s/sql/regression/data/*" % self.cur_dir, "/plaintext/")
        self.hadoop_util.put_file_in_hdfs("%s/tmp/random_with_seed_1.largetxt" % self.cur_dir, "/plaintext/random_with_seed_1.largetxt")
        self.hadoop_util.put_file_in_hdfs("%s/tmp/text/all_100.txt" % self.cur_dir, "/plaintext/all_100.txt")
        self.hadoop_util.put_file_in_hdfs("%s/tmp/text/all.txt" % self.cur_dir, "/plaintext/all.txt")
        self.hadoop_util.put_file_in_hdfs("%s/tmp/text/timestamp.txt" % self.cur_dir, "/plaintext/timestamp.txt")
        self.hadoop_util.put_file_in_hdfs("%s/tmp/text/varchar.txt" % self.cur_dir, "/plaintext/varchar.txt")
        self.hadoop_util.put_file_in_hdfs("%s/sql/data/*" % self.cur_dir, "/plaintext/")
        # start gpfdist process (retries ports on failure)
        self.start_gpfdist_process(data_dir)

    def start_gpfdist_process(self, data_dir):
        """Start gpfdist serving data_dir, retrying on successive ports.

        We have seen cases where the gpfdist process doesn't start on a
        particular port (connection bind error, or "FATAL cannot create
        socket on port 8080") despite the netstat check done at init time.
        As a workaround, keep trying increasing port numbers until the
        process comes up.
        """
        gpfdist_process_started = False
        while not gpfdist_process_started:
            gpfdist_process = gpfdist(self.gpfdistport, self.fqdn)
            try:
                gpfdist_process.start(options=' -d %s' % data_dir)
            except GpfdistError as message:
                tinctest.logger.warn("Couldn't setup gpfdist on port %s" % self.gpfdistport)
                gpfdist_process_started = False
                self.gpfdistport = str(int(self.gpfdistport) + 1)
            else:
                gpfdist_process_started = True
                tinctest.logger.info("Started gpfdist on port %s" % self.gpfdistport)

    def get_ip_address(self):
        """Return the IPv4 address this host's name resolves to."""
        return socket.gethostbyname(socket.gethostname())

    def _setup_gpdb_configurations(self, gphome, mdd, gpdb_template_conf, hadoop_home, hadoop_common_home, hadoop_guc):
        """
        Updates the gpdb template confgiration files per the current env.
        Also copies required configuration files.
        """
        # Append the Hadoop environment exports to greenplum_path.sh.
        text = "\n### Hadoop specific variables\n"
        if self.secure_hadoop:
            text = text + "export HADOOP_SECURE_DN_USER=hdfs\n"
        text = text + "export CLASSPATH=$HADOOP_HOME/lib\n" \
                      "export GP_JAVA_OPT=\"$GP_JAVA_OPT -Djava.library.path=$HADOOP_HOME/lib/native/\"\n" \
                      "export GP_HADOOP_CONN_JARDIR=lib/hadoop\n" \
                      "export GP_HADOOP_CONN_VERSION=%s\n" % self.gphdfs_connector
        greenplum_path_file = os.path.join(gphome, "greenplum_path.sh")
        self.hadoop_util.append_text_to_file(greenplum_path_file, text)
        # Trust the hadoop test role from this host in pg_hba.conf.
        host = str(self.get_ip_address()) + "/32"
        text = "local all _hadoop_perm_test_role trust\n" \
               "host all _hadoop_perm_test_role %s trust\n" % host
        self.hadoop_util.append_text_to_file(mdd + "/pg_hba.conf", text)
        cmd_str = "source %s; gpconfig -c gp_hadoop_target_version -v %s" % (greenplum_path_file, hadoop_guc)
        run_shell_command(cmd_str, "Setting gp_hadoop_target_version as %s" % hadoop_guc)
        cmd_str = "source %s; gpstop -air" % greenplum_path_file
        assert run_shell_command(cmd_str, "Restart GPDB")
        # create hadoop_env.sh file based on the hadoop type, substituting
        # the connector name and JAVA_HOME placeholders in the template.
        transforms = {'%CONNECTOR%': self.gphdfs_connector, '%JAVA_HOME%': self.hadoop_util.get_java_home()}
        input_file_path = local_path(gpdb_template_conf + "/hadoop_env.sh.%s.t" % self.hadoop_type)
        output_file_path = local_path(gpdb_template_conf + "/hadoop_env.sh")
        with open(input_file_path, 'r') as input:
            with open(output_file_path, 'w') as output:
                for line in input.readlines():
                    for key, value in transforms.iteritems():
                        line = re.sub(key, value, line)
                    output.write(line)
        cmd_str = "cp %s/hadoop_env.sh %s/lib/hadoop/hadoop_env.sh" % (gpdb_template_conf, gphome)
        run_shell_command(cmd_str)
        cmd_str = "sudo cp %s/lib/hadoop/%s.jar %s" % (gphome, self.gphdfs_connector, hadoop_common_home)
        run_shell_command(cmd_str, "Copying the gphds connector")

    def _validate_hostname(self):
        """Make sure self.fqdn resolves locally by adding it to /etc/hosts if absent."""
        etc_hosts_file = "/etc/hosts"
        cmd_str = "sudo egrep \"%s\" %s" % (self.fqdn, etc_hosts_file)
        # check if hostname present or not, add if not present
        if not run_shell_command(cmd_str, "Checking hostname - %s in /etc/hosts" % self.fqdn):
            ip_addr = self.get_ip_address()
            text_to_append = ip_addr + " " + self.fqdn
            # give write permissions to etc/hosts file
            run_shell_command("sudo chmod o+w %s" % etc_hosts_file)
            with open(etc_hosts_file, "a") as append_file:
                append_file.write(text_to_append)
            # remove write permissions from etc/hosts file
            run_shell_command("sudo chmod o-w %s" % etc_hosts_file)

    def integrate(self):
        """
        Integrates Hadoop and GPDB by performing the following:
        1. Setup kerberos server
        2. Setup hadoop cluster
        3. Setup GPDB configurations
        4. Create sql sepcific test data

        Raises:
            HadoopIntegrationException: when GPHOME / MASTER_DATA_DIRECTORY
                are unset, or when hadoop_type is not one of the supported
                values ('phd', 'cdh', 'apache').
        """
        # check for GPHOME and MASTER_DATA_DIRECTORY
        # throw exception is not set
        gphome = os.getenv("GPHOME")
        if not gphome:
            raise HadoopIntegrationException("GPHOME not set!!")
        mdd = os.getenv("MASTER_DATA_DIRECTORY")
        if not mdd:
            raise HadoopIntegrationException("MASTER_DATA_DIRECTORY not set!!")
        self.fqdn = self.hostname + '.' + self.domain
        # check if hostname is present in /etc/hosts
        # if not append the hostname to file
        self._validate_hostname()
        # setup kerberos server if security is enabled
        if self.secure_hadoop:
            self.kerberos_template_conf = local_path(os.path.join(self.template_conf_dir, "kerberos"))
            self.kerberos_util = KerberosUtil(self.fqdn, self.domain, self.kerberos_template_conf, self.node_list)
            self.kerberos_util.configure_server()
            self.kerberos_util.get_kerberos_ticket("hdfs")
            self.kerberos_util.get_kerberos_ticket("gpadmin")
        # setup hadoop cluster
        hadoop_conf_dir = local_path(os.path.join(self.template_conf_dir, "hdfs/rpm"))
        if self.hadoop_type == "phd":
            self.hadoop_util = PHDRpmUtil(self.hadoop_artifact_url, self.hadoop_install_dir, self.hadoop_data_dir, hadoop_conf_dir, self.fqdn, self.secure_hadoop)
            hadoop_guc = "gphd-2.0"
        elif self.hadoop_type == "cdh":
            self.hadoop_util = CDHRpmUtil(self.hadoop_artifact_url, self.hadoop_install_dir, self.hadoop_data_dir, hadoop_conf_dir, self.fqdn, self.secure_hadoop)
            hadoop_guc = "cdh4.1"
        elif self.hadoop_type == "apache":
            self.hadoop_util = ApacheTarUtil(self.hadoop_artifact_url, self.hadoop_install_dir, self.hadoop_data_dir, hadoop_conf_dir, self.fqdn, self.secure_hadoop)
            hadoop_guc = "gphd-2.0"
        else:
            # Fail fast instead of hitting a NameError on hadoop_guc below.
            raise HadoopIntegrationException("Unsupported hadoop type: %s" % self.hadoop_type)
        # setup up hadoop cluster
        self.hadoop_util.init_cluster()
        hadoop_home = self.hadoop_util.get_hadoop_env()['HADOOP_HOME']
        hadoop_common_home = self.hadoop_util.get_hadoop_env()['HADOOP_COMMON_HOME']
        if self.hadoop_type == "apache":
            hadoop_common_home = hadoop_common_home + "common"
        # setup up GPDB configurations & test data
        gpdb_template_conf = local_path(os.path.join(self.template_conf_dir, "gpdb"))
        self._setup_gpdb_configurations(gphome, mdd, gpdb_template_conf, hadoop_home, hadoop_common_home, hadoop_guc)
        export_env = "export HADOOP_HOME=%s; source %s/lib/hadoop/hadoop_env.sh;" % (hadoop_home, gphome)
        java_classpath = ".:$CLASSPATH:%s/lib/hadoop/%s" % (gphome, self.gphdfs_connector)
        self._create_test_jars(export_env, java_classpath)
        self.java_cmd = self._create_java_cmd_string(export_env, java_classpath)
        test_data_types = [
            'regression', 'time', 'timestamp', 'date',
            'bigint', 'int', 'smallint', 'real', 'float',
            'boolean', 'varchar', 'bpchar', 'numeric', 'text', 'all'
        ]
        datasize = 5000
        largedatasize = str(int(datasize) * 2000)
        self._create_test_data(datasize, largedatasize, test_data_types)

    def get_substitutions(self):
        """
        For each sql test, this method will be called
        by the SQLTestCase implementing class.
        Used for making the substitutions in the sql just before its run
        """
        hadoop_home = self.hadoop_util.get_hadoop_env()['HADOOP_HOME']
        substitutions = {'%gpfdistPort%': self.gpfdistport,
                         '%localhost%': self.fqdn,
                         '%cmdstr%': self.java_cmd,
                         '%HADOOP_HOST%': self.fqdn + ":8020",
                         '%HDFSaddr%': self.fqdn + ":8020",
                         '%MYD%': os.path.join(self.cur_dir, "sql"),
                         '%HADOOP_FS%': hadoop_home,
                         '%HADOOP_HOME%': hadoop_home
                         }
        return substitutions

    def teardown(self):
        """
        This method gets called after each sql test case completes.
        Purpose is to clean up the HDFS after a sql test run
        """
        tinctest.logger.debug("Running teardown method")
        self.hadoop_util.remove_file_from_hdfs('/extwrite/')
        self.hadoop_util.remove_file_from_hdfs('/mapreduce/')
        self.hadoop_util.remove_file_from_hdfs('/mapred/')

    def teardownclass(self):
        """
        This method will be called after Each test-suite is finished executing
        """
        # clean up hadoop
        self.hadoop_util.cleanup()
        # clean up kerberos -- only when a KDC was actually configured;
        # self.kerberos_util exists only when secure_hadoop is set (it is
        # created in integrate()), so an unconditional call would raise
        # AttributeError on non-secure runs.
        if self.secure_hadoop:
            self.kerberos_util.clean()
| apache-2.0 |
JesseLivezey/pylearn2 | pylearn2/datasets/binarizer.py | 45 | 1455 | """
.. todo::
WRITEME
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.datasets.transformer_dataset import TransformerDataset
from pylearn2.expr.sampling import SampleBernoulli
class Binarizer(TransformerDataset):
    """
    A TransformerDataset that samples binary {0,1} examples from raw
    examples whose features lie in [0,1], treating each feature value
    as a Bernoulli parameter.

    Parameters
    ----------
    raw : pylearn2 Dataset
        Must provide examples with features in the interval [0,1].
    seed : integer or list of integers, optional
        Seed forwarded to MRG_RandomStreams for the Bernoulli sampling.
        When omitted, every instance shares the same default seed so two
        instances can run synchronized side by side.
    """

    def __init__(self, raw, seed=None):
        sampler = SampleBernoulli(seed=seed)
        super(Binarizer, self).__init__(
            raw, sampler, space_preserving=True)

    def get_design_matrix(self, topo=None):
        """
        Return the design matrix, sampling Bernoulli values from the raw
        dataset's features (or from `topo` when it is given).
        """
        if topo is None:
            design = self.raw.get_design_matrix()
            return self.transformer.perform(design)
        return self.raw.get_design_matrix(topo)
| bsd-3-clause |
WeskerYuan/flydan | sitl.py | 1 | 12399 | #! /usr/bin/python
"""
Software-in-the-loop simulation script for the multi quadcopter flocking control.
This is the main script for the multi quadcopter flocking control (SITL).
The script runs under the dronekit-sitl environment.
A high-level XBee module should be connected for the inter communication between
the drones and the ground control station if specified the hardware ports.
Otherwise, a ZeroMQ publisher-subscriber network is set to simulate the
communication.
The XBee module runs in API2, escaped character mode. By the time written, an
XBee Pro S1 module is used (with the DIJI Mesh firmware). See the official site
of DIJI and the datasheets for more details. Simulated XBee modules uses the
same interface as the real ones.
The dronekit API package supports Python 2.7 for now. Preferably, Ubuntu is
the better choice of onboard Linux OS as it is uses `apt` to get distributed
packages, which is easy to setup and very convenient.
See reference [1] for more details about the algorithm.
Reference:
DIJI Xbee: https://docs.digi.com/display/WirelessConnectivityKit/XBee+API+mode
python-xbee: https://github.com/nioinnovation/python-xbee
DKPY-API Reference: http://python.dronekit.io/automodule.html
Dronekit-SITL: http://python.dronekit.io/develop/sitl_setup.html?highlight=sitl
[1] Q. Yuan, J. Zhan and X. Li, Outdoor flocking of quadcopter drones with
decentralized model predictive control, ISA Transactions, 2017.
Environment:
Computer and OS: Raspberry Model 3B with Ubuntu MATE 16.04LTS.
Wireless module: XBee Pro S1 with DIJI Mesh firmware.
Python packages: dronekit, dronekit-sitl, xbee, numpy
Attibutes:
start_loc(dict): starting location coordinates related to agent_id.
comm_port_list(dict): SITL TCP ports related to agent_id.
Copyright:
Copyright 2017 Quan Yuan, Adaptive Networks and Control Lab,
Research Center of Smart Networks and Systems,
School of Information Science and Engineering,
Fudan University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import sys
import time
import math
import serial
import logging
import argparse
import threading
from dronekit_sitl import SITL
from src import nav
from src import mas
from src import comm
from src import util
from src import shared
def _add_listeners(vehicle):
    """
    Add listeners to monitor vehicle status.

    Registers three dronekit callbacks: flight-mode changes (to detect a
    manual RTL/LAND override), GPS satellite count (to flag degraded fix),
    and SYSTEM_TIME messages (to keep a human-readable timestamp).

    Args:
        vehicle(dronekit.Vehicle): the copter to be controlled.
    """
    @vehicle.on_attribute('mode')
    def mode_listener(self, name, msg):
        util.log_info("Mode switched to %s" % msg.name)
        # An RTL/LAND that this script did not command means someone took
        # over manually -- raise the abort flag so control threads exit.
        if msg.name != shared.status['manual_mode']:  # manual override
            if msg.name == 'RTL' or msg.name == 'LAND':
                util.log_warning("External %s detected. Abort." % msg.name)
                shared.status['abort'] = True

    @vehicle.on_attribute('gps_0')
    def gps_listener(self, name, msg):  # monitor satellites
        if not shared.status['thread_flag'] & shared.NSATS_TOO_LOW:
            if msg.satellites_visible < 6:
                # Message fixed to match the `< 6` threshold (fires at 5
                # or fewer visible satellites).
                util.log_warning("Satellites dropped below 6!")
                shared.status['thread_flag'] |= shared.NSATS_TOO_LOW
        elif msg.satellites_visible >= 10:
            # Hysteresis: only clear the low-satellite flag once the
            # count has recovered well above the warning threshold.
            util.log_info("Satellites recovered to %d." % msg.satellites_visible)
            shared.status['thread_flag'] &= ~shared.NSATS_TOO_LOW

    @vehicle.on_message('SYSTEM_TIME')
    def time_listener(self, name, msg):  # log timestamp
        # Renamed from `format` to avoid shadowing the builtin.
        time_format = '%Y-%m-%d %H:%M:%S'
        val = time.localtime(msg.time_unix_usec / 1000000)
        shared.timestamp = time.strftime(time_format, val)
def _parse_arguments():
"""
Parse the arguments to the main script and validate the inputs.
Returns:
argparse.ArgumentParser: the argument structure.
"""
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@',
formatter_class = argparse.ArgumentDefaultsHelpFormatter,
description = 'Arguments for the SITL simulation.'
)
parser.add_argument('-id', type=str, default='FF', metavar='AgentID', required=True,
help="AGENT_ID, must be a 2-digit integer.")
parser.add_argument('-alt', type=float, default=15.0, metavar='',
help='Takeoff altitude, within [10.0, 100.0] (m).')
parser.add_argument('-xbee', type=str, default=None, metavar='',
help="XBee module's device path. If not provided, use ZeroMQ.")
parser.add_argument('-pix', type=str, default='fw/ac3.5.2_port5760', metavar='',
help="Pixhawk's device path. Can be SITL firmware.")
parser.add_argument('-algorithm', '-a', type=str, default='MPC', metavar='',
choices=['Vicsek','MPC'],
help="Algorithm used for main script.")
parser.add_argument('-character', '-c', type=str, default='follower', metavar='',
choices=['square','passive','follower'],
help="Whether this agent is leader or follower?")
parser.add_argument('-n', type=int, default=5, metavar='',
help="Total agent count.")
parser.add_argument('-level', '-l', type=str, default='info', metavar='',
choices=['warning','debug','info'],
help="Logging level: ['warning','debug','info']")
args = parser.parse_args()
# get correct parameters
if args.alt < 10.0 or args.alt > 100.0:
raise Exception('-alt should between [10.0, 100.0]')
if not args.id.isdigit() or len(args.id) != 2:
raise Exception('-id shoud be a 2-digit integer')
return args
def _choose_algorithm(vehicle, xbee, neighbors):
    """
    Choose which algorithm thread to be instantiated.

    The agent character ('square'/'passive' leaders) takes precedence;
    otherwise the flocking algorithm selects the follower thread.

    Args:
        vehicle(dronekit.Vehicle): the copter to be controlled.
        xbee(xbee.Zigbee): the XBee communication interface.
        neighbors(dict): the dictionary containing neighbors data.

    Returns:
        mas.Object: different thread instance based on the parameters.
    """
    character = shared.AGENT_CHARACTER
    if character == 'square':
        return mas.SquareRoute(vehicle, xbee)
    if character == 'passive':
        return mas.PassiveLeader(vehicle, xbee)

    algorithm = shared.CURRENT_ALGORITHM
    if algorithm == 'Vicsek':
        return mas.Vicsek(vehicle, xbee, neighbors)
    if algorithm == 'MPC':
        return mas.Decentralized(vehicle, xbee, neighbors)
# starting location GNSS coordinates. Modify accordingly.
# Format: latitude,longitude,MSL altitude, heading
# 'FFF' is reserved for not IDed ones. Not available when using pyzmq.
# Each value is passed verbatim to dronekit-sitl as its '--home=' argument.
start_loc = {
    'A01': '31.2991103,121.4953190,9,340',
    'A02': '31.2989222,121.4954363,9,340',
    'A03': '31.2988302,121.4953633,9,340',
    'A04': '31.2988857,121.4954170,9,340',
    'A05': '31.2989833,121.4955480,9,340',
    'FFF': '31.3012010,121.4981920,9,340'
}

# port list for SITL communications
# Maps AGENT_ID -> local TCP port used for the simulated inter-agent
# network (see comm.zmq_init in main); 'GCS' is the ground control station.
comm_port_list = {
    'A01': 5789,
    'A02': 6789,
    'A03': 7789,
    'A04': 8789,
    'A05': 9789,
    'GCS': 1789
}
def main():
    """
    The Main function of this script.

    Boots a SITL copter, connects DroneKit to it, wires up communication
    (real XBee serial or ZeroMQ emulation), runs takeoff plus the chosen
    flocking algorithm, then shuts everything down in reverse order.
    """
    args = _parse_arguments()
    util.log_init("sitl_A%s_%s.txt" % (args.id, util.get_latest_log("latest_sitl.txt")), util.log_level[args.level])
    # Publish run parameters through the `shared` module so worker threads
    # (takeoff, broadcast, flocking) can read them.
    shared.AGENT_ID = 'A%s' % args.id
    shared.AGENT_COUNT = args.n
    shared.CURRENT_ALGORITHM = args.algorithm
    shared.AGENT_CHARACTER = args.character
    shared.des_alt = args.alt
    util.log_info("AGENT_ID = %s" % shared.AGENT_ID)
    util.log_info("Algorithm: %s" % shared.CURRENT_ALGORITHM)
    util.log_info("Agent type: %s" % shared.AGENT_CHARACTER)
    print "Start simulator (SITL)"
    sitl = SITL(args.pix) # initialize SITL with firmware path
    # Pick this agent's home coordinates; unknown IDs fall back to 'FFF'.
    if shared.AGENT_ID in start_loc:
        sitl_args = ['--home=%s' % start_loc[shared.AGENT_ID]]
    else:
        sitl_args = ['--home=%s' % start_loc['FFF']]
    # Pre-recorded coordinates.
    #sitl_args = ['-I0', '--model', 'quad', '--home=31.301201,121.498192,9,353']
    sitl.launch(sitl_args, await_ready=True, restart=True)
    # Connect to the vehicle. (Spawn an instance of Vehicle named "vehicle")
    # connection port is coded in the file name of the firmware like "ac3.4.5_port5760"
    # use regular expression to search the string and extract port number
    port = re.search(r'port\d{4}', args.pix)
    port = re.search(r'\d{4}', port.group()).group()
    print "Connecting to copter on: TCP: 127.0.0.1:%s" % port
    copter = nav.connect('tcp:127.0.0.1:%s' % port, wait_ready=True, rate=20)
    util.log_info("Copter connected. Firmware: %s" % copter.version)
    if not args.xbee: # simulate XBee using ZeroMQ
        [pub, sub] = comm.zmq_init(comm_port_list[shared.AGENT_ID], comm_port_list)
        subscriber_thread = comm.Subscriber(shared.AGENT_ID, sub)
        subscriber_thread.start()
        xbee = pub # make xbee the publisher
        util.log_info("ZeroMQ initialzied.")
    else: # use actual xbee ports
        ser = serial.Serial(args.xbee, 57600)
        xbee = comm.xbee_init(ser)
        util.log_info("Xbee initialzed.")
    info = "IFO,%s connected with firmware %s" % (shared.AGENT_ID, copter.version)
    comm.xbee_broadcast(xbee, info)
    _add_listeners(copter)
    # Worker threads: takeoff runs (and is joined) first; the others only
    # start once the copter is confirmed airborne.
    takeoff_thread = nav.Takeoff(copter, xbee, shared.des_alt, 3)
    purge_thread = comm.Purge(shared.neighbors)
    broadcast_thread = comm.Broadcast(shared.AGENT_ID, copter, xbee)
    flocking_thread = _choose_algorithm(copter, xbee, shared.neighbors)
    takeoff_thread.start()
    takeoff_thread.join() # wait until takeoff procedure completed
    if shared.status['airborne']: # only execute the threads when airborne
        util.log_info("Copter is airborne, starting threads.")
        broadcast_thread.start()
        purge_thread.start()
        flocking_thread.start()
    # main loop
    while True:
        try: time.sleep(.2)
        except KeyboardInterrupt: break
        if shared.status['airborne']:
            # echo exiting status
            if shared.status['exiting']:
                info = "IFO,%s %s-ing." % (shared.AGENT_ID,shared.status['command'])
                comm.xbee_broadcast(xbee, info)
                util.log_info(info)
            # if an rtl or land command is received, kill flocking and set the `exiting` flag
            elif shared.status['command'] == 'RTL' or shared.status['command'] == 'LAND':
                shared.status['thread_flag'] |= shared.FLOCKING_FLAG
                nav.set_mode(copter, shared.status['command'])
                shared.status['exiting'] = True
        if not flocking_thread.is_alive(): # break the loop if finished
            break
    nav.wait_for_disarm(copter) # wait for disarm
    comm.xbee_broadcast(xbee, 'IFO,%s terminated.' % shared.AGENT_ID)
    # clean up
    # Stop threads in reverse dependency order, polling with a 3 s join
    # timeout so a stuck thread is logged instead of hanging shutdown.
    purge_thread.stop()
    while purge_thread.is_alive():
        util.log_info('Waiting for purge to shutdown')
        purge_thread.join(3)
    util.log_info('Purge killed.')
    broadcast_thread.stop()
    while broadcast_thread.is_alive():
        util.log_info('Waiting for broadcast to shutdown')
        broadcast_thread.join(3)
    util.log_info('Broadcast killed.')
    copter.close()
    util.log_info("Copter shutdown.")
    if args.xbee:
        xbee.halt()
        ser.close()
        util.log_info("Xbee and serial closed.")
    else:
        subscriber_thread.stop()
        while subscriber_thread.is_alive():
            util.log_info('Waiting for Subscriber to shutdown')
            subscriber_thread.join(3)
        util.log_info('Subscriber killed.')
    sitl.stop()
    util.log_info("SITL shutdown.")
if __name__ == '__main__':
main() | apache-2.0 |
Intel-tensorflow/tensorflow | tensorflow/python/kernel_tests/where_op_test.py | 9 | 11173 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class WhereOpTest(test.TestCase):
  """Tests for tf.where / tf.where_v2 in single- and three-argument forms."""

  def _testWhere(self, x, truth, expected_err_re=None, fn=array_ops.where):
    """Runs fn(x) and checks the resulting index tensor against `truth`."""
    with self.cached_session():
      ans = fn(x)
      # Single-argument where returns a [num_true, rank(x)] int64 tensor.
      self.assertTrue(ans.get_shape().is_compatible_with([None, x.ndim]))
      if expected_err_re is None:
        tf_ans = self.evaluate(ans)
        self.assertAllClose(tf_ans, truth, atol=1e-10)
      else:
        with self.assertRaisesOpError(expected_err_re):
          self.evaluate(ans)

  def _testWrongNumbers(self, fn=array_ops.where):
    """Supplying only one of x/y must raise: where takes one or three args."""
    with self.session():
      with self.assertRaises(ValueError):
        fn([False, True], [1, 2], None)
      with self.assertRaises(ValueError):
        fn([False, True], None, [1, 2])

  def _testBasicVec(self, fn=array_ops.where):
    x = np.asarray([True, False])
    truth = np.asarray([[0]], dtype=np.int64)
    self._testWhere(x, truth, None, fn)

    x = np.asarray([False, True, False])
    truth = np.asarray([[1]], dtype=np.int64)
    self._testWhere(x, truth, None, fn)

    x = np.asarray([False, False, True, False, True])
    truth = np.asarray([[2], [4]], dtype=np.int64)
    self._testWhere(x, truth, None, fn)

  def _testRandomVec(self, fn=array_ops.where):
    x = np.random.rand(1000000) > 0.5
    truth = np.vstack([np.where(x)[0].astype(np.int64)]).T
    self._testWhere(x, truth, None, fn)

  def _testBasicMat(self, fn=array_ops.where):
    x = np.asarray([[True, False], [True, False]])
    # Ensure RowMajor mode
    truth = np.asarray([[0, 0], [1, 0]], dtype=np.int64)
    self._testWhere(x, truth, None, fn)

  def _testBasic3Tensor(self, fn=array_ops.where):
    x = np.asarray([[[True, False], [True, False]],
                    [[False, True], [False, True]],
                    [[False, False], [False, True]]])
    # Ensure RowMajor mode
    truth = np.asarray(
        [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]], dtype=np.int64)
    self._testWhere(x, truth, None, fn)

  def _testRandom(self, dtype, expected_err_re=None, fn=array_ops.where):
    shape = [127, 33, 53]
    # Fix: the original first built a complex random array here and then
    # immediately overwrote it; the dead assignment has been removed.
    # A random sign pattern cast to the dtype under test (~half nonzero);
    # the bool->dtype cast also covers the complex dtypes.
    x = (np.random.randn(*shape) > 0).astype(dtype)
    truth = np.where(np.abs(x) > 0)  # Tuples of indices by axis.
    truth = np.vstack(truth).T  # Convert to [num_true, indices].
    self._testWhere(x, truth, expected_err_re, fn)

  def _testThreeArgument(self, fn=array_ops.where):
    """Three-argument form must match np.where(cond, x, y) elementwise."""
    x = np.array([[-2, 3, -1], [1, -3, -3]])
    np_val = np.where(x > 0, x * x, -x)
    with self.test_session():
      tf_val = self.evaluate(fn(constant_op.constant(x) > 0, x * x, -x))
    self.assertAllEqual(tf_val, np_val)

  def testWrongNumbers(self):
    self._testWrongNumbers()

  @test_util.run_deprecated_v1
  def testBasicVec(self):
    self._testBasicVec()

  @test_util.run_deprecated_v1
  def testRandomVec(self):
    self._testRandomVec()

  @test_util.run_deprecated_v1
  def testBasicMat(self):
    self._testBasicMat()

  @test_util.run_deprecated_v1
  def testBasic3Tensor(self):
    self._testBasic3Tensor()

  @test_util.run_deprecated_v1
  def testRandomBool(self):
    self._testRandom(np.bool)

  @test_util.run_deprecated_v1
  def testRandomInt32(self):
    self._testRandom(np.int32)

  @test_util.run_deprecated_v1
  def testRandomInt64(self):
    self._testRandom(np.int64)

  @test_util.run_deprecated_v1
  def testRandomFloat(self):
    self._testRandom(np.float32)

  @test_util.run_deprecated_v1
  def testRandomDouble(self):
    self._testRandom(np.float64)

  @test_util.run_deprecated_v1
  def testRandomComplex64(self):
    self._testRandom(np.complex64)

  @test_util.run_deprecated_v1
  def testRandomComplex128(self):
    self._testRandom(np.complex128)

  @test_util.run_deprecated_v1
  def testRandomUint8(self):
    self._testRandom(np.uint8)

  @test_util.run_deprecated_v1
  def testRandomInt8(self):
    self._testRandom(np.int8)

  @test_util.run_deprecated_v1
  def testRandomInt16(self):
    self._testRandom(np.int16)

  @test_util.run_deprecated_v1
  def testThreeArgument(self):
    self._testThreeArgument()

  # The V2 variants exercise tf.where_v2, which additionally broadcasts.
  def testV2WrongNumbers(self):
    self._testWrongNumbers(array_ops.where_v2)

  def testV2BasicVec(self):
    self._testBasicVec(array_ops.where_v2)

  def testV2RandomVec(self):
    self._testRandomVec(array_ops.where_v2)

  def testV2BasicMat(self):
    self._testBasicMat(array_ops.where_v2)

  def testV2Basic3Tensor(self):
    self._testBasic3Tensor(array_ops.where_v2)

  def testV2RandomBool(self):
    self._testRandom(np.bool, None, array_ops.where_v2)

  def testV2RandomInt32(self):
    self._testRandom(np.int32, None, array_ops.where_v2)

  def testV2RandomInt64(self):
    self._testRandom(np.int64, None, array_ops.where_v2)

  def testV2RandomFloat(self):
    self._testRandom(np.float32, None, array_ops.where_v2)

  def testV2RandomDouble(self):
    self._testRandom(np.float64, None, array_ops.where_v2)

  def testV2RandomComplex64(self):
    self._testRandom(np.complex64, None, array_ops.where_v2)

  def testV2RandomComplex128(self):
    self._testRandom(np.complex128, None, array_ops.where_v2)

  def testV2RandomUint8(self):
    self._testRandom(np.uint8, None, array_ops.where_v2)

  def testV2RandomInt8(self):
    self._testRandom(np.int8, None, array_ops.where_v2)

  def testV2RandomInt16(self):
    self._testRandom(np.int16, None, array_ops.where_v2)

  def testV2ThreeArgument(self):
    self._testThreeArgument(array_ops.where_v2)

  def testV2Broadcasting(self):
    """Condition of rank 4 broadcasts against rank-2 x/y, as in NumPy."""
    f = np.random.normal(0, 1, (3, 5, 1, 1))
    x = np.zeros((7, 11))
    y = np.ones((7, 11))
    np_val = np.where(f < 0, x, y)
    with self.test_session():
      tf_val = self.evaluate(
          array_ops.where_v2(constant_op.constant(f) < 0, x, y))
    self.assertAllEqual(tf_val, np_val)

  def testV2ScalarBroadcasting(self):
    x = np.zeros((7, 11))
    y = np.ones((7, 11))
    np_val = np.where(True, x, y)
    with self.test_session():
      tf_val = self.evaluate(
          array_ops.where_v2(
              constant_op.constant(True, dtype=dtypes.bool), x, y))
    self.assertAllEqual(tf_val, np_val)

  def testV2VectorBroadcasting(self):
    x = np.zeros(7)
    y = np.ones(7)
    np_val = np.where([True], x, y)
    with self.test_session():
      tf_val = self.evaluate(
          array_ops.where_v2(
              constant_op.constant([True], dtype=dtypes.bool), x, y))
    self.assertAllEqual(tf_val, np_val)

  def testV2PredBroadcasting(self):
    pred = np.array([1, 0, 0]).reshape((3, 1))
    x = np.random.randn(3, 4)
    y = np.random.randn(3, 4)
    np_val = np.where(pred, x, y)
    with self.test_session():
      tf_val = self.evaluate(array_ops.where_v2(pred, x, y))
    self.assertAllClose(tf_val, np_val)

  @test_util.run_deprecated_v1
  def testBatchSelect(self):
    """A rank-1 condition selects whole rows of rank-2 x/y (batch select)."""
    x = np.array([[-2, 3, -1] * 64, [1, -3, -3] * 64] * 8192)  # [16384, 192]
    c_mat = np.array([[False] * 192, [True] * 192] * 8192)  # [16384, 192]
    c_vec = np.array([False, True] * 8192)  # [16384]
    np_val = np.where(c_mat, x * x, -x)
    with self.session():
      tf_val = array_ops.where(c_vec, x * x, -x).eval()
    self.assertAllEqual(tf_val, np_val)
class WhereBenchmark(test.Benchmark):
  """Throughput benchmarks for tf.where in both its one- and three-arg forms."""

  def benchmarkWhere(self):
    # Sweep input size (m x n) and sparsity p (expected fraction of true
    # elements) on both CPU and GPU.
    for (m, n, p, use_gpu) in itertools.product(
        [10],
        [10, 100, 1000, 10000, 100000, 1000000],
        [0.01, 0.5, 0.99],
        [False, True]):
      name = "m_%d_n_%d_p_%g_use_gpu_%s" % (m, n, p, use_gpu)
      device = "/%s:0" % ("gpu" if use_gpu else "cpu")
      with ops.Graph().as_default():
        with ops.device(device):
          # Boolean input with roughly p*m*n true entries.
          x = random_ops.random_uniform((m, n), dtype=dtypes.float32) <= p
          v = resource_variable_ops.ResourceVariable(x)
          op = array_ops.where(v)
        with session.Session(config=benchmark.benchmark_config()) as sess:
          self.evaluate(v.initializer)
          r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
          gb_processed_input = m * n / 1.0e9
          # approximate size of output: m*n*p int64s for each axis.
          gb_processed_output = 2 * 8 * m * n * p / 1.0e9
          gb_processed = gb_processed_input + gb_processed_output
          throughput = gb_processed / r["wall_time"]
          print("Benchmark: %s \t wall_time: %0.03g s \t "
                "Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
          sys.stdout.flush()

  def benchmarkBatchSelect(self):
    # Three-argument where with a rank-1 condition (batch select of rows).
    for (m, n, use_gpu) in itertools.product([1000, 10000, 100000],
                                             [10, 100, 1000], [False, True]):
      name = "m_%d_n_%d_use_gpu_%s" % (m, n, use_gpu)
      device = "/%s:0" % ("gpu" if use_gpu else "cpu")
      with ops.Graph().as_default():
        with ops.device(device):
          x_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
          y_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
          c_gen = random_ops.random_uniform([m], dtype=dtypes.float32) <= 0.5
          x = resource_variable_ops.ResourceVariable(x_gen)
          y = resource_variable_ops.ResourceVariable(y_gen)
          c = resource_variable_ops.ResourceVariable(c_gen)
          op = array_ops.where(c, x, y)
        with session.Session(config=benchmark.benchmark_config()) as sess:
          self.evaluate(x.initializer)
          self.evaluate(y.initializer)
          self.evaluate(c.initializer)
          r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
          # approximate size of output: m*n*2 floats for each axis.
          gb_processed = m * n * 8 / 1.0e9
          throughput = gb_processed / r["wall_time"]
          print("Benchmark: %s \t wall_time: %0.03g s \t "
                "Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
          sys.stdout.flush()
if __name__ == "__main__":
  # Run the test suite (and benchmarks when requested via --benchmarks).
  test.main()
| apache-2.0 |
q1ang/scikit-learn | sklearn/linear_model/bayes.py | 220 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
    """Bayesian ridge regression

    Fit a Bayesian ridge model and optimize the regularization parameters
    lambda (precision of the weights) and alpha (precision of the noise).

    Read more in the :ref:`User Guide <bayesian_regression>`.

    Parameters
    ----------
    n_iter : int, optional
        Maximum number of iterations. Default is 300.
    tol : float, optional
        Stop the algorithm if w has converged. Default is 1.e-3.
    alpha_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the alpha parameter. Default is 1.e-6
    alpha_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the alpha parameter.
        Default is 1.e-6.
    lambda_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the lambda parameter. Default is 1.e-6.
    lambda_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the lambda parameter.
        Default is 1.e-6
    compute_score : boolean, optional
        If True, compute the objective function at each step of the model.
        Default is False
    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
        Default is True.
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    verbose : boolean, optional, default False
        Verbose mode when fitting the model.

    Attributes
    ----------
    coef_ : array, shape = (n_features)
        Coefficients of the regression model (mean of distribution)
    alpha_ : float
        estimated precision of the noise.
    lambda_ : array, shape = (n_features)
        estimated precisions of the weights.
    scores_ : float
        if computed, value of the objective function (to be maximized)

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.BayesianRidge()
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    ... # doctest: +NORMALIZE_WHITESPACE
    BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
            copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
            n_iter=300, normalize=False, tol=0.001, verbose=False)
    >>> clf.predict([[1, 1]])
    array([ 1.])

    Notes
    -----
    See examples/linear_model/plot_bayesian_ridge.py for an example.
    """

    def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
                 lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
                 fit_intercept=True, normalize=False, copy_X=True,
                 verbose=False):
        # Store hyper-parameters only; all estimation happens in fit().
        self.n_iter = n_iter
        self.tol = tol
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.compute_score = compute_score
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the model

        Evidence-maximization loop: alternately recompute the posterior mean
        of the weights (via a one-time SVD of X) and update the alpha/lambda
        precisions, until the coefficients stop changing by more than `tol`.

        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values

        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
        X, y, X_mean, y_mean, X_std = self._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        n_samples, n_features = X.shape

        ### Initialization of the values of the parameters
        # Noise precision initialized from the target variance.
        alpha_ = 1. / np.var(y)
        lambda_ = 1.
        verbose = self.verbose
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        self.scores_ = list()
        coef_old_ = None
        # SVD of X is computed once; each iteration reuses it to solve the
        # ridge system for the current alpha/lambda cheaply.
        XT_y = np.dot(X.T, y)
        U, S, Vh = linalg.svd(X, full_matrices=False)
        eigen_vals_ = S ** 2
        ### Convergence loop of the bayesian ridge regression
        for iter_ in range(self.n_iter):
            ### Compute mu and sigma
            # sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
            # coef_ = sigma_^-1 * XT * y
            # Two algebraically equivalent paths, choosing the cheaper side
            # of the SVD depending on whether the problem is tall or wide.
            if n_samples > n_features:
                coef_ = np.dot(Vh.T,
                               Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
                coef_ = np.dot(coef_, XT_y)
                if self.compute_score:
                    logdet_sigma_ = - np.sum(
                        np.log(lambda_ + alpha_ * eigen_vals_))
            else:
                coef_ = np.dot(X.T, np.dot(
                    U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
                coef_ = np.dot(coef_, y)
                if self.compute_score:
                    logdet_sigma_ = lambda_ * np.ones(n_features)
                    logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
                    logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
            ### Update alpha and lambda
            # gamma_ is the effective number of well-determined parameters.
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
            gamma_ = (np.sum((alpha_ * eigen_vals_)
                      / (lambda_ + alpha_ * eigen_vals_)))
            lambda_ = ((gamma_ + 2 * lambda_1)
                       / (np.sum(coef_ ** 2) + 2 * lambda_2))
            alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
                      / (rmse_ + 2 * alpha_2))
            ### Compute the objective function
            if self.compute_score:
                s = lambda_1 * log(lambda_) - lambda_2 * lambda_
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (n_features * log(lambda_)
                            + n_samples * log(alpha_)
                            - alpha_ * rmse_
                            - (lambda_ * np.sum(coef_ ** 2))
                            - logdet_sigma_
                            - n_samples * log(2 * np.pi))
                self.scores_.append(s)
            ### Check for convergence
            # Skipped on the first iteration (no previous coef_ to compare).
            if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Convergence after ", str(iter_), " iterations")
                break
            coef_old_ = np.copy(coef_)
        self.alpha_ = alpha_
        self.lambda_ = lambda_
        self.coef_ = coef_
        self._set_intercept(X_mean, y_mean, X_std)
        return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
    """Bayesian ARD regression.

    Fit the weights of a regression model, using an ARD prior. The weights of
    the regression model are assumed to be in Gaussian distributions.
    Also estimate the parameters lambda (precisions of the distributions of the
    weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedures (Evidence Maximization)

    Read more in the :ref:`User Guide <bayesian_regression>`.

    Parameters
    ----------
    n_iter : int, optional
        Maximum number of iterations. Default is 300
    tol : float, optional
        Stop the algorithm if w has converged. Default is 1.e-3.
    alpha_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the alpha parameter. Default is 1.e-6.
    alpha_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the alpha parameter. Default is 1.e-6.
    lambda_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the lambda parameter. Default is 1.e-6.
    lambda_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the lambda parameter. Default is 1.e-6.
    compute_score : boolean, optional
        If True, compute the objective function at each step of the model.
        Default is False.
    threshold_lambda : float, optional
        threshold for removing (pruning) weights with high precision from
        the computation. Default is 1.e+4.
    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
        Default is True.
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True.
        If True, X will be copied; else, it may be overwritten.
    verbose : boolean, optional, default False
        Verbose mode when fitting the model.

    Attributes
    ----------
    coef_ : array, shape = (n_features)
        Coefficients of the regression model (mean of distribution)
    alpha_ : float
        estimated precision of the noise.
    lambda_ : array, shape = (n_features)
        estimated precisions of the weights.
    sigma_ : array, shape = (n_features, n_features)
        estimated variance-covariance matrix of the weights
    scores_ : float
        if computed, value of the objective function (to be maximized)

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.ARDRegression()
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    ... # doctest: +NORMALIZE_WHITESPACE
    ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
            copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
            n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
            verbose=False)
    >>> clf.predict([[1, 1]])
    array([ 1.])

    Notes
    --------
    See examples/linear_model/plot_ard.py for an example.
    """

    def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
                 lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
                 threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
                 copy_X=True, verbose=False):
        # Store hyper-parameters only; all estimation happens in fit().
        self.n_iter = n_iter
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.compute_score = compute_score
        self.threshold_lambda = threshold_lambda
        self.copy_X = copy_X
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the ARDRegression model according to the given training data
        and parameters.

        Iterative procedure to maximize the evidence

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array, shape = [n_samples]
            Target values (integers)

        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)

        n_samples, n_features = X.shape
        coef_ = np.zeros(n_features)
        X, y, X_mean, y_mean, X_std = self._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)

        ### Launch the convergence loop
        # keep_lambda masks the features still active (not yet pruned).
        keep_lambda = np.ones(n_features, dtype=bool)

        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        verbose = self.verbose

        ### Initialization of the values of the parameters
        alpha_ = 1. / np.var(y)
        lambda_ = np.ones(n_features)

        self.scores_ = list()
        coef_old_ = None

        ### Iterative procedure of ARDRegression
        for iter_ in range(self.n_iter):
            ### Compute mu and sigma (using Woodbury matrix identity)
            # The n_samples x n_samples system is inverted instead of the
            # n_features x n_features one, restricted to active features.
            sigma_ = pinvh(np.eye(n_samples) / alpha_ +
                           np.dot(X[:, keep_lambda] *
                                  np.reshape(1. / lambda_[keep_lambda], [1, -1]),
                                  X[:, keep_lambda].T))
            sigma_ = np.dot(sigma_, X[:, keep_lambda]
                            * np.reshape(1. / lambda_[keep_lambda], [1, -1]))
            sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
                              * X[:, keep_lambda].T, sigma_)
            sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
            coef_[keep_lambda] = alpha_ * np.dot(
                sigma_, np.dot(X[:, keep_lambda].T, y))

            ### Update alpha and lambda
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
            # Per-feature "well-determinedness"; drives the lambda updates.
            gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
            lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
                                    / ((coef_[keep_lambda]) ** 2
                                       + 2. * lambda_2))
            alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
                      / (rmse_ + 2. * alpha_2))

            ### Prune the weights with a precision over a threshold
            keep_lambda = lambda_ < self.threshold_lambda
            coef_[~keep_lambda] = 0

            ### Compute the objective function
            if self.compute_score:
                s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
                            + np.sum(np.log(lambda_)))
                s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
                self.scores_.append(s)

            ### Check for convergence
            # Skipped on the first iteration (no previous coef_ to compare).
            if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Converged after %s iterations" % iter_)
                break
            coef_old_ = np.copy(coef_)

        self.coef_ = coef_
        self.alpha_ = alpha_
        self.sigma_ = sigma_
        self.lambda_ = lambda_
        self._set_intercept(X_mean, y_mean, X_std)
        return self
| bsd-3-clause |
makinacorpus/Geotrek | geotrek/maintenance/migrations/0014_auto_20200316_1245.py | 2 | 1996 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2020-03-16 11:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def intervention_sgnage_infrastructure(apps, schema_editor):
    """Backfill the new generic relation from the legacy ``topology`` FK.

    For each intervention, copy ``topology_id`` into ``target_id`` and set
    ``target_type`` to the content type matching the topology's kind.
    (Function name kept as-is: it is referenced by the migration operations.)
    """
    Intervention = apps.get_model('maintenance', 'Intervention')
    ContentType = apps.get_model("contenttypes", "ContentType")
    # topology kind -> (app_label, model) of the matching content type
    kind_to_model = {
        'INTERVENTION': ('core', 'topology'),
        'SIGNAGE': ('signage', 'signage'),
        'INFRASTRUCTURE': ('infrastructure', 'infrastructure'),
    }
    for kind, (app_label, model) in kind_to_model.items():
        content_type = ContentType.objects.get(app_label=app_label, model=model)
        Intervention.objects.filter(topology__kind=kind).update(
            target_type=content_type, target_id=models.F('topology_id'))
class Migration(migrations.Migration):
    # Replaces Intervention's single ``topology`` FK with a generic relation
    # (``target_type`` + ``target_id``), backfilling data in between.
    # NOTE(review): the RunPython step has no reverse function, so this
    # migration is irreversible.

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        ('maintenance', '0013_auto_20200406_1410'),
    ]

    operations = [
        # 1) Add the generic-relation columns (nullable so existing rows pass).
        migrations.AddField(
            model_name='intervention',
            name='target_type',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType'),
        ),
        migrations.AddField(
            model_name='intervention',
            name='target_id',
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        # 2) Backfill them from the legacy topology FK.
        migrations.RunPython(intervention_sgnage_infrastructure),
        # 3) Drop the legacy FK once the data has been copied.
        migrations.RemoveField(
            model_name='intervention',
            name='topology',
        ),
    ]
| bsd-2-clause |
riveridea/gnuradio | gr-qtgui/python/qtgui/qa_qtgui.py | 54 | 2790 | #!/usr/bin/env python
#
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, qtgui
class test_qtgui(gr_unittest.TestCase):
    """Smoke tests: every QT GUI sink type can be instantiated."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    # The literal 5 stands in for filter.firdes.WIN_BLACKMAN_hARRIS so these
    # tests do not have to import gr-filter just for a single constant.
    def test01(self):
        self.qtsnk = qtgui.sink_c(1024, 5, 0, 1, "Test", True, True, True, True)

    def test02(self):
        self.qtsnk = qtgui.sink_f(1024, 5, 0, 1, "Test", True, True, True, True)

    def test03(self):
        self.qtsnk = qtgui.time_sink_c(1024, 1, "Test", 1)

    def test04(self):
        self.qtsnk = qtgui.time_sink_f(1024, 1, "Test", 1)

    def test05(self):
        self.qtsnk = qtgui.freq_sink_c(1024, 5, 0, 1, "Test", 1)

    def test06(self):
        self.qtsnk = qtgui.freq_sink_f(1024, 5, 0, 1, "Test", 1)

    def test07(self):
        self.qtsnk = qtgui.waterfall_sink_c(1024, 5, 0, 1, "Test")

    def test08(self):
        self.qtsnk = qtgui.waterfall_sink_f(1024, 5, 0, 1, "Test")

    def test09(self):
        self.qtsnk = qtgui.const_sink_c(1024, "Test", 1)

    def test10(self):
        self.qtsnk = qtgui.time_raster_sink_b(1024, 100, 100.5, [], [], "Test", 1)

    def test11(self):
        self.qtsnk = qtgui.time_raster_sink_f(1024, 100, 100.5, [], [], "Test", 1)

    def test12(self):
        self.qtsnk = qtgui.histogram_sink_f(1024, 100, -1, 1, "Test", 1)
if __name__ == '__main__':
    # Run the QA suite and emit the XML report used by the GNU Radio harness.
    gr_unittest.run(test_qtgui, "test_qtgui.xml")
| gpl-3.0 |
xiaom-GitHub/openthread | tools/harness-automation/cases/router_5_3_3.py | 16 | 1997 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from autothreadharness.harness_case import HarnessCase
import unittest
class Router_5_3_3(HarnessCase):
    """Thread certification case 5.3.3 with the DUT in the router role."""

    role = HarnessCase.ROLE_ROUTER
    case = '5 3 3'
    golden_devices_required = 4

    def on_dialog(self, dialog, title):
        # Only the "Start DUT" prompt needs handling: blacklist the given
        # golden device before the DUT starts so the expected topology forms.
        if not title.startswith('Start DUT'):
            return
        dut = self.dut
        dut.enable_blacklist()
        dut.add_blacklist('166e0a000000005')
if __name__ == '__main__':
    # Allow running this case standalone via unittest discovery.
    unittest.main()
| bsd-3-clause |
leiferikb/bitpop | build/scripts/slave/recipe_modules/tryserver/api.py | 1 | 4689 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from slave import recipe_api
# Recognized values of the 'patch_storage' build property.
PATCH_STORAGE_RIETVELD = 'rietveld'
PATCH_STORAGE_GIT = 'git'
PATCH_STORAGE_SVN = 'svn'
class TryserverApi(recipe_api.RecipeApi):
@property
def patch_url(self):
"""Reads patch_url property and corrects it if needed."""
url = self.m.properties.get('patch_url')
return url
  @property
  def is_tryserver(self):
    """Returns true iff we can apply_issue or patch.

    NOTE(review): may return a truthy non-bool (e.g. the patch URL string)
    rather than True; callers appear to use it only in boolean context.
    """
    return self.can_apply_issue or self.is_patch_in_svn or self.is_patch_in_git
@property
def can_apply_issue(self):
"""Returns true iff the properties exist to apply_issue from rietveld."""
return (self.m.properties.get('rietveld')
and 'issue' in self.m.properties
and 'patchset' in self.m.properties)
@property
def is_patch_in_svn(self):
"""Returns true iff the properties exist to patch from a patch URL."""
return self.patch_url
@property
def is_patch_in_git(self):
return (self.m.properties.get('patch_storage') == PATCH_STORAGE_GIT and
self.m.properties.get('patch_repo_url') and
self.m.properties.get('patch_ref'))
def _apply_patch_step(self, patch_content, root):
patch_cmd = [
'patch',
'--dir', root or self.m.path['checkout'],
'--force',
'--forward',
'--input', patch_content,
'--remove-empty-files',
'--strip', '0',
]
yield self.m.step('apply patch', patch_cmd)
def apply_from_svn(self, cwd):
"""Downloads patch from patch_url using svn-export and applies it"""
# TODO(nodir): accept these properties as parameters
patch_url = self.patch_url
def link_patch(step_result):
"""Links the patch.diff file on the waterfall."""
step_result.presentation.logs['patch.diff'] = (
step_result.raw_io.output.split('\n'))
patch_file = self.m.raw_io.output('.diff')
ext = '.bat' if self.m.platform.is_win else ''
svn_cmd = ['svn' + ext, 'export', '--force', patch_url, patch_file]
yield self.m.step('download patch', svn_cmd, followup_fn=link_patch,
step_test_data=self.test_api.download_patch)
patch_content = self.m.raw_io.input(
self.m.step_history.last_step().raw_io.output)
yield self._apply_patch_step(patch_content, cwd)
def apply_from_git(self, cwd):
"""Downloads patch from given git repo and ref and applies it"""
# TODO(nodir): accept these properties as parameters
patch_repo_url = self.m.properties['patch_repo_url']
patch_ref = self.m.properties['patch_ref']
patch_dir = self.m.path.mkdtemp('patch')
git_setup_py = self.m.path['build'].join('scripts', 'slave', 'git_setup.py')
git_setup_args = ['--path', patch_dir, '--url', patch_repo_url]
patch_path = patch_dir.join('patch.diff')
yield (
self.m.python('patch git setup', git_setup_py, git_setup_args),
self.m.git('fetch', 'origin', patch_ref,
name='patch fetch', cwd=patch_dir),
self.m.git('clean', '-f', '-d', '-x',
name='patch clean', cwd=patch_dir),
self.m.git('checkout', '-f', 'FETCH_HEAD',
name='patch git checkout', cwd=patch_dir),
self._apply_patch_step(patch_path, cwd),
self.m.step('remove patch', ['rm', '-rf', patch_dir]),
)
def determine_patch_storage(self):
"""Determines patch_storage automatically based on properties."""
storage = self.m.properties.get('patch_storage')
if storage:
return storage
if self.can_apply_issue:
return PATCH_STORAGE_RIETVELD
elif self.is_patch_in_svn:
return PATCH_STORAGE_SVN
def maybe_apply_issue(self, cwd=None, authentication=None):
"""If we're a trybot, apply a codereview issue.
Args:
cwd: If specified, apply the patch from the specified directory.
authentication: authentication scheme whenever apply_issue.py is called.
This is only used if the patch comes from Rietveld. Possible values:
None, 'oauth2' (see also api.rietveld.apply_issue.)
"""
storage = self.determine_patch_storage()
if storage == PATCH_STORAGE_RIETVELD:
yield self.m.rietveld.apply_issue(
self.m.rietveld.calculate_issue_root(),
authentication=authentication)
elif storage == PATCH_STORAGE_SVN:
yield self.apply_from_svn(cwd)
elif storage == PATCH_STORAGE_GIT:
yield self.apply_from_git(cwd)
else:
# Since this method is "maybe", we don't raise an Exception.
pass
| gpl-3.0 |
NMGRL/pychron | pychron/hardware/kerr/kerr_circularstep_motor.py | 2 | 6247 | # ==============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import CInt
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.hardware.kerr.kerr_step_motor import KerrStepMotor
import time
from pychron.hardware.core.data_helper import make_bitarray
from six.moves import range
'''
status byte
0 1 2 3 4 5 6 7
1 0 0 0 0 0 0 0 1
18 0 0 0 1 0 0 1 0
0= moving
1= comm err
2= amp enable output signal is HIGH
3= power sense input signal is HIGH
4= at speed
5= vel prof mode
6= trap prof mode
7= home in progress
'''
class KerrCircularStepMotor(KerrStepMotor):
    """Kerr step-motor driver for a circular axis, homed via a proximity
    switch plus a home switch (see the status-byte notes above)."""

    # Soft travel limits in motor steps (trait declarations).
    min = CInt
    max = CInt

    def _get_io_bits(self):
        # I/O control bits sent to the controller, most significant first.
        return ['1',  # bit 4
                '1',
                '1',
                '1',
                '0']  # bit 0

    def _home_motor(self, *args, **kw):
        """Home the motor.

        Sequence: drive toward ``max`` until the proximity switch trips,
        zero the position, advance 55 steps past the switch, then creep one
        step at a time until the home switch is set while the proximity
        switch is clear, and finally zero the position again.
        """
        # start moving
        progress = self.progress
        if progress is not None:
            # NOTE(review): this overwrites the attribute value with
            # kw['progress'] and raises KeyError when the kwarg is absent;
            # presumably kw.get('progress', progress) was intended — verify.
            progress = kw['progress']
            # progress.increase_max()
            # progress.change_message('Homing {}'.format(self.name))
            # progress.increment()

        # from threading import Event, Thread
        # signal = Event()
        # t = Thread(target=self.progress_update, args=(progress, signal))
        # t.start()

        # ======================================================================
        # step 1. move positive until prox switch is on
        # ======================================================================
        # set to max pos
        self._set_motor_position(self.max, velocity=self.home_velocity)

        # wait until prox switch is on
        self._proximity_move(True, n=1, progress=progress)

        # ======================================================================
        # step 2. reset pos, move positive 55
        # ======================================================================
        self.reset_position(motor_off=False)
        moffset = 55
        self._set_motor_position(moffset, velocity=self.home_velocity)

        # =====================================================================
        # step 3. move 1 step incrementally until home switch set (and proximity not set)
        # =====================================================================
        for i in range(10):
            self._set_motor_position(i + 1 + moffset, velocity=1)
            time.sleep(0.1)
            lim = self._read_limits()
            # bit 4 = proximity switch, bit 2 = home switch (per indexing
            # used by _get_proximity_limit); stop once home-only is seen.
            if not int(lim[4]) and int(lim[2]):
                break

        # ======================================================================
        # step 4. set current position as 0
        # ======================================================================
        self.reset_position(motor_off=False)
        # signal.set()
        # NOTE(review): raises AttributeError when progress is None — confirm
        # callers always supply a progress object.
        progress.change_message('{} homing complete'.format(self.name), auto_increment=False)

    def _proximity_move(self, onoff, n=2, progress=None):
        """Poll the proximity switch until it reads ``onoff`` ``n`` times
        (or ~30 s elapse), then stop the motor and zero the position."""
        addr = self.address
        cnt = 0
        period = 0.0125  # polling interval, seconds
        tc = 0
        totalcnts = 30 / period  # ~30 second timeout
        prog = progress
        # poll proximity switch wait for n successes
        while cnt < n and tc < totalcnts:
            time.sleep(period)
            lim = self._get_proximity_limit()
            if (onoff and lim) or (not onoff and not lim):
                cnt += 1
            if cnt % 10 == 0 and prog:
                prog.change_message('Limit={}, cnt={}'.format(lim, tc), auto_increment=False)
            tc += 1

        # stop moving when proximity limit set
        cmds = [(addr, '1707', 100, 'Stop motor'),  # leave amp on
                (addr, '00', 100, 'Reset Position')]
        self._execute_hex_commands(cmds)

        if tc >= totalcnts:
            self.warning_dialog('Failed Homing motor')

    def _read_limits(self, verbose=False):
        """Read the controller status byte; returns it as a bit string, or
        None (implicitly) when no response was received."""
        cb = '00001000'
        inb = self.read_status(cb, verbose=False)
        if inb:
            # resp_byte consists of input_byte
            ba = make_bitarray(int(inb[2:-2], 16))
            return ba

    def _get_proximity_limit(self):
        # NOTE(review): _read_limits may return None on a comms failure,
        # which would raise TypeError here — confirm upstream guarantees.
        ba = self._read_limits()
        return int(ba[4])

    def _load_home_control_byte(self):
        '''
        control byte
        7 6 5 4 3 2 1 0
        97- 1 0 0 1 0 1 1 1

        0=capture home on limit1
        1=capture home on limit2
        2=turn motor off on home
        3=capture home on home
        4=stop abruptly
        5=stop smoothly
        6,7=not used- clear to 0
        '''
        # '00011000' = stop abruptly (bit 4) + capture home on home (bit 3).
        return int('00011000', 2)
# ============= EOF =============================================
#
# def _load_trajectory_controlbyte(self):
# '''
# control byte
# 7 6 5 4 3 2 1 0
# 97- 1 0 0 1 0 1 1 1
#
# 0=load pos
# 1=load vel
# 2=load acce
# 3=load pwm
# 4=enable servo
# 5=profile mode 0=trap 1=vel
# 6=direction trap mode 0=abs 1=rel vel mode 0=for. 1=back
# 7=start motion now
#
# '''
# return '{:02x}'.format(int('10000111', 2))
#
# def _get_velocity(self):
# speed = self._velocity #in um/sec
# res = 0.5
# steprate = speed / res
# result = round(steprate / 25)
# result = min(max(1, result), 250)
# print 'calcualtes velocity', result
# return result
| apache-2.0 |
John-Shaw/shadowsocks | tests/test_udp_src.py | 1009 | 2482 | #!/usr/bin/python
import socket
import socks
# Local SOCKS5 relay under test.
SERVER_IP = '127.0.0.1'
SERVER_PORT = 1081


def _proxied_udp_socket(bind_port):
    """Create a UDP socket that relays through the local SOCKS5 server."""
    s = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM, socket.SOL_UDP)
    s.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
    s.bind(('127.0.0.1', bind_port))
    return s


def _listener_udp_socket(family, address):
    """Create a plain UDP listener bound to |address|."""
    s = socket.socket(family, socket.SOCK_DGRAM, socket.SOL_UDP)
    s.bind(address)
    return s


if __name__ == '__main__':
    # Test 1: two destinations reached through one proxied socket must see
    # the same relay source port (IPv4).
    sender = _proxied_udp_socket(9000)
    listener_a = _listener_udp_socket(socket.AF_INET, ('127.0.0.1', 9001))
    listener_b = _listener_udp_socket(socket.AF_INET, ('127.0.0.1', 9002))
    sender.sendto(b'data', ('127.0.0.1', 9001))
    result1 = listener_a.recvfrom(8)
    sender.sendto(b'data', ('127.0.0.1', 9002))
    result2 = listener_b.recvfrom(8)
    sender.close()
    listener_a.close()
    listener_b.close()
    # make sure they're from the same source port
    assert result1 == result2

    # Test 2: same check over IPv6, reusing the same client port.
    sender = _proxied_udp_socket(9000)
    listener_a = _listener_udp_socket(socket.AF_INET6, ('::1', 9001))
    listener_b = _listener_udp_socket(socket.AF_INET6, ('::1', 9002))
    sender.sendto(b'data', ('::1', 9001))
    result1 = listener_a.recvfrom(8)
    sender.sendto(b'data', ('::1', 9002))
    result2 = listener_b.recvfrom(8)
    sender.close()
    listener_a.close()
    listener_b.close()
    # make sure they're from the same source port
    assert result1 == result2

    # Test 3: a different client port must map to a different relay port.
    sender = _proxied_udp_socket(9003)
    listener_a = _listener_udp_socket(socket.AF_INET6, ('::1', 9001))
    sender.sendto(b'data', ('::1', 9001))
    result3 = listener_a.recvfrom(8)
    # make sure they're from different source ports
    assert result1 != result3
    sender.close()
    listener_a.close()
| apache-2.0 |
NLnetLabs/unbound | libunbound/python/examples/ns-lookup.py | 18 | 1916 | #!/usr/bin/python
# vim:fileencoding=utf-8
'''
ns-lookup.py: Example shows how to lookup for NS records
Authors: Zdenek Vasicek (vasicek AT fit.vutbr.cz)
Marek Vavrusa (xvavru00 AT stud.fit.vutbr.cz)
Copyright (c) 2008. All rights reserved.
This software is open source.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
from __future__ import print_function
import unbound
# Build a resolution context that uses the system's configured resolvers.
ctx = unbound.ub_ctx()
ctx.resolvconf("/etc/resolv.conf")

# Query the NS records and print them when the lookup succeeded.
status, result = ctx.resolve("vutbr.cz", unbound.RR_TYPE_NS, unbound.RR_CLASS_IN)
lookup_ok = (status == 0) and result.havedata
if lookup_ok:
    print("Result:")
    print(" raw data:", result.data)
    for host in sorted(result.data.domain_list):
        print(" host: %s" % host)
| bsd-3-clause |
rembo10/headphones | lib/oauth2/__init__.py | 26 | 23443 | """
The MIT License
Copyright (c) 2007 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2 as httplib2
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
# Protocol-level defaults used throughout this module.
VERSION = '1.0'  # Hi Blaine!  (OAuth protocol version string)
HTTP_METHOD = 'GET'  # default HTTP verb for requests
SIGNATURE_METHOD = 'PLAINTEXT'  # default signing scheme
class Error(RuntimeError):
    """Generic exception class."""

    def __init__(self, message='OAuth error occured.'):
        self._message = message

    @property
    def message(self):
        """A hack to get around the deprecation errors in 2.6."""
        return self._message

    def __str__(self):
        # Delegate to the property so both views stay in sync.
        return self.message
class MissingSignature(Error):
    """Raised when an incoming request carries no oauth_signature parameter."""
    pass
def build_authenticate_header(realm=''):
    """Optional WWW-Authenticate header (401 error)"""
    challenge = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': challenge}
def escape(s):
    """Escape a URL including any /."""
    # safe='~' means '~' stays literal (OAuth unreserved) while '/' and
    # everything else non-alphanumeric gets percent-encoded.
    return urllib.quote(s, safe='~')
def generate_timestamp():
    """Get seconds since epoch (UTC)."""
    now = time.time()
    return int(now)
def generate_nonce(length=8):
    """Generate pseudorandom number."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
def generate_verifier(length=8):
    """Generate pseudorandom number."""
    chosen = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(chosen)
class Consumer(object):
    """A consumer of OAuth-protected services.

    The consumer ("third-party" client) identifies itself to the service
    provider with a registered *key*, and proves its identity by signing
    requests with the matching *secret*. Both must be supplied; a missing
    credential raises ValueError.
    """

    key = None
    secret = None

    def __init__(self, key, secret):
        if key is None or secret is None:
            raise ValueError("Key and secret must be set.")
        self.key = key
        self.secret = secret

    def __str__(self):
        # Serialize the credentials as a form-encoded string.
        data = {
            'oauth_consumer_key': self.key,
            'oauth_consumer_secret': self.secret,
        }
        return urllib.urlencode(data)
class Token(object):
    """An OAuth credential used to request authorization or a protected
    resource.

    Tokens in OAuth comprise a *key* and a *secret*. The key is included in
    requests to identify the token being used, but the secret is used only in
    the signature, to prove that the requester is who the server gave the
    token to.

    When first negotiating the authorization, the consumer asks for a *request
    token* that the live user authorizes with the service provider. The
    consumer then exchanges the request token for an *access token* that can
    be used to access protected resources.
    """

    key = None
    secret = None
    callback = None
    callback_confirmed = None
    verifier = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

        if self.key is None or self.secret is None:
            raise ValueError("Key and secret must be set.")

    def set_callback(self, callback):
        # Recording a callback implicitly marks it confirmed (OAuth 1.0a).
        self.callback = callback
        self.callback_confirmed = 'true'

    def set_verifier(self, verifier=None):
        # Use the supplied verifier, or generate a fresh pseudorandom one.
        if verifier is not None:
            self.verifier = verifier
        else:
            self.verifier = generate_verifier()

    def get_callback_url(self):
        """Returns the callback URL with oauth_verifier appended to its
        query string, or the raw callback when no verifier is set."""
        if self.callback and self.verifier:
            # Append the oauth_verifier.
            parts = urlparse.urlparse(self.callback)
            scheme, netloc, path, params, query, fragment = parts[:6]
            if query:
                query = '%s&oauth_verifier=%s' % (query, self.verifier)
            else:
                query = 'oauth_verifier=%s' % self.verifier
            return urlparse.urlunparse((scheme, netloc, path, params,
                                        query, fragment))
        return self.callback

    def to_string(self):
        """Returns this token as a plain string, suitable for storage.

        The resulting string includes the token's secret, so you should never
        send or store this string where a third party can read it.
        """
        data = {
            'oauth_token': self.key,
            'oauth_token_secret': self.secret,
        }

        if self.callback_confirmed is not None:
            data['oauth_callback_confirmed'] = self.callback_confirmed
        return urllib.urlencode(data)

    @staticmethod
    def from_string(s):
        """Deserializes a token from a string like one returned by
        `to_string()`."""

        if not len(s):
            raise ValueError("Invalid parameter string.")

        params = parse_qs(s, keep_blank_values=False)
        if not len(params):
            raise ValueError("Invalid parameter string.")

        try:
            key = params['oauth_token'][0]
        except Exception:
            raise ValueError("'oauth_token' not found in OAuth request.")

        try:
            secret = params['oauth_token_secret'][0]
        except Exception:
            raise ValueError("'oauth_token_secret' not found in "
                             "OAuth request.")

        token = Token(key, secret)
        try:
            token.callback_confirmed = params['oauth_callback_confirmed'][0]
        except KeyError:
            pass  # 1.0, no callback confirmed.
        return token

    def __str__(self):
        return self.to_string()
def setter(attr):
    """Decorator factory: build a property whose setter is *attr* and whose
    getter/deleter proxy the instance ``__dict__`` under attr's name."""
    name = attr.__name__

    def getter(self):
        if name not in self.__dict__:
            raise AttributeError(name)
        return self.__dict__[name]

    def deleter(self):
        del self.__dict__[name]

    return property(getter, attr, deleter)
class Request(dict):
    """The parameters and information for an HTTP request, suitable for
    authorizing with OAuth credentials.

    When a consumer wants to access a service's protected resources, it does
    so using a signed HTTP request identifying itself (the consumer) with its
    key, and providing an access token authorized by the end user to access
    those resources.

    The request doubles as a dict of its OAuth and application parameters.
    """

    http_method = HTTP_METHOD  # default HTTP verb
    http_url = None
    version = VERSION  # OAuth protocol version

    def __init__(self, method=HTTP_METHOD, url=None, parameters=None):
        if method is not None:
            self.method = method

        if url is not None:
            self.url = url

        if parameters is not None:
            self.update(parameters)

    @setter
    def url(self, value):
        # Normalize to scheme://host/path, dropping query/params/fragment
        # and default port numbers, as the signature base string requires.
        parts = urlparse.urlparse(value)
        scheme, netloc, path = parts[:3]

        # Exclude default port numbers.
        if scheme == 'http' and netloc[-3:] == ':80':
            netloc = netloc[:-3]
        elif scheme == 'https' and netloc[-4:] == ':443':
            netloc = netloc[:-4]

        if scheme != 'http' and scheme != 'https':
            raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
        value = '%s://%s%s' % (scheme, netloc, path)
        self.__dict__['url'] = value

    @setter
    def method(self, value):
        # HTTP verbs are stored upper-cased for consistent signing.
        self.__dict__['method'] = value.upper()

    def _get_timestamp_nonce(self):
        # Raises KeyError when either oauth parameter is absent.
        return self['oauth_timestamp'], self['oauth_nonce']

    def get_nonoauth_parameters(self):
        """Get any non-OAuth parameters."""
        return dict([(k, v) for k, v in self.iteritems()
                     if not k.startswith('oauth_')])

    def to_header(self, realm=''):
        """Serialize as a header for an HTTPAuth request."""
        # Only oauth_* parameters belong in the Authorization header.
        oauth_params = ((k, v) for k, v in self.items()
                        if k.startswith('oauth_'))
        stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
        header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
        params_header = ', '.join(header_params)

        auth_header = 'OAuth realm="%s"' % realm
        if params_header:
            auth_header = "%s, %s" % (auth_header, params_header)

        return {'Authorization': auth_header}

    def to_postdata(self):
        """Serialize as post data for a POST request."""
        return self.encode_postdata(self)

    def encode_postdata(self, data):
        # tell urlencode to deal with sequence values and map them correctly
        # to resulting querystring. for example self["k"] = ["v1", "v2"] will
        # result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
        return urllib.urlencode(data, True)

    def to_url(self):
        """Serialize as a URL for a GET request."""
        return '%s?%s' % (self.url, self.to_postdata())

    def get_parameter(self, parameter):
        """Returns the named parameter; raises Error when it is absent.

        NOTE(review): a parameter explicitly set to None is indistinguishable
        from a missing one here.
        """
        ret = self.get(parameter)
        if ret is None:
            raise Error('Parameter not found: %s' % parameter)

        return ret

    def get_normalized_parameters(self):
        """Return a string that contains the parameters that must be signed."""
        items = [(k, v) for k, v in self.items() if k != 'oauth_signature']
        encoded_str = urllib.urlencode(sorted(items), True)
        # Encode signature parameters per Oauth Core 1.0 protocol
        # spec draft 7, section 3.6
        # (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
        # Spaces must be encoded with "%20" instead of "+"
        return encoded_str.replace('+', '%20')

    def sign_request(self, signature_method, consumer, token):
        """Set the signature parameter to the result of sign."""
        if 'oauth_consumer_key' not in self:
            self['oauth_consumer_key'] = consumer.key

        if token and 'oauth_token' not in self:
            self['oauth_token'] = token.key

        self['oauth_signature_method'] = signature_method.name
        self['oauth_signature'] = signature_method.sign(self, consumer, token)

    @classmethod
    def make_timestamp(cls):
        """Get seconds since epoch (UTC)."""
        return str(int(time.time()))

    @classmethod
    def make_nonce(cls):
        """Generate pseudorandom number."""
        return str(random.randint(0, 100000000))

    @classmethod
    def from_request(cls, http_method, http_url, headers=None, parameters=None,
                     query_string=None):
        """Combines multiple parameter sources.

        Precedence (later wins): explicit parameters, Authorization header,
        query_string, then parameters embedded in http_url.
        """
        if parameters is None:
            parameters = {}

        # Headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # Check that the authorization header is OAuth.
            if auth_header[:6] == 'OAuth ':
                auth_header = auth_header[6:]
                try:
                    # Get the parameters from the header.
                    header_params = cls._split_header(auth_header)
                    parameters.update(header_params)
                except:
                    raise Error('Unable to parse OAuth parameters from '
                                'Authorization header.')

        # GET or POST query string.
        if query_string:
            query_params = cls._split_url_string(query_string)
            parameters.update(query_params)

        # URL parameters.
        param_str = urlparse.urlparse(http_url)[4]  # query
        url_params = cls._split_url_string(param_str)
        parameters.update(url_params)

        if parameters:
            return cls(http_method, http_url, parameters)

        return None

    @classmethod
    def from_consumer_and_token(cls, consumer, token=None,
                                http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Builds a request pre-populated with the standard oauth_* defaults
        for *consumer* (and *token*, when given)."""
        if not parameters:
            parameters = {}

        defaults = {
            'oauth_consumer_key': consumer.key,
            'oauth_timestamp': cls.make_timestamp(),
            'oauth_nonce': cls.make_nonce(),
            'oauth_version': cls.version,
        }

        # Caller-supplied parameters override the generated defaults.
        defaults.update(parameters)
        parameters = defaults

        if token:
            parameters['oauth_token'] = token.key

        return Request(http_method, http_url, parameters)

    @classmethod
    def from_token_and_callback(cls, token, callback=None,
                                http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Builds a request carrying *token* (and optional callback), as used
        in the authorization step."""
        if not parameters:
            parameters = {}

        parameters['oauth_token'] = token.key

        if callback:
            parameters['oauth_callback'] = callback

        return cls(http_method, http_url, parameters)

    @staticmethod
    def _split_header(header):
        """Turn Authorization: header into parameters."""
        params = {}
        parts = header.split(',')
        for param in parts:
            # Ignore realm parameter.
            if param.find('realm') > -1:
                continue
            # Remove whitespace.
            param = param.strip()
            # Split key-value.
            param_parts = param.split('=', 1)
            # Remove quotes and unescape the value.
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
        return params

    @staticmethod
    def _split_url_string(param_str):
        """Turn URL string into parameters."""
        parameters = parse_qs(param_str, keep_blank_values=False)
        # parse_qs yields lists; keep only the first value for each key.
        for k, v in parameters.iteritems():
            parameters[k] = urllib.unquote(v[0])
        return parameters
class Server(object):
    """A skeletal implementation of a service provider, providing protected
    resources to requests from authorized consumers.

    This class implements the logic to check requests for authorization. You
    can use it with your web server or web framework to protect certain
    resources with OAuth.
    """

    timestamp_threshold = 300  # In seconds, five minutes.
    version = VERSION
    signature_methods = None

    def __init__(self, signature_methods=None):
        self.signature_methods = signature_methods or {}

    def add_signature_method(self, signature_method):
        """Registers a SignatureMethod under its name; returns the registry."""
        self.signature_methods[signature_method.name] = signature_method
        return self.signature_methods

    def verify_request(self, request, consumer, token):
        """Verifies an api call and checks all the parameters.

        Raises Error/MissingSignature on failure; on success returns the
        request's non-OAuth (application) parameters.
        """
        version = self._get_version(request)
        self._check_signature(request, consumer, token)
        parameters = request.get_nonoauth_parameters()
        return parameters

    def build_authenticate_header(self, realm=''):
        """Optional support for the authenticate header."""
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}

    def _get_version(self, request):
        """Verify the correct version request for this server."""
        try:
            version = request.get_parameter('oauth_version')
        except:
            # NOTE(review): bare except — missing parameter falls back to
            # the module default, but any other error is swallowed too.
            version = VERSION
        if version and version != self.version:
            raise Error('OAuth version %s not supported.' % str(version))
        return version

    def _get_signature_method(self, request):
        """Figure out the signature with some defaults."""
        try:
            signature_method = request.get_parameter('oauth_signature_method')
        except:
            signature_method = SIGNATURE_METHOD

        try:
            # Get the signature method object.
            signature_method = self.signature_methods[signature_method]
        except:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise Error('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))

        return signature_method

    def _get_verifier(self, request):
        # Raises Error (via get_parameter) when the verifier is absent.
        return request.get_parameter('oauth_verifier')

    def _check_signature(self, request, consumer, token):
        # Reject stale requests first, then verify the signature proper.
        timestamp, nonce = request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        signature_method = self._get_signature_method(request)

        try:
            signature = request.get_parameter('oauth_signature')
        except:
            raise MissingSignature('Missing oauth_signature.')

        # Validate the signature.
        valid = signature_method.check(request, consumer, token, signature)

        if not valid:
            key, base = signature_method.signing_base(request, consumer, token)

            raise Error('Invalid signature. Expected signature base '
                        'string: %s' % base)

        # NOTE(review): this re-signing result is never used.
        built = signature_method.sign(request, consumer, token)

    def _check_timestamp(self, timestamp):
        """Verify that timestamp is recentish."""
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = now - timestamp
        if lapsed > self.timestamp_threshold:
            raise Error('Expired timestamp: given %d and now %s has a '
                        'greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold))
class Client(httplib2.Http):
    """OAuthClient is a worker to attempt to execute a request.

    An httplib2.Http subclass that signs every request with the configured
    consumer/token credentials (HMAC-SHA1 by default) before sending it.
    """

    def __init__(self, consumer, token=None, cache=None, timeout=None,
                 proxy_info=None):

        if consumer is not None and not isinstance(consumer, Consumer):
            raise ValueError("Invalid consumer.")

        if token is not None and not isinstance(token, Token):
            raise ValueError("Invalid token.")

        self.consumer = consumer
        self.token = token
        self.method = SignatureMethod_HMAC_SHA1()

        httplib2.Http.__init__(self, cache=cache, timeout=timeout,
                               proxy_info=proxy_info)

    def set_signature_method(self, method):
        """Replaces the default HMAC-SHA1 signing with *method*."""
        if not isinstance(method, SignatureMethod):
            raise ValueError("Invalid signature method.")

        self.method = method

    def request(self, uri, method="GET", body=None, headers=None,
                redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None,
                force_auth_header=False):
        """Signs the request and delegates to httplib2.Http.request.

        With force_auth_header the OAuth parameters travel only in the
        Authorization header; otherwise they are folded into the POST body
        or the GET query string.
        """
        if not isinstance(headers, dict):
            headers = {}

        # Pull existing parameters out of the body/URL so they take part
        # in the signature.
        if body and method == "POST":
            parameters = dict(parse_qsl(body))
        elif method == "GET":
            parsed = urlparse.urlparse(uri)
            parameters = parse_qs(parsed.query)
        else:
            parameters = None

        req = Request.from_consumer_and_token(self.consumer, token=self.token,
                                              http_method=method, http_url=uri, parameters=parameters)

        req.sign_request(self.method, self.consumer, self.token)

        if force_auth_header:
            # ensure we always send Authorization
            headers.update(req.to_header())

        if method == "POST":
            if not force_auth_header:
                body = req.to_postdata()
            else:
                body = req.encode_postdata(req.get_nonoauth_parameters())
            headers['Content-Type'] = 'application/x-www-form-urlencoded'
        elif method == "GET":
            if not force_auth_header:
                uri = req.to_url()
        else:
            if not force_auth_header:
                # don't call update twice.
                headers.update(req.to_header())

        return httplib2.Http.request(self, uri, method=method, body=body,
                                     headers=headers, redirections=redirections,
                                     connection_type=connection_type)
class SignatureMethod(object):
    """Abstract base for OAuth request-signing strategies.

    The OAuth protocol lets consumers and service providers pick a way to
    sign requests. Subclasses supply a ``name`` attribute plus
    ``signing_base()`` and ``sign()``; ``check()`` verifies a presented
    signature by re-signing the request.
    """

    def signing_base(self, request, consumer, token):
        """Return a (key, message) pair from which the signature is built.

        The message half may be surfaced in error output to help clients
        debug signature mismatches.
        """
        raise NotImplementedError

    def sign(self, request, consumer, token):
        """Return the signature for *request* on behalf of *consumer* and
        *token*; implementations should derive it from signing_base()."""
        raise NotImplementedError

    def check(self, request, consumer, token, signature):
        """True when *signature* matches what sign() would produce."""
        return self.sign(request, consumer, token) == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
    """HMAC-SHA1 signing, the scheme most providers require."""

    name = 'HMAC-SHA1'

    def signing_base(self, request, consumer, token):
        """Returns (signing key, signature base string) per the OAuth spec:
        key is 'consumer_secret&[token_secret]', message is
        'METHOD&url&normalized_params' with each part escaped."""
        sig = (
            escape(request.method),
            escape(request.url),
            escape(request.get_normalized_parameters()),
        )

        key = '%s&' % escape(consumer.secret)
        if token:
            key += escape(token.secret)
        raw = '&'.join(sig)
        return key, raw

    def sign(self, request, consumer, token):
        """Builds the base signature string."""
        key, raw = self.signing_base(request, consumer, token)

        # HMAC object.
        try:
            import hashlib  # 2.5
            hashed = hmac.new(key, raw, hashlib.sha1)
        except ImportError:
            # Pre-2.5 fallback; the sha module is long deprecated.
            import sha  # Deprecated
            hashed = hmac.new(key, raw, sha)

        # Calculate the digest base 64 (drop the trailing newline).
        return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
    """PLAINTEXT signing: the signature is just the escaped secrets."""

    name = 'PLAINTEXT'

    def signing_base(self, request, consumer, token):
        """Concatenates the consumer key and secret with the token's
        secret."""
        sig = '%s&' % escape(consumer.secret)
        if token:
            sig += escape(token.secret)

        # Key and message are identical for PLAINTEXT.
        return sig, sig

    def sign(self, request, consumer, token):
        # The "signature" is simply the signing base itself.
        return self.signing_base(request, consumer, token)[1]
| gpl-3.0 |
cedriclaunay/gaffer | python/GafferScene/AlembicPath.py | 1 | 2881 | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import IECoreAlembic
import Gaffer
class AlembicPath( Gaffer.Path ) :
	"""A Gaffer.Path subclass presenting the object hierarchy inside an
	Alembic cache file."""

	def __init__( self, fileNameOrAlembicInput, path, root="/", filter=None ) :

		Gaffer.Path.__init__( self, path, root, filter=filter )

		if isinstance( fileNameOrAlembicInput, basestring ) :
			# Bug fix : IECoreAlembic is a module and is not callable -
			# the file must be opened via the AlembicInput class.
			self.__rootInput = IECoreAlembic.AlembicInput( fileNameOrAlembicInput )
		else :
			assert( isinstance( fileNameOrAlembicInput, IECoreAlembic.AlembicInput ) )
			self.__rootInput = fileNameOrAlembicInput

	def isValid( self ) :

		# The path is valid when every element names a child of the
		# previous one - child() raises for unknown names.
		try :
			self.__input()
			return True
		except Exception :
			# Narrowed from a bare except so KeyboardInterrupt/SystemExit
			# are not swallowed.
			return False

	def isLeaf( self ) :

		# any alembic object may have children.
		return False

	def info( self ) :

		return Gaffer.Path.info( self )

	def _children( self ) :

		childNames = self.__input().childNames()
		return [ AlembicPath( self.__rootInput, self[:] + [ x ], self.root() ) for x in childNames ]

	def copy( self ) :

		return AlembicPath( self.__rootInput, self[:], self.root(), self.getFilter() )

	def __input( self ) :

		# Walk from the root input down through each path element.
		result = self.__rootInput
		for p in self :
			result = result.child( p )

		return result
| bsd-3-clause |
PaulGrimal/peach | peach/optm/__init__.py | 6 | 1629 | ################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: optm/__init__.py
# Makes the optm directory a package and initializes it.
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
This package implements deterministic optimization methods. Consult:
base
Basic definitions and interface with the optimization methods;
linear
Basic methods for one variable optimization;
multivar
Gradient, Newton and other multivariable optimization methods;
quasinewton
Quasi-Newton methods;
Every optimizer works in pretty much the same way. Instantiate the respective
class, using as parameter the cost function to be optimized, the first estimate
(a scalar in case of a single variable optimization, and a one-dimensional array
in case of multivariable optimization) and some other parameters. Use ``step()``
to perform one iteration of the method, use the ``__call__()`` method to perform
the search until the stop conditions are met. See each method for details.
"""
# __all__ = [ 'base', 'linear', 'multivar', 'quasinewton' ]
################################################################################
# Imports sub-packages
from peach.optm.base import * # Basic definitions
from peach.optm.linear import * # Linear and 1-D optimization
from peach.optm.multivar import * # Gradient and Newton methods
from peach.optm.quasinewton import * # Quasi-newton methods
| lgpl-2.1 |
sivakuna-aap/superdesk | server/apps/rules/routing_rules.py | 2 | 17866 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
import superdesk
import pytz
from pytz import all_timezones_set
from enum import Enum
from datetime import datetime, timedelta
from superdesk import get_resource_service
from superdesk.resource import Resource
from superdesk.services import BaseService
from superdesk.errors import SuperdeskApiError
from eve.utils import config
from superdesk.metadata.item import CONTENT_STATE
logger = logging.getLogger(__name__)
class Weekdays(Enum):
    """Weekday names used when evaluating routing schedules, numbered to
    match ``datetime.weekday()`` (Monday == 0)."""

    MON = 0
    TUE = 1
    WED = 2
    THU = 3
    FRI = 4
    SAT = 5
    SUN = 6

    @classmethod
    def is_valid_schedule(cls, list_of_days):
        """Test if all days in list_of_days are valid day names.

        :param list list_of_days eg. ['mon', 'tue', 'fri']
        """
        return all(day.upper() in cls.__members__ for day in list_of_days)

    @classmethod
    def is_scheduled_day(cls, today, list_of_days):
        """Test if today's weekday is in schedule.

        :param datetime today
        :param list list_of_days
        """
        scheduled = {cls[day.upper()].value for day in list_of_days}
        return today.weekday() in scheduled

    @classmethod
    def dayname(cls, day):
        """Get name shortcut (MON, TUE, ...) for given day.

        :param datetime day
        """
        return cls(day.weekday()).name
def set_time(current_datetime, timestr, second=0):
    """Set time of given datetime according to timestr.

    Time format for timestr is `%H%M`, eg. 1014; ``None`` means midnight.

    :param datetime current_datetime
    :param string timestr
    :param int second
    """
    parsed = datetime.strptime('0000' if timestr is None else timestr, '%H%M')
    return current_datetime.replace(hour=parsed.hour, minute=parsed.minute, second=second)
class RoutingRuleSchemeResource(Resource):
    """
    Resource class for 'routing_schemes' endpoint.

    A routing scheme is a named list of rules; each rule pairs an optional
    content filter with the actions to take (fetch/publish/exit) and an
    optional weekly schedule restricting when the rule applies.
    """
    schema = {
        # Mandatory scheme name, unique case-insensitively ('iunique').
        'name': {
            'type': 'string',
            'iunique': True,
            'required': True,
            'nullable': False,
            'empty': False
        },
        'rules': {
            'type': 'list',
            'schema': {
                'type': 'dict',
                'schema': {
                    'name': {
                        'type': 'string'
                    },
                    # Optional reference to a content filter; a null filter
                    # is treated as "match everything" by the service.
                    'filter': Resource.rel('content_filters', nullable=True),
                    'actions': {
                        'type': 'dict',
                        'schema': {
                            # Desk/stage pairs to fetch the matched item to,
                            # optionally running a macro on it.
                            'fetch': {
                                'type': 'list',
                                'schema': {
                                    'type': 'dict',
                                    'schema': {
                                        'desk': Resource.rel('desks', True),
                                        'stage': Resource.rel('stages', True),
                                        'macro': {'type': 'string'}
                                    }
                                }
                            },
                            # Desk/stage pairs to fetch to and then publish from.
                            'publish': {
                                'type': 'list',
                                'schema': {
                                    'type': 'dict',
                                    'schema': {
                                        'desk': Resource.rel('desks', True),
                                        'stage': Resource.rel('stages', True),
                                        'macro': {'type': 'string'}
                                    }
                                }
                            },
                            # When true, stop evaluating subsequent rules.
                            'exit': {
                                'type': 'boolean'
                            },
                            # When true, keep the item on its current desk too.
                            'preserve_desk': {
                                'type': 'boolean'
                            }
                        }
                    },
                    # Optional weekly schedule; times are '%H%M' strings.
                    'schedule': {
                        'type': 'dict',
                        'nullable': True,
                        'schema': {
                            'day_of_week': {
                                'type': 'list'
                            },
                            'hour_of_day_from': {
                                'type': 'string'
                            },
                            'hour_of_day_to': {
                                'type': 'string'
                            },
                            'time_zone': {
                                'type': 'string',
                                'nullable': False,
                                'default': 'UTC'
                            }
                        }
                    }
                }
            }
        }
    }
    privileges = {'POST': 'routing_rules', 'DELETE': 'routing_rules', 'PATCH': 'routing_rules'}
class RoutingRuleSchemeService(BaseService):
    """
    Service class for 'routing_schemes' endpoint.
    """

    def on_create(self, docs):
        """
        Overriding to check the below pre-conditions:
            1. A routing scheme must have at least one rule.
            2. Every rule in the routing scheme must have name, filter and at least one action
        Will throw BadRequestError if any of the pre-conditions fail.
        """
        for routing_scheme in docs:
            self._adjust_for_empty_schedules(routing_scheme)
            self._validate_routing_scheme(routing_scheme)
            self._check_if_rule_name_is_unique(routing_scheme)

    def on_update(self, updates, original):
        """
        Overriding to check the below pre-conditions:
            1. A routing scheme must have at least one rule.
            2. Every rule in the routing scheme must have name, filter and at least one action
        Will throw BadRequestError if any of the pre-conditions fail.
        """
        self._adjust_for_empty_schedules(updates)
        self._validate_routing_scheme(updates)
        self._check_if_rule_name_is_unique(updates)

    def on_delete(self, doc):
        """
        Overriding to check the below pre-conditions:
            1. A routing scheme shouldn't be associated with an Ingest Provider.
        Will throw BadRequestError if any of the pre-conditions fail.
        """
        if self.backend.find_one('ingest_providers', req=None, routing_scheme=doc[config.ID_FIELD]):
            raise SuperdeskApiError.forbiddenError('Routing scheme is applied to channel(s). It cannot be deleted.')

    def apply_routing_scheme(self, ingest_item, provider, routing_scheme):
        """
        applies routing scheme and applies appropriate action (fetch, publish) to the item
        :param ingest_item: ingest item to which routing scheme needs to applied.
        :param provider: provider for which the routing scheme is applied.
        :param routing_scheme: routing scheme.
        """
        rules = routing_scheme.get('rules', [])

        if not rules:
            # BUG FIX: the original message used bare '%' placeholders
            # ("Routing Scheme % for provider %"), which raised ValueError
            # when interpolated, and passed (provider, scheme) in the wrong
            # order. Use lazy logging arguments in the correct order.
            logger.warning("Routing Scheme %s for provider %s has no rules configured.",
                           routing_scheme.get('name'), provider.get('name'))

        filters_service = superdesk.get_resource_service('content_filters')
        now = datetime.utcnow()

        for rule in self._get_scheduled_routing_rules(rules, now):
            content_filter = rule.get('filter', {})
            if filters_service.does_match(content_filter, ingest_item):
                if rule.get('actions', {}).get('preserve_desk', False) and ingest_item.get('task', {}).get('desk'):
                    # Keep the item on its current desk, then drop that desk
                    # from the explicit fetch destinations to avoid a
                    # duplicate fetch.
                    desk = get_resource_service('desks').find_one(req=None, _id=ingest_item['task']['desk'])
                    self.__fetch(ingest_item, [{'desk': desk[config.ID_FIELD], 'stage': desk['incoming_stage']}])
                    fetch_actions = [f for f in rule.get('actions', {}).get('fetch', [])
                                     if f.get('desk') != ingest_item['task']['desk']]
                else:
                    fetch_actions = rule.get('actions', {}).get('fetch', [])

                self.__fetch(ingest_item, fetch_actions)
                self.__publish(ingest_item, rule.get('actions', {}).get('publish', []))
                if rule.get('actions', {}).get('exit', False):
                    # 'exit' short-circuits the remaining rules.
                    break
            else:
                logger.info("Routing rule %s of Routing Scheme %s for Provider %s did not match for item %s",
                            rule.get('name'), routing_scheme.get('name'),
                            provider.get('name'), ingest_item[config.ID_FIELD])

    def _adjust_for_empty_schedules(self, routing_scheme):
        """For all routing scheme's rules, set their non-empty schedules to
        None if they are effectively not defined.

        A schedule is recognized as "not defined" if it only contains time zone
        information without anything else. This can happen if an empty schedule
        is submitted by the client, because `Eve` then converts it to the
        following:

            {'time_zone': 'UTC'}

        This is because the time_zone field has a default value set in the
        schema, and Eve wants to apply it even when the containing object (i.e.
        the schedule) is None and there is nothing that would contain the time
        zone information.

        :param dict routing_scheme: the routing scheme to check
        """
        for rule in routing_scheme.get('rules', []):
            schedule = rule.get('schedule')
            if schedule:
                if set(schedule.keys()) == {'time_zone'}:
                    rule['schedule'] = None
                elif 'time_zone' not in schedule.keys():
                    schedule['time_zone'] = 'UTC'

    def _validate_routing_scheme(self, routing_scheme):
        """
        Validates routing scheme for the below:
            1. A routing scheme must have at least one rule.
            2. Every rule in the routing scheme must have name, filter and at least one action
        Will throw BadRequestError if any of the conditions fail.

        :param routing_scheme:
        """
        routing_rules = routing_scheme.get('rules', [])
        if len(routing_rules) == 0:
            raise SuperdeskApiError.badRequestError(message="A Routing Scheme must have at least one Rule")
        for routing_rule in routing_rules:
            invalid_fields = [field for field in routing_rule.keys()
                              if field not in ('name', 'filter', 'actions', 'schedule')]

            if invalid_fields:
                # BUG FIX: the original mixed a %-style placeholder ('%s')
                # with str.format(), so the field list was never shown.
                raise SuperdeskApiError.badRequestError(
                    message="A routing rule has invalid fields {}".format(invalid_fields))

            schedule = routing_rule.get('schedule')
            actions = routing_rule.get('actions')

            if routing_rule.get('name') is None:
                raise SuperdeskApiError.badRequestError(message="A routing rule must have a name")
            elif actions is None or len(actions) == 0 or (actions.get('fetch') is None and actions.get(
                    'publish') is None and actions.get('exit') is None):
                raise SuperdeskApiError.badRequestError(message="A routing rule must have actions")
            else:
                self._validate_schedule(schedule)

    def _validate_schedule(self, schedule):
        """Check if the given routing schedule configuration is valid and raise
        an error if this is not the case.

        :param dict schedule: the routing schedule configuration to validate
        :raises SuperdeskApiError: if validation of `schedule` fails
        """
        if schedule is not None \
                and (len(schedule) == 0
                     or (schedule.get('day_of_week') is None
                         or len(schedule.get('day_of_week', [])) == 0)):
            raise SuperdeskApiError.badRequestError(message="Schedule when defined can't be empty.")

        if schedule:
            if not Weekdays.is_valid_schedule(schedule.get('day_of_week', [])):
                raise SuperdeskApiError.badRequestError(message="Invalid values for day of week.")

            if schedule.get('hour_of_day_from') or schedule.get('hour_of_day_to'):
                try:
                    # TypeError covers a missing (None) 'from' value,
                    # ValueError a malformed one.
                    from_time = datetime.strptime(schedule.get('hour_of_day_from'), '%H%M')
                except (TypeError, ValueError):
                    raise SuperdeskApiError.badRequestError(message="Invalid value for from time.")

                to_time = schedule.get('hour_of_day_to', '')
                if to_time:
                    try:
                        to_time = datetime.strptime(to_time, '%H%M')
                    except ValueError:
                        raise SuperdeskApiError.badRequestError(
                            message="Invalid value for hour_of_day_to "
                                    "(expected %H%M).")

                    # BUG FIX: this comparison used to execute even when no
                    # end time was given, comparing a datetime against the
                    # empty string and raising TypeError on Python 3. Only
                    # compare when both ends were parsed.
                    if from_time > to_time:
                        raise SuperdeskApiError.badRequestError(
                            message="From time should be less than To Time."
                        )

            time_zone = schedule.get('time_zone')
            if time_zone and (time_zone not in all_timezones_set):
                msg = 'Unknown time zone {}'.format(time_zone)
                raise SuperdeskApiError.badRequestError(message=msg)

    def _check_if_rule_name_is_unique(self, routing_scheme):
        """
        Checks if name of a routing rule is unique or not.
        """
        routing_rules = routing_scheme.get('rules', [])
        for routing_rule in routing_rules:
            rules_with_same_name = [rule for rule in routing_rules if rule.get('name') == routing_rule.get('name')]
            if len(rules_with_same_name) > 1:
                raise SuperdeskApiError.badRequestError("Rule Names must be unique within a scheme")

    def _get_scheduled_routing_rules(self, rules, current_dt_utc):
        """
        Iterates rules list and returns the list of rules that are scheduled.

        :param list rules: routing rules to check
        :param datetime current_dt_utc: the value to take as the current
            time in UTC
        :return: the rules scheduled to be appplied at `current_dt_utc`
        :rtype: list
        """
        # make it a timezone-aware object
        current_dt_utc = current_dt_utc.replace(tzinfo=pytz.utc)

        delta_minute = timedelta(minutes=1)
        scheduled_rules = []

        for rule in rules:
            is_scheduled = True
            schedule = rule.get('schedule', {})

            if schedule:
                # adjust current time to the schedule's timezone
                tz_name = schedule.get('time_zone')
                schedule_tz = pytz.timezone(tz_name) if tz_name else pytz.utc
                now_tz_schedule = current_dt_utc.astimezone(tz=schedule_tz)

                # Create start and end time-of-day limits. If start time is not
                # defined, the beginning of the day is assumed. If end time
                # is not defined, the end of the day is assumed (excluding the
                # midnight, since at that point a new day has already begun).
                hour_of_day_from = schedule.get('hour_of_day_from')
                if not hour_of_day_from:
                    hour_of_day_from = '0000'  # might be both '' or None
                from_time = set_time(now_tz_schedule, hour_of_day_from)

                hour_of_day_to = schedule.get('hour_of_day_to')
                if hour_of_day_to:
                    to_time = set_time(now_tz_schedule, hour_of_day_to)
                else:
                    to_time = set_time(now_tz_schedule, '2359') + delta_minute

                # check if the current day of week and time of day both match
                day_of_week_matches = Weekdays.is_scheduled_day(
                    now_tz_schedule, schedule.get('day_of_week', []))
                time_of_day_matches = (from_time <= now_tz_schedule < to_time)
                is_scheduled = (day_of_week_matches and time_of_day_matches)

            if is_scheduled:
                scheduled_rules.append(rule)

        return scheduled_rules

    def __fetch(self, ingest_item, destinations):
        """
        Fetch the item to the destinations

        :param ingest_item: item to be fetched
        :param destinations: list of desk and stage
        :return: list of archive item ids created by the fetch
        """
        archive_items = []
        for destination in destinations:
            try:
                item_id = get_resource_service('fetch') \
                    .fetch([{config.ID_FIELD: ingest_item[config.ID_FIELD],
                             'desk': str(destination.get('desk')),
                             'stage': str(destination.get('stage')),
                             'state': CONTENT_STATE.ROUTED,
                             'macro': destination.get('macro', None)}])[0]
                archive_items.append(item_id)
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                # are not swallowed; fetching remains best-effort per item.
                logger.exception("Failed to fetch item %s to desk %s" % (ingest_item['guid'], destination))

        return archive_items

    def __publish(self, ingest_item, destinations):
        """
        Fetches the item to the desk and then publishes the item.

        :param ingest_item: item to be published
        :param destinations: list of desk and stage
        """
        items_to_publish = self.__fetch(ingest_item, destinations)
        for item in items_to_publish:
            try:
                get_resource_service('archive_publish').patch(item, {'auto_publish': True})
            except Exception:
                # Best-effort per item: log and continue with the rest.
                logger.exception("Failed to publish item %s." % item)
| agpl-3.0 |
horance-liu/tensorflow | tensorflow/contrib/nccl/python/ops/nccl_ops.py | 23 | 8121 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for GPU collective operations implemented using NVIDIA nccl."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.contrib.nccl.ops import gen_nccl_ops
from tensorflow.contrib.util import loader
from tensorflow.python.eager import context
from tensorflow.python.framework import device
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
# Load the shared object that registers the nccl kernels and ops with
# TensorFlow; keeping a reference prevents it from being unloaded.
_nccl_ops_so = loader.load_op_library(
    resource_loader.get_path_to_datafile('_nccl_ops.so'))
def all_sum(tensors):
  """Returns a list of tensors with the all-reduce sum across `tensors`.

  The computation is done with an all-reduce operation, so if only some of the
  returned tensors are evaluated then the computation will hang.

  Args:
    tensors: The input tensors across which to sum; must be assigned
      to GPU devices.

  Returns:
    List of tensors, each with the sum of the input tensors, where tensor i has
    the same device as `tensors[i]`.
  """
  # Delegates to the shared all-reduce builder with reduction='sum'.
  return _apply_all_reduce('sum', tensors)
@ops.RegisterGradient('NcclAllReduce')
def _all_sum_grad(op, grad):
  """Gradient for `all_sum`: another all-sum over the incoming gradients.

  Args:
    op: The `all_sum` `Operation` that we are differentiating.
    grad: Gradient with respect to the output of the `all_sum` op.

  Returns:
    The gradient with respect to the output of `all_sum`.

  Raises:
    LookupError: If `reduction` is not `sum`.
  """
  if op.get_attr('reduction') != 'sum':
    raise LookupError('No gradient defined for NcclAllReduce except sum.')
  _check_device(grad, expected=op.device)

  # The backward pass forms its own collective, so derive a distinct
  # shared_name from the forward op's name.
  with ops.device(op.device):
    return gen_nccl_ops.nccl_all_reduce(
        input=grad,
        reduction='sum',
        num_devices=op.get_attr('num_devices'),
        shared_name=op.get_attr('shared_name') + '_grad')
def all_prod(tensors):
  """Returns a list of tensors with the all-reduce product across `tensors`.

  The computation is done with an all-reduce operation, so if only some of the
  returned tensors are evaluated then the computation will hang.

  Args:
    tensors: The input tensors across which to multiply; must be assigned
      to GPU devices.

  Returns:
    List of tensors, each with the product of the input tensors, where tensor i
    has the same device as `tensors[i]`.
  """
  # Delegates to the shared all-reduce builder with reduction='prod'.
  return _apply_all_reduce('prod', tensors)
def all_min(tensors):
  """Returns a list of tensors with the all-reduce min across `tensors`.

  The computation is done with an all-reduce operation, so if only some of the
  returned tensors are evaluated then the computation will hang.

  Args:
    tensors: The input tensors across which to reduce; must be assigned
      to GPU devices.

  Returns:
    List of tensors, each with the minimum of the input tensors, where tensor i
    has the same device as `tensors[i]`.
  """
  # Delegates to the shared all-reduce builder with reduction='min'.
  return _apply_all_reduce('min', tensors)
def all_max(tensors):
  """Returns a list of tensors with the all-reduce max across `tensors`.

  The computation is done with an all-reduce operation, so if only some of the
  returned tensors are evaluated then the computation will hang.

  Args:
    tensors: The input tensors across which to reduce; must be assigned
      to GPU devices.

  Returns:
    List of tensors, each with the maximum of the input tensors, where tensor i
    has the same device as `tensors[i]`.
  """
  # Delegates to the shared all-reduce builder with reduction='max'.
  return _apply_all_reduce('max', tensors)
def reduce_sum(tensors):
  """Returns a tensor with the reduce sum across `tensors`.

  The computation is done with a reduce operation, so only one tensor is
  returned.

  Args:
    tensors: The input tensors across which to sum; must be assigned
      to GPU devices.

  Returns:
    A tensor containing the sum of the input tensors.

  Raises:
    LookupError: If context is not currently using a GPU device.
  """
  # Delegates to the shared single-output reduce builder.
  return _apply_reduce('sum', tensors)
@ops.RegisterGradient('NcclReduce')
def _reduce_sum_grad(op, grad):
  """The gradients for input `Operation` of `reduce_sum`.

  Args:
    op: The `sum send` `Operation` that we are differentiating.
    grad: Gradient with respect to the output of the `reduce_sum` op.

  Returns:
    The gradient with respect to the input of `reduce_sum` op.

  Raises:
    LookupError: If the reduction attribute of op is not `sum`.
  """
  if op.get_attr('reduction') != 'sum':
    raise LookupError('No gradient defined for NcclReduce except sum.')
  _check_device(grad, expected=op.device)
  with ops.device(op.device):
    result = gen_nccl_ops.nccl_broadcast(input=grad, shape=grad.shape)
  # Every input of the reduce receives the same broadcast gradient.
  return [result] * len(op.inputs)
def broadcast(tensor):
  """Returns a tensor that can be efficiently transferred to other devices.

  Args:
    tensor: The tensor to send; must be assigned to a GPU device.

  Returns:
    A tensor with the value of `src_tensor`, which can be used as input to
    ops on other GPU devices.
  """
  # Broadcast is graph-mode only and requires an explicit device placement.
  _check_graph_mode()
  _check_device(tensor)
  with ops.device(tensor.device):
    return gen_nccl_ops.nccl_broadcast(input=tensor, shape=tensor.shape)
@ops.RegisterGradient('NcclBroadcast')
def _broadcast_grad(op, accumulated_grad):
  """The gradients for input `Operation` of `broadcast`.

  Args:
    op: The `broadcast send` `Operation` that we are differentiating.
    accumulated_grad: Accumulated gradients with respect to the output of the
      `broadcast` op.

  Returns:
    Gradients with respect to the input of `broadcast`.
  """
  # Grab inputs of accumulated_grad and replace accumulation with reduce_sum.
  # (Idiom fix: list(...) instead of a pass-through comprehension.)
  grads = list(accumulated_grad.op.inputs)
  for t in grads:
    _check_device(t)

  with ops.device(op.device):
    return gen_nccl_ops.nccl_reduce(input=grads, reduction='sum')
def _apply_all_reduce(reduction, tensors):
  """Shared builder for the all_* functions: one all-reduce op per input."""
  if not tensors:
    raise ValueError('Must pass >0 tensors to all reduce operations')
  _check_graph_mode()

  # All participants share one collective name so nccl can pair them up.
  collective_name = _get_shared_name()
  outputs = []
  for tensor in tensors:
    _check_device(tensor)
    with ops.device(tensor.device):
      outputs.append(
          gen_nccl_ops.nccl_all_reduce(
              input=tensor,
              reduction=reduction,
              num_devices=len(tensors),
              shared_name=collective_name))
  return outputs
def _apply_reduce(reduction, tensors):
  """Shared builder for the reduce_* functions: a single reduce op."""
  if not tensors:
    raise ValueError('Must pass >0 tensors to reduce operations')
  _check_graph_mode()

  for tensor in tensors:
    _check_device(tensor)
  result = gen_nccl_ops.nccl_reduce(input=tensors, reduction=reduction)

  # The output must land on a device that also owns one of the inputs.
  if not any(t.device == result.device for t in tensors):
    raise ValueError('One input tensor must be assigned to current device')
  return result
# Module-wide counter (guarded by _lock) used to mint unique shared_name
# values for successive collective ops.
_lock = threading.Lock()
_shared_name_counter = 0
def _get_shared_name():
  """Return a fresh unique shared_name ('c0', 'c1', ...) for collectives."""
  global _shared_name_counter

  with _lock:
    current = _shared_name_counter
    _shared_name_counter = current + 1
  return 'c%s' % current
def _check_device(tensor, expected=None):
  """Raise ValueError unless `tensor` has a device, matching `expected` if given."""
  if not device.canonical_name(tensor.device):
    raise ValueError('Device assignment required for nccl collective ops')
  if expected and expected != tensor.device:
    raise ValueError('Expected device %s, got %s' % (expected, tensor.device))
def _check_graph_mode():
  """Raise ValueError when running eagerly; nccl ops require graph mode."""
  if context.in_eager_mode():
    raise ValueError('Nccl ops are not supported in eager mode')
| apache-2.0 |
xiandiancloud/edxplaltfom-xusong | common/lib/xmodule/xmodule/modulestore/tests/test_xml.py | 30 | 4708 | """
Tests around our XML modulestore, including importing
well-formed and not-well-formed XML.
"""
import os.path
import unittest
from glob import glob
from mock import patch
from xmodule.modulestore.xml import XMLModuleStore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.tests import DATA_DIR
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.tests.test_modulestore import check_has_course_method
def glob_tildes_at_end(path):
    """
    A wrapper for the `glob.glob` function, but it always returns
    files that end in a tilde (~) at the end of the list of results.
    """
    # A stable sort on "ends with ~" keeps each group's relative order
    # while moving all backup (tilde) files to the tail.
    return sorted(glob(path), key=lambda name: name.endswith("~"))
class TestXMLModuleStore(unittest.TestCase):
    """
    Test around the XML modulestore
    """
    def test_xml_modulestore_type(self):
        # The XML store must identify itself as the read-only xml backend.
        store = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])
        self.assertEqual(store.get_modulestore_type(), ModuleStoreEnum.Type.xml)
    def test_unicode_chars_in_xml_content(self):
        # edX/full/6.002_Spring_2012 has non-ASCII chars, and during
        # uniquification of names, would raise a UnicodeError. It no longer does.
        # Ensure that there really is a non-ASCII character in the course.
        with open(os.path.join(DATA_DIR, "toy/sequential/vertical_sequential.xml")) as xmlf:
            xml = xmlf.read()
            with self.assertRaises(UnicodeDecodeError):
                xml.decode('ascii')
        # Load the course, but don't make error modules. This will succeed,
        # but will record the errors.
        modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy'], load_error_modules=False)
        # Look up the errors during load. There should be none.
        errors = modulestore.get_course_errors(SlashSeparatedCourseKey("edX", "toy", "2012_Fall"))
        assert errors == []
    @patch("xmodule.modulestore.xml.glob.glob", side_effect=glob_tildes_at_end)
    def test_tilde_files_ignored(self, _fake_glob):
        # Editor-backup files (trailing '~') must not be loaded as content.
        modulestore = XMLModuleStore(DATA_DIR, course_dirs=['tilde'], load_error_modules=False)
        about_location = SlashSeparatedCourseKey('edX', 'tilde', '2012_Fall').make_usage_key(
            'about', 'index',
        )
        about_module = modulestore.get_item(about_location)
        self.assertIn("GREEN", about_module.data)
        self.assertNotIn("RED", about_module.data)
    def test_get_courses_for_wiki(self):
        """
        Test the get_courses_for_wiki method
        """
        store = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])
        for course in store.get_courses():
            course_locations = store.get_courses_for_wiki(course.wiki_slug)
            self.assertEqual(len(course_locations), 1)
            self.assertIn(course.location.course_key, course_locations)
        course_locations = store.get_courses_for_wiki('no_such_wiki')
        self.assertEqual(len(course_locations), 0)
        # now set toy course to share the wiki with simple course
        toy_course = store.get_course(SlashSeparatedCourseKey('edX', 'toy', '2012_Fall'))
        toy_course.wiki_slug = 'simple'
        course_locations = store.get_courses_for_wiki('toy')
        self.assertEqual(len(course_locations), 0)
        course_locations = store.get_courses_for_wiki('simple')
        self.assertEqual(len(course_locations), 2)
        for course_number in ['toy', 'simple']:
            self.assertIn(SlashSeparatedCourseKey('edX', course_number, '2012_Fall'), course_locations)
    def test_has_course(self):
        """
        Test the has_course method
        """
        check_has_course_method(
            XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple']),
            SlashSeparatedCourseKey('edX', 'toy', '2012_Fall'),
            locator_key_fields=SlashSeparatedCourseKey.KEY_FIELDS
        )
    def test_branch_setting(self):
        """
        Test the branch setting context manager
        """
        store = XMLModuleStore(DATA_DIR, course_dirs=['toy'])
        course_key = store.get_courses()[0]
        # XML store allows published_only branch setting
        with store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
            store.get_item(course_key.location)
        # XML store does NOT allow draft_preferred branch setting
        with self.assertRaises(ValueError):
            with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
                # verify that the above context manager raises a ValueError
                pass # pragma: no cover
| agpl-3.0 |
junghans/lammps | tools/moltemplate/examples/coarse_grained/protein_folding_examples/1bead+chaperone/frustrated+minichaperone/moltemplate_files/generate_tables/calc_dihedral_table.py | 40 | 2709 | #!/usr/bin/env python
# Calculate a table of dihedral angle interactions used in the alpha-helix
# and beta-sheet regions of the frustrated protein model described in
# provided in figure 8 of the supplemental materials section of:
# AI Jewett, A Baumketner and J-E Shea, PNAS, 101 (36), 13192-13197, (2004)
# Note that the "A" and "B" parameters were incorrectly reported to be
# 5.4*epsilon and 6.0*epsilon. The values used were 5.6 and 6.0 epsilon.
# The phiA and phiB values were 57.29577951308232 degrees (1 rad)
# and 180 degrees, respectively. Both expA and expB were 6.0.
#
# To generate the table used for the alpha-helix (1 degree resolution) use this:
# ./calc_dihedral_table.py 6.0 57.29577951308232 6 5.6 180 6 0.0 359 360
# To generate the table used for the beta-sheets (1 degree resolution) use this:
# ./calc_dihedral_table.py 5.6 57.29577951308232 6 6.0 180 6 0.0 359 360
#
# (If you're curious as to why I set the location of the minima at phi_alpha
# to 1.0 radians (57.2957795 degrees), there was no particularly good reason.
# I think the correct value turns out to be something closer to 50 degrees.)
from math import *
import sys
# The previous version included the repulsive core term
def U(phi, A, phiA, expA, B, phiB, expB, use_radians=False):
    """Dihedral potential -A*cos((phi-phiA)/2)**expA - B*cos((phi-phiB)/2)**expB."""
    scale = 1.0 if use_radians else pi / 180.0
    well_a = cos(0.5 * (phi - phiA) * scale)
    well_b = cos(0.5 * (phi - phiB) * scale)
    return -(A * pow(well_a, expA) + B * pow(well_b, expB))
# The previous version included the repulsive core term
def F(phi, A, phiA, expA, B, phiB, expB, use_radians=False):
    """Derivative term of U with respect to phi (tabulated force column)."""
    scale = 1.0 if use_radians else pi / 180.0

    def slope(center, exponent):
        # d/dphi of cos(0.5*(phi-center))**exponent, up to the -scale factor.
        half = 0.5 * (phi - center) * scale
        return 0.5 * sin(half) * exponent * pow(cos(half), exponent - 1.0)

    return -scale * (A * slope(phiA, expA) + B * slope(phiB, expB))
if len(sys.argv) != 10:
    sys.stderr.write("Error: expected 9 arguments:\n"
                     "\n"
                     "Usage: "+sys.argv[0]+" A phiA expA B phiB expB phiMin phiMax N\n\n")
    sys.exit(-1)

# Unpack the eight real-valued parameters followed by the integer sample count.
A, phiA, expA, B, phiB, expB, phi_min, phi_max = [float(arg) for arg in sys.argv[1:9]]
N = int(sys.argv[9])

# Emit one "index angle energy force" row per sample, evenly spaced from
# phi_min to phi_max inclusive.
for i in range(N):
    phi = phi_min + i*(phi_max - phi_min)/(N-1)
    U_phi = U(phi, A, phiA, expA, B, phiB, expB, use_radians=False)
    F_phi = F(phi, A, phiA, expA, B, phiB, expB, use_radians=False)
    print(' '.join([str(i+1), str(phi), str(U_phi), str(F_phi)]))
| gpl-2.0 |
babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/test/test_descrtut.py | 10 | 12553 | # This contains most of the executable examples from Guido's descr
# tutorial, once at
#
# http://www.python.org/2.2/descrintro.html
#
# A few examples left implicit in the writeup were fleshed out, a few were
# skipped due to lack of interest (e.g., faking super() by hand isn't
# of much interest anymore), and a few were fiddled to make the output
# deterministic.
from test.test_support import sortdict
import pprint
class defaultdict(dict):
    """Dict subclass: looking up a missing key yields a shared default
    value instead of raising KeyError."""

    def __init__(self, default=None):
        dict.__init__(self)
        self.default = default

    def __getitem__(self, key):
        # EAFP: delegate to dict and fall back to the default on a miss.
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return self.default

    def get(self, key, *args):
        # With no explicit fallback argument, substitute the instance default.
        return dict.get(self, key, *(args or (self.default,)))

    def merge(self, other):
        # Copy over only those keys we do not already have.
        for key in other:
            self.setdefault(key, other[key])
test_1 = """
Here's the new type at work:
>>> print defaultdict # show our type
<class 'test.test_descrtut.defaultdict'>
>>> print type(defaultdict) # its metatype
<type 'type'>
>>> a = defaultdict(default=0.0) # create an instance
>>> print a # show the instance
{}
>>> print type(a) # show its type
<class 'test.test_descrtut.defaultdict'>
>>> print a.__class__ # show its class
<class 'test.test_descrtut.defaultdict'>
>>> print type(a) is a.__class__ # its type is its class
True
>>> a[1] = 3.25 # modify the instance
>>> print a # show the new value
{1: 3.25}
>>> print a[1] # show the new item
3.25
>>> print a[0] # a non-existent item
0.0
>>> a.merge({1:100, 2:200}) # use a dict method
>>> print sortdict(a) # show the result
{1: 3.25, 2: 200}
>>>
We can also use the new type in contexts where classic only allows "real"
dictionaries, such as the locals/globals dictionaries for the exec
statement or the built-in function eval():
>>> def sorted(seq):
... seq.sort(key=str)
... return seq
>>> print sorted(a.keys())
[1, 2]
>>> exec "x = 3; print x" in a
3
>>> print sorted(a.keys())
[1, 2, '__builtins__', 'x']
>>> print a['x']
3
>>>
Now I'll show that defaultdict instances have dynamic instance variables,
just like classic classes:
>>> a.default = -1
>>> print a["noway"]
-1
>>> a.default = -1000
>>> print a["noway"]
-1000
>>> 'default' in dir(a)
True
>>> a.x1 = 100
>>> a.x2 = 200
>>> print a.x1
100
>>> d = dir(a)
>>> 'default' in d and 'x1' in d and 'x2' in d
True
>>> print sortdict(a.__dict__)
{'default': -1000, 'x1': 100, 'x2': 200}
>>>
"""
class defaultdict2(dict):
    """Same behaviour as defaultdict, but __slots__ restricts instances
    to the single 'default' attribute (no per-instance __dict__)."""
    __slots__ = ['default']

    def __init__(self, default=None):
        dict.__init__(self)
        self.default = default

    def __getitem__(self, key):
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return self.default

    def get(self, key, *args):
        return dict.get(self, key, *(args or (self.default,)))

    def merge(self, other):
        for key in other:
            self.setdefault(key, other[key])
test_2 = """
The __slots__ declaration takes a list of instance variables, and reserves
space for exactly these in the instance. When __slots__ is used, other
instance variables cannot be assigned to:
>>> a = defaultdict2(default=0.0)
>>> a[1]
0.0
>>> a.default = -1
>>> a[1]
-1
>>> a.x1 = 1
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: 'defaultdict2' object has no attribute 'x1'
>>>
"""
test_3 = """
Introspecting instances of built-in types
For instance of built-in types, x.__class__ is now the same as type(x):
>>> type([])
<type 'list'>
>>> [].__class__
<type 'list'>
>>> list
<type 'list'>
>>> isinstance([], list)
True
>>> isinstance([], dict)
False
>>> isinstance([], object)
True
>>>
Under the new proposal, the __methods__ attribute no longer exists:
>>> [].__methods__
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: 'list' object has no attribute '__methods__'
>>>
Instead, you can get the same information from the list type:
>>> pprint.pprint(dir(list)) # like list.__dict__.keys(), but sorted
['__add__',
'__class__',
'__contains__',
'__delattr__',
'__delitem__',
'__delslice__',
'__doc__',
'__eq__',
'__format__',
'__ge__',
'__getattribute__',
'__getitem__',
'__getslice__',
'__gt__',
'__hash__',
'__iadd__',
'__imul__',
'__init__',
'__iter__',
'__le__',
'__len__',
'__lt__',
'__mul__',
'__ne__',
'__new__',
'__reduce__',
'__reduce_ex__',
'__repr__',
'__reversed__',
'__rmul__',
'__setattr__',
'__setitem__',
'__setslice__',
'__sizeof__',
'__str__',
'__subclasshook__',
'append',
'count',
'extend',
'index',
'insert',
'pop',
'remove',
'reverse',
'sort']
The new introspection API gives more information than the old one: in
addition to the regular methods, it also shows the methods that are
normally invoked through special notations, e.g. __iadd__ (+=), __len__
(len), __ne__ (!=). You can invoke any method from this list directly:
>>> a = ['tic', 'tac']
>>> list.__len__(a) # same as len(a)
2
>>> a.__len__() # ditto
2
>>> list.append(a, 'toe') # same as a.append('toe')
>>> a
['tic', 'tac', 'toe']
>>>
This is just like it is for user-defined classes.
"""
test_4 = """
Static methods and class methods
The new introspection API makes it possible to add static methods and class
methods. Static methods are easy to describe: they behave pretty much like
static methods in C++ or Java. Here's an example:
>>> class C:
...
... @staticmethod
... def foo(x, y):
... print "staticmethod", x, y
>>> C.foo(1, 2)
staticmethod 1 2
>>> c = C()
>>> c.foo(1, 2)
staticmethod 1 2
Class methods use a similar pattern to declare methods that receive an
implicit first argument that is the *class* for which they are invoked.
>>> class C:
... @classmethod
... def foo(cls, y):
... print "classmethod", cls, y
>>> C.foo(1)
classmethod test.test_descrtut.C 1
>>> c = C()
>>> c.foo(1)
classmethod test.test_descrtut.C 1
>>> class D(C):
... pass
>>> D.foo(1)
classmethod test.test_descrtut.D 1
>>> d = D()
>>> d.foo(1)
classmethod test.test_descrtut.D 1
This prints "classmethod __main__.D 1" both times; in other words, the
class passed as the first argument of foo() is the class involved in the
call, not the class involved in the definition of foo().
But notice this:
>>> class E(C):
... @classmethod
... def foo(cls, y): # override C.foo
... print "E.foo() called"
... C.foo(y)
>>> E.foo(1)
E.foo() called
classmethod test.test_descrtut.C 1
>>> e = E()
>>> e.foo(1)
E.foo() called
classmethod test.test_descrtut.C 1
In this example, the call to C.foo() from E.foo() will see class C as its
first argument, not class E. This is to be expected, since the call
specifies the class C. But it stresses the difference between these class
methods and methods defined in metaclasses (where an upcall to a metamethod
would pass the target class as an explicit first argument).
"""
test_5 = """
Attributes defined by get/set methods
>>> class property(object):
...
... def __init__(self, get, set=None):
... self.__get = get
... self.__set = set
...
... def __get__(self, inst, type=None):
... return self.__get(inst)
...
... def __set__(self, inst, value):
... if self.__set is None:
... raise AttributeError, "this attribute is read-only"
... return self.__set(inst, value)
Now let's define a class with an attribute x defined by a pair of methods,
getx() and and setx():
>>> class C(object):
...
... def __init__(self):
... self.__x = 0
...
... def getx(self):
... return self.__x
...
... def setx(self, x):
... if x < 0: x = 0
... self.__x = x
...
... x = property(getx, setx)
Here's a small demonstration:
>>> a = C()
>>> a.x = 10
>>> print a.x
10
>>> a.x = -10
>>> print a.x
0
>>>
Hmm -- property is builtin now, so let's try it that way too.
>>> del property # unmask the builtin
>>> property
<type 'property'>
>>> class C(object):
... def __init__(self):
... self.__x = 0
... def getx(self):
... return self.__x
... def setx(self, x):
... if x < 0: x = 0
... self.__x = x
... x = property(getx, setx)
>>> a = C()
>>> a.x = 10
>>> print a.x
10
>>> a.x = -10
>>> print a.x
0
>>>
"""
test_6 = """
Method resolution order
This example is implicit in the writeup.
>>> class A: # classic class
... def save(self):
... print "called A.save()"
>>> class B(A):
... pass
>>> class C(A):
... def save(self):
... print "called C.save()"
>>> class D(B, C):
... pass
>>> D().save()
called A.save()
>>> class A(object): # new class
... def save(self):
... print "called A.save()"
>>> class B(A):
... pass
>>> class C(A):
... def save(self):
... print "called C.save()"
>>> class D(B, C):
... pass
>>> D().save()
called C.save()
"""
class A(object):
    # Root of the diamond used by test_7 to demonstrate cooperative super().
    def m(self):
        return "A"
class B(A):
    # Prepends "B" and continues up the MRO via super().
    def m(self):
        return "B" + super(B, self).m()
class C(A):
    # Prepends "C" and continues up the MRO via super().
    def m(self):
        return "C" + super(C, self).m()
class D(C, B):
    # MRO is D, C, B, A, so D().m() yields "DCBA" (exercised by test_7).
    def m(self):
        return "D" + super(D, self).m()
test_7 = """
Cooperative methods and "super"
>>> print D().m() # "DCBA"
DCBA
"""
test_8 = """
Backwards incompatibilities
>>> class A:
... def foo(self):
... print "called A.foo()"
>>> class B(A):
... pass
>>> class C(A):
... def foo(self):
... B.foo(self)
>>> C().foo()
Traceback (most recent call last):
...
TypeError: unbound method foo() must be called with B instance as first argument (got C instance instead)
>>> class C(A):
... def foo(self):
... A.foo(self)
>>> C().foo()
called A.foo()
"""
__test__ = {"tut1": test_1,
"tut2": test_2,
"tut3": test_3,
"tut4": test_4,
"tut5": test_5,
"tut6": test_6,
"tut7": test_7,
"tut8": test_8}
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
    """Run this module's doctests through regrtest's doctest driver."""
    # Obscure:  import this module as test.test_descrtut instead of as
    # plain test_descrtut because the name of this module works its way
    # into the doctest examples, and unless the full test.test_descrtut
    # business is used the name can change depending on how the test is
    # invoked.
    from test import test_support, test_descrtut
    test_support.run_doctest(test_descrtut, verbose)
# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
    test_main(1)
| mit |
berendkleinhaneveld/VTK | ThirdParty/AutobahnPython/autobahn/wamp/dealer.py | 16 | 13691 | ###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
from autobahn import util
from autobahn.wamp import types
from autobahn.wamp import role
from autobahn.wamp import message
from autobahn.wamp.exception import ProtocolError, ApplicationError
from autobahn.wamp.interfaces import IDealer, IRouter
from autobahn.wamp.message import _URI_PAT_STRICT_NON_EMPTY, _URI_PAT_LOOSE_NON_EMPTY
class Dealer:
   """
   Basic WAMP dealer, implements :class:`autobahn.wamp.interfaces.IDealer`.
   """
   def __init__(self, router, options):
      """
      Constructor.
      :param router: The router this dealer is part of.
      :type router: Object that implements :class:`autobahn.wamp.interfaces.IRouter`.
      :param options: Router options.
      :type options: Instance of :class:`autobahn.wamp.types.RouterOptions`.
      """
      self._router = router
      self._options = options or types.RouterOptions()
      ## map: session -> set(registration)
      ## needed for removeSession
      self._session_to_registrations = {}
      ## map: session_id -> session
      ## needed for exclude/eligible
      self._session_id_to_session = {}
      ## map: procedure -> (registration, session)
      ## NOTE: values are actually (registration_id, session, discloseCaller)
      ## triples -- see processRegister().
      self._procs_to_regs = {}
      ## map: registration -> procedure
      self._regs_to_procs = {}
      ## pending callee invocation requests
      ## map: invocation request id -> (original call message, calling session)
      self._invocations = {}
      ## check all procedure URIs with strict rules
      self._option_uri_strict = self._options.uri_check == types.RouterOptions.URI_CHECK_STRICT
      ## supported features from "WAMP Advanced Profile"
      self._role_features = role.RoleDealerFeatures(caller_identification = True, progressive_call_results = True)
   def attach(self, session):
      """
      Implements :func:`autobahn.wamp.interfaces.IDealer.attach`
      """
      assert(session not in self._session_to_registrations)
      self._session_to_registrations[session] = set()
      self._session_id_to_session[session._session_id] = session
   def detach(self, session):
      """
      Implements :func:`autobahn.wamp.interfaces.IDealer.detach`
      """
      assert(session in self._session_to_registrations)
      ## drop every registration owned by the departing session
      for registration in self._session_to_registrations[session]:
         del self._procs_to_regs[self._regs_to_procs[registration]]
         del self._regs_to_procs[registration]
      del self._session_to_registrations[session]
      del self._session_id_to_session[session._session_id]
   def processRegister(self, session, register):
      """
      Implements :func:`autobahn.wamp.interfaces.IDealer.processRegister`
      """
      assert(session in self._session_to_registrations)
      ## check procedure URI
      ##
      if (not self._option_uri_strict and not _URI_PAT_LOOSE_NON_EMPTY.match(register.procedure)) or \
         ( self._option_uri_strict and not _URI_PAT_STRICT_NON_EMPTY.match(register.procedure)):
         reply = message.Error(message.Register.MESSAGE_TYPE, register.request, ApplicationError.INVALID_URI, ["register for invalid procedure URI '{}'".format(register.procedure)])
         session._transport.send(reply)
      else:
         ## only a single registration per procedure URI is supported
         if not register.procedure in self._procs_to_regs:
            ## authorize action
            ##
            d = self._as_future(self._router.authorize, session, register.procedure, IRouter.ACTION_REGISTER)
            def on_authorize_success(authorized):
               if authorized:
                  registration_id = util.id()
                  self._procs_to_regs[register.procedure] = (registration_id, session, register.discloseCaller)
                  self._regs_to_procs[registration_id] = register.procedure
                  self._session_to_registrations[session].add(registration_id)
                  reply = message.Registered(register.request, registration_id)
               else:
                  reply = message.Error(message.Register.MESSAGE_TYPE, register.request, ApplicationError.NOT_AUTHORIZED, ["session is not authorized to register procedure '{}'".format(register.procedure)])
               session._transport.send(reply)
            def on_authorize_error(err):
               reply = message.Error(message.Register.MESSAGE_TYPE, register.request, ApplicationError.AUTHORIZATION_FAILED, ["failed to authorize session for registering procedure '{}': {}".format(register.procedure, err.value)])
               session._transport.send(reply)
            self._add_future_callbacks(d, on_authorize_success, on_authorize_error)
         else:
            reply = message.Error(message.Register.MESSAGE_TYPE, register.request, ApplicationError.PROCEDURE_ALREADY_EXISTS, ["register for already registered procedure '{}'".format(register.procedure)])
            session._transport.send(reply)
   def processUnregister(self, session, unregister):
      """
      Implements :func:`autobahn.wamp.interfaces.IDealer.processUnregister`
      """
      assert(session in self._session_to_registrations)
      if unregister.registration in self._regs_to_procs:
         ## map registration ID to procedure URI
         procedure = self._regs_to_procs[unregister.registration]
         ## get the session that originally registered the procedure
         _, reg_session, _ = self._procs_to_regs[procedure]
         if session != reg_session:
            ## procedure was registered by a different session!
            ##
            reply = message.Error(message.Unregister.MESSAGE_TYPE, unregister.request, ApplicationError.NO_SUCH_REGISTRATION)
         else:
            ## alright. the procedure had been registered by the session
            ## that now wants to unregister it.
            ##
            del self._procs_to_regs[procedure]
            del self._regs_to_procs[unregister.registration]
            self._session_to_registrations[session].discard(unregister.registration)
            reply = message.Unregistered(unregister.request)
      else:
         reply = message.Error(message.Unregister.MESSAGE_TYPE, unregister.request, ApplicationError.NO_SUCH_REGISTRATION)
      session._transport.send(reply)
   def processCall(self, session, call):
      """
      Implements :func:`autobahn.wamp.interfaces.IDealer.processCall`
      """
      assert(session in self._session_to_registrations)
      ## check procedure URI
      ##
      if (not self._option_uri_strict and not _URI_PAT_LOOSE_NON_EMPTY.match(call.procedure)) or \
         ( self._option_uri_strict and not _URI_PAT_STRICT_NON_EMPTY.match(call.procedure)):
         reply = message.Error(message.Call.MESSAGE_TYPE, call.request, ApplicationError.INVALID_URI, ["call with invalid procedure URI '{}'".format(call.procedure)])
         session._transport.send(reply)
      else:
         if call.procedure in self._procs_to_regs:
            ## validate payload
            ##
            try:
               self._router.validate('call', call.procedure, call.args, call.kwargs)
            except Exception as e:
               reply = message.Error(message.Call.MESSAGE_TYPE, call.request, ApplicationError.INVALID_ARGUMENT, ["call of procedure '{}' with invalid application payload: {}".format(call.procedure, e)])
               session._transport.send(reply)
               return
            ## authorize action
            ##
            d = self._as_future(self._router.authorize, session, call.procedure, IRouter.ACTION_CALL)
            def on_authorize_success(authorized):
               if authorized:
                  registration_id, endpoint_session, discloseCaller = self._procs_to_regs[call.procedure]
                  request_id = util.id()
                  ## caller identification is forwarded when either the callee
                  ## asked for it at registration time or the caller opted in
                  if discloseCaller or call.discloseMe:
                     caller = session._session_id
                     authid = session._authid
                     authrole = session._authrole
                     authmethod = session._authmethod
                  else:
                     caller = None
                     authid = None
                     authrole = None
                     authmethod = None
                  invocation = message.Invocation(request_id,
                                                  registration_id,
                                                  args = call.args,
                                                  kwargs = call.kwargs,
                                                  timeout = call.timeout,
                                                  receive_progress = call.receive_progress,
                                                  caller = caller,
                                                  authid = authid,
                                                  authrole = authrole,
                                                  authmethod = authmethod)
                  ## remember the pending call so YIELD/ERROR can be routed back
                  self._invocations[request_id] = (call, session)
                  endpoint_session._transport.send(invocation)
               else:
                  reply = message.Error(message.Call.MESSAGE_TYPE, call.request, ApplicationError.NOT_AUTHORIZED, ["session is not authorized to call procedure '{}'".format(call.procedure)])
                  session._transport.send(reply)
            def on_authorize_error(err):
               reply = message.Error(message.Call.MESSAGE_TYPE, call.request, ApplicationError.AUTHORIZATION_FAILED, ["failed to authorize session for calling procedure '{}': {}".format(call.procedure, err.value)])
               session._transport.send(reply)
            self._add_future_callbacks(d, on_authorize_success, on_authorize_error)
         else:
            reply = message.Error(message.Call.MESSAGE_TYPE, call.request, ApplicationError.NO_SUCH_PROCEDURE, ["no procedure '{}' registered".format(call.procedure)])
            session._transport.send(reply)
   def processCancel(self, session, cancel):
      """
      Implements :func:`autobahn.wamp.interfaces.IDealer.processCancel`
      """
      assert(session in self._session_to_registrations)
      raise Exception("not implemented")
   def processYield(self, session, yield_):
      """
      Implements :func:`autobahn.wamp.interfaces.IDealer.processYield`
      """
      assert(session in self._session_to_registrations)
      if yield_.request in self._invocations:
         ## get original call message and calling session
         ##
         call_msg, call_session = self._invocations[yield_.request]
         ## validate payload
         ##
         is_valid = True
         try:
            self._router.validate('call_result', call_msg.procedure, yield_.args, yield_.kwargs)
         except Exception as e:
            is_valid = False
            reply = message.Error(message.Call.MESSAGE_TYPE, call_msg.request, ApplicationError.INVALID_ARGUMENT, ["call result from procedure '{}' with invalid application payload: {}".format(call_msg.procedure, e)])
         else:
            reply = message.Result(call_msg.request, args = yield_.args, kwargs = yield_.kwargs, progress = yield_.progress)
         ## the calling session might have been lost in the meantime ..
         ##
         if call_session._transport:
            call_session._transport.send(reply)
         ## the call is done if it's a regular call (non-progressive) or if the payload was invalid
         ##
         if not yield_.progress or not is_valid:
            del self._invocations[yield_.request]
      else:
         raise ProtocolError("Dealer.onYield(): YIELD received for non-pending request ID {}".format(yield_.request))
   def processInvocationError(self, session, error):
      """
      Implements :func:`autobahn.wamp.interfaces.IDealer.processInvocationError`
      """
      assert(session in self._session_to_registrations)
      if error.request in self._invocations:
         ## get original call message and calling session
         ##
         call_msg, call_session = self._invocations[error.request]
         ## validate payload
         ##
         try:
            self._router.validate('call_error', call_msg.procedure, error.args, error.kwargs)
         except Exception as e:
            reply = message.Error(message.Call.MESSAGE_TYPE, call_msg.request, ApplicationError.INVALID_ARGUMENT, ["call error from procedure '{}' with invalid application payload: {}".format(call_msg.procedure, e)])
         else:
            reply = message.Error(message.Call.MESSAGE_TYPE, call_msg.request, error.error, args = error.args, kwargs = error.kwargs)
         ## the calling session might have been lost in the meantime ..
         ##
         if call_session._transport:
            call_session._transport.send(reply)
         ## the call is done
         ##
         del self._invocations[error.request]
      else:
         raise ProtocolError("Dealer.onInvocationError(): ERROR received for non-pending request_type {} and request ID {}".format(error.request_type, error.request))
IDealer.register(Dealer)
| bsd-3-clause |
glennlive/gnuradio-wg-grc | gr-filter/examples/fir_filter_ccc.py | 47 | 4019 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
# Import guards: this example needs SciPy and matplotlib's pylab interface;
# exit with a pointer to the project site if either is missing.
try:
    import scipy
except ImportError:
    print "Error: could not import scipy (http://www.scipy.org/)"
    sys.exit(1)
try:
    import pylab
except ImportError:
    print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
    sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
    # Flowgraph: complex Gaussian noise -> head (first N samples) ->
    # complex-tap FIR low-pass filter with decimation D.  Both the raw
    # and the filtered streams are captured in vector sinks for plotting.
    def __init__(self, N, fs, bw, tw, atten, D):
        # N: samples to process, fs: sample rate, bw: passband edge,
        # tw: transition width, atten: stopband attenuation (dB),
        # D: decimation factor.
        gr.top_block.__init__(self)
        self._nsamps = N
        self._fs = fs
        self._bw = bw
        self._tw = tw
        self._at = atten
        self._decim = D
        # Design the low-pass taps from the requested filter spec.
        taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
        print "Num. Taps: ", len(taps)
        self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
        self.head = blocks.head(gr.sizeof_gr_complex, self._nsamps)
        self.filt0 = filter.fir_filter_ccc(self._decim, taps)
        self.vsnk_src = blocks.vector_sink_c()
        self.vsnk_out = blocks.vector_sink_c()
        # Tap the unfiltered stream and the filtered stream separately.
        self.connect(self.src, self.head, self.vsnk_src)
        self.connect(self.head, self.filt0, self.vsnk_out)
def main():
    """Parse options, run the flowgraph, and plot input/output PSDs
    plus the time-domain signals."""
    parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
    parser.add_option("-N", "--nsamples", type="int", default=10000,
                      help="Number of samples to process [default=%default]")
    parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
                      help="System sample rate [default=%default]")
    parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
                      help="Filter bandwidth [default=%default]")
    parser.add_option("-T", "--transition", type="eng_float", default=100,
                      help="Transition band [default=%default]")
    parser.add_option("-A", "--attenuation", type="eng_float", default=80,
                      help="Stopband attenuation [default=%default]")
    parser.add_option("-D", "--decimation", type="int", default=1,
                      help="Decmation factor [default=%default]")
    (options, args) = parser.parse_args ()
    put = example_fir_filter_ccc(options.nsamples,
                                 options.samplerate,
                                 options.bandwidth,
                                 options.transition,
                                 options.attenuation,
                                 options.decimation)
    put.run()
    # Pull the captured samples out of the vector sinks.
    data_src = scipy.array(put.vsnk_src.data())
    data_snk = scipy.array(put.vsnk_out.data())
    # Plot the signals PSDs
    nfft = 1024
    f1 = pylab.figure(1, figsize=(12,10))
    s1 = f1.add_subplot(1,1,1)
    s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
           Fs=options.samplerate)
    s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
           Fs=options.samplerate)
    # Time-domain view: raw complex samples and the filtered real part.
    f2 = pylab.figure(2, figsize=(12,10))
    s2 = f2.add_subplot(1,1,1)
    s2.plot(data_src)
    s2.plot(data_snk.real, 'g')
    pylab.show()
# Script entry point; Ctrl-C exits quietly instead of dumping a traceback.
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        pass
| gpl-3.0 |
RHInception/re-cfg-seed | test/test_re_cfg_seed.py | 1 | 6904 | # Copyright (C) 2014 SEE AUTHORS FILE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Unittests.
"""
import mock
import requests
from . import TestCase
from contextlib import nested
import recfgseed
class TestMakePassword(TestCase):
    """Unit tests for recfgseed.make_password."""

    def test_make_password(self):
        """
        Verify making passwords works like it should.
        """
        # Default length is 32; an explicit length must be honored.
        for expected_len, passwd in (
                (32, recfgseed.make_password()),
                (10, recfgseed.make_password(10))):
            assert len(passwd) == expected_len
class TestReCFGSeed(TestCase):
    # All etcd HTTP traffic (requests.get / requests.put) is mocked out, so
    # no server is needed.  NOTE(review): contextlib.nested is Python-2-only;
    # this suite will not run unmodified on Python 3.
    def test_create_seed_manager(self):
        """
        Verify creation of SeedManager reacts as it should.
        """
        # The default endpoint should be this
        sm = recfgseed.SeedManager()
        self.assertEquals(sm._endpoint, 'http://127.0.0.1:4001')
        self.assertEquals(sm.keyendpoint, 'http://127.0.0.1:4001')
        # We should be able to override the endpoint
        sm = recfgseed.SeedManager('https://127.0.0.2:6001/')
        self.assertEquals(sm._endpoint, 'https://127.0.0.2:6001/')
        self.assertEquals(sm.keyendpoint, 'https://127.0.0.2:6001/')
    def test_update_content(self):
        """
        Verify the content in update_content is actually updated.
        """
        with mock.patch('requests.get') as _get:
            resp = requests.Response()
            resp.status_code = 200
            resp._content = '{"node": {"value": "test"}}'
            _get.return_value = resp
            sm = recfgseed.SeedManager()
            # The '___' placeholder should be replaced by the server value.
            result = sm.update_content({'akey': {}}, {'akey': '___'})
            assert result['akey'] == 'test'
    def test_set_key(self):
        """
        Verify setting keys works like it should.
        """
        with mock.patch('requests.put') as _put:
            resp = requests.Response()
            resp.status_code = 201
            _put.return_value = resp
            sm = recfgseed.SeedManager()
            result = sm.set_key('key', 'value')
            assert result['name'] == 'key'
            assert result['value'] == 'value'
    def test_set_key_failure(self):
        """
        Verify set_key fails with an exception.
        """
        with mock.patch('requests.put') as _put:
            resp = requests.Response()
            resp.status_code = 404
            _put.return_value = resp
            sm = recfgseed.SeedManager()
            self.assertRaises(Exception, sm.set_key, 'key', 'value')
    def test_get_key_with_value(self):
        """
        Verify get_key returns the value from the server.
        """
        with mock.patch('requests.get') as _get:
            resp = requests.Response()
            resp.status_code = 200
            resp._content = '{"node": {"value": "test"}}'
            _get.return_value = resp
            sm = recfgseed.SeedManager()
            result = sm.get_key('key')
            assert result['name'] == 'key'
            assert result['value'] == 'test'
    def test_get_key_with_default_but_no_value(self):
        """
        Verify get_key returns the default value and creates it on the server.
        """
        with nested(
                mock.patch('requests.get'),
                mock.patch('requests.put')) as (_get, _put):
            # GET misses (404) so the default must be PUT back (201).
            resp = requests.Response()
            resp.status_code = 404
            _get.return_value = resp
            presp = requests.Response()
            presp.status_code = 201
            _put.return_value = presp
            sm = recfgseed.SeedManager()
            result = sm.get_key('key', 'default')
            assert result['name'] == 'key'
            assert result['value'] == 'default'
            assert _put.called_once()
    def test_get_key_as_password_with_no_default_or_value(self):
        """
        Verify that the password keys get generated and created on the server.
        """
        with nested(
                mock.patch('requests.get'),
                mock.patch('requests.put'),
                mock.patch('recfgseed.make_password')) as (
                _get, _put, _make_password):
            resp = requests.Response()
            resp.status_code = 404
            _get.return_value = resp
            presp = requests.Response()
            presp.status_code = 201
            _put.return_value = presp
            _make_password.return_value = 'from_make_password'
            sm = recfgseed.SeedManager()
            result = sm.get_key('key', password=True)
            assert result['name'] == 'key'
            assert result['value'] == 'from_make_password'
            _put.assert_called_once()
            _make_password.assert_called_once()
    def test_get_key_with_no_default_or_value(self):
        """
        Verify get_key fails with an exception when there is no value and no default.
        """
        with mock.patch('requests.get') as _get:
            resp = requests.Response()
            resp.status_code = 404
            _get.return_value = resp
            sm = recfgseed.SeedManager()
            self.assertRaises(Exception, sm.get_key, 'key')
    def test_templatize(self):
        """
        Verify templates can be used.
        """
        with mock.patch('requests.get') as _get:
            resp = requests.Response()
            resp.status_code = 200
            resp._content = '{"node": {"value": "test"}}'
            _get.return_value = resp
            sm = recfgseed.SeedManager()
            result = sm.templatize({'akey': {}}, 'This is a test: {{ akey }}.')
            self.assertEquals(str(result), 'This is a test: test.')
    def test_casting(self):
        """
        Verify casting works.
        """
        with mock.patch('requests.get') as _get:
            resp = requests.Response()
            resp.status_code = 200
            resp._content = '{"node": {"value": "1234"}}'
            _get.return_value = resp
            sm = recfgseed.SeedManager()
            # A declared 'int' type should coerce the string server value.
            result = sm.update_content({'akey': {'type': 'int'}}, {'akey': '___'})
            assert result['akey'] == 1234
            # An unknown type name must raise KeyError.
            self.assertRaises(
                KeyError,
                sm.update_content,
                {'newkey': {'type': 'asdasd'}}, {'akey': '___'})
| agpl-3.0 |
dexterx17/nodoSocket | clients/Python-2.7.6/Lib/bsddb/test/test_join.py | 111 | 3168 | """TestCases for using the DB.join and DBCursor.join_item methods.
"""
import os
import unittest
from test_all import db, dbshelve, test_support, verbose, \
get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
# Fixture data: (product, where-sold) pairs for the primary index.
ProductIndex = [
    ('apple', "Convenience Store"),
    ('blueberry', "Farmer's Market"),
    ('shotgun', "S-Mart"), # Aisle 12
    ('pear', "Farmer's Market"),
    ('chainsaw', "S-Mart"), # "Shop smart. Shop S-Mart!"
    ('strawberry', "Farmer's Market"),
    ]
# Fixture data: (color, product) pairs for the duplicate-keyed secondary
# index; 'red' deliberately maps to three products for the join test.
ColorIndex = [
    ('blue', "blueberry"),
    ('red', "apple"),
    ('red', "chainsaw"),
    ('red', "strawberry"),
    ('yellow', "peach"),
    ('yellow', "pear"),
    ('black', "shotgun"),
    ]
class JoinTestCase(unittest.TestCase):
    """Exercise DB.join and DBCursor.join_item against a primary index and
    a duplicate-sorted secondary index built in a fresh DBEnv."""
    keytype = ''
    def setUp(self):
        # Fresh environment directory per test; removed again in tearDown.
        self.filename = self.__class__.__name__ + '.db'
        self.homeDir = get_new_environment_path()
        self.env = db.DBEnv()
        self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK )
    def tearDown(self):
        self.env.close()
        test_support.rmtree(self.homeDir)
    def test01_join(self):
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test01_join..." % \
                  self.__class__.__name__
        # create and populate primary index
        priDB = db.DB(self.env)
        priDB.open(self.filename, "primary", db.DB_BTREE, db.DB_CREATE)
        map(lambda t, priDB=priDB: priDB.put(*t), ProductIndex)
        # create and populate secondary index
        secDB = db.DB(self.env)
        secDB.set_flags(db.DB_DUP | db.DB_DUPSORT)
        secDB.open(self.filename, "secondary", db.DB_BTREE, db.DB_CREATE)
        map(lambda t, secDB=secDB: secDB.put(*t), ColorIndex)
        sCursor = None
        jCursor = None
        try:
            # lets look up all of the red Products
            sCursor = secDB.cursor()
            # Don't do the .set() in an assert, or you can get a bogus failure
            # when running python -O
            tmp = sCursor.set('red')
            self.assertTrue(tmp)
            # FIXME: jCursor doesn't properly hold a reference to its
            # cursors, if they are closed before jcursor is used it
            # can cause a crash.
            jCursor = priDB.join([sCursor])
            if jCursor.get(0) != ('apple', "Convenience Store"):
                self.fail("join cursor positioned wrong")
            if jCursor.join_item() != 'chainsaw':
                self.fail("DBCursor.join_item returned wrong item")
            if jCursor.get(0)[0] != 'strawberry':
                self.fail("join cursor returned wrong thing")
            if jCursor.get(0): # there were only three red items to return
                self.fail("join cursor returned too many items")
        finally:
            # Close cursors before the databases to avoid dangling handles.
            if jCursor:
                jCursor.close()
            if sCursor:
                sCursor.close()
            priDB.close()
            secDB.close()
def test_suite():
    """Assemble and return the unittest suite for this module."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(JoinTestCase))
    return tests
| mit |
ntuecon/server | pyenv/Lib/site-packages/pythonwin/pywin/debugger/__init__.py | 4 | 2833 | import sys
# Some cruft to deal with the Pythonwin GUI booting up from a non GUI app.
def _MakeDebuggerGUI():
    # Boot the Pythonwin application object so the debugger GUI can exist.
    # NOTE(review): 'app' is presumably pywin.framework.app's module-level
    # application object imported elsewhere in this file - confirm.
    app.InitInstance()
# Tri-state cache: -1 = not determined yet, then win32ui's IsInproc() result.
isInprocApp = -1

def _CheckNeedGUI():
    # Return whether a default debugger GUI had to be created (and create it).
    global isInprocApp
    if isInprocApp==-1:
        import win32ui
        isInprocApp = win32ui.GetApp().IsInproc()
    if isInprocApp:
        # MAY Need it - may already have one
        need = "pywin.debugger.dbgpyapp" not in sys.modules
    else:
        need = 0
    if need:
        import pywin.framework.app
        import dbgpyapp
        pywin.framework.app.CreateDefaultGUI(dbgpyapp.DebuggerPythonApp)
    else:
        # Check we have the appropriate editor
        # No longer necessary!
        pass
    return need
# Inject some methods in the top level name-space.
currentDebugger = None  # Wipe out any old one on reload.

def _GetCurrentDebugger():
    """Return the process-wide Debugger, creating it lazily on first use."""
    global currentDebugger
    if currentDebugger is None:
        _CheckNeedGUI()
        import debugger
        currentDebugger = debugger.Debugger()
    return currentDebugger
def GetDebugger():
    """Return the initialized debugger, or None if one cannot be created."""
    # An error here is not nice - as we are probably trying to
    # break into the debugger on a Python error, any
    # error raised by this is usually silent, and causes
    # big problems later!
    try:
        rc = _GetCurrentDebugger()
        rc.GUICheckInit()
        return rc
    except:
        # Deliberately broad catch: failing to construct the debugger must
        # never propagate (see note above); report it and return None.
        print "Could not create the debugger!"
        import traceback
        traceback.print_exc()
        return None
def close():
    """Shut down the active debugger instance, if one was ever created."""
    if currentDebugger is None:
        return
    currentDebugger.close()
def run(cmd, globals=None, locals=None, start_stepping=1):
    """Execute *cmd* (a statement string) under the debugger."""
    dbg = _GetCurrentDebugger()
    dbg.run(cmd, globals, locals, start_stepping)
def runeval(expression, globals=None, locals=None):
    """Evaluate *expression* under the debugger and return its value."""
    dbg = _GetCurrentDebugger()
    return dbg.runeval(expression, globals, locals)
def runcall(*args):
    """Call args[0](*args[1:]) under the debugger; return the call's result."""
    dbg = _GetCurrentDebugger()
    return dbg.runcall(*args)
def set_trace():
    """Break into the debugger at the caller's frame (like pdb.set_trace)."""
    import sys
    d = _GetCurrentDebugger()
    if d.frameShutdown: return # App closing
    if d.stopframe != d.botframe:
        # If im not "running"
        return
    # Clear any existing trace function before installing the debugger's own.
    sys.settrace(None) # May be hooked
    d.reset()
    d.set_trace()
# "brk" is an alias for "set_trace" ("break" is a reserved word :-(
brk = set_trace
# Post-Mortem interface
def post_mortem(t=None):
    """Debug a traceback post-mortem; defaults to the current/last exception."""
    if t is None:
        t = sys.exc_info()[2] # Will be valid if we are called from an except handler.
    if t is None:
        try:
            t = sys.last_traceback
        except AttributeError:
            print "No traceback can be found from which to perform post-mortem debugging!"
            print "No debugging can continue"
            return
    p = _GetCurrentDebugger()
    if p.frameShutdown: return # App closing
    # No idea why I need to settrace to None - it should have been reset by now?
    sys.settrace(None)
    p.reset()
    # Walk to the innermost frame - post-mortem starts where the error occurred.
    while t.tb_next != None: t = t.tb_next
    p.bAtPostMortem = 1
    p.prep_run(None)
    try:
        p.interaction(t.tb_frame, t)
    finally:
        # Drop the traceback reference so frames can be garbage collected.
        t = None
        p.bAtPostMortem = 0
        p.done_run()
def pm(t=None):
    """Shorthand for post_mortem() - mirrors pdb.pm()."""
    return post_mortem(t)
| bsd-3-clause |
40223214/-2015cd_midterm2 | static/Brython3.1.1-20150328-091302/Lib/types.py | 756 | 3167 | """
Define names for built-in types that aren't directly accessible as a builtin.
"""
import sys

# Iterators in Python aren't a matter of type but of protocol.  A large
# and changing number of builtin types implement *some* flavor of
# iterator.  Don't check the type!  Use hasattr to check for both
# "__iter__" and "__next__" attributes instead.

# Sample throwaway objects so their concrete types can be captured below.
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None)         # Same as FunctionType
CodeType = type(_f.__code__)
MappingProxyType = type(type.__dict__)
SimpleNamespace = type(sys.implementation)

def _g():
    yield 1
GeneratorType = type(_g())

class _C:
    def _m(self): pass
MethodType = type(_C()._m)

BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append)     # Same as BuiltinFunctionType

ModuleType = type(sys)

# Raise and immediately catch an exception so a live traceback object is
# available to sample TracebackType and FrameType from.
try:
    raise TypeError
except TypeError:
    tb = sys.exc_info()[2]
    TracebackType = type(tb)
    FrameType = type(tb.tb_frame)
    tb = None; del tb  # break the reference cycle through the frame

# For Jython, the following two types are identical
GetSetDescriptorType = type(FunctionType.__code__)
MemberDescriptorType = type(FunctionType.__globals__)

del sys, _f, _g, _C,  # Not for export
# Provide a PEP 3115 compliant mechanism for class creation
def new_class(name, bases=(), kwds=None, exec_body=None):
    """Create a class object dynamically using the appropriate metaclass."""
    meta, namespace, remaining = prepare_class(name, bases, kwds)
    if exec_body is not None:
        # Let the caller populate the prepared namespace in place.
        exec_body(namespace)
    return meta(name, bases, namespace, **remaining)
def prepare_class(name, bases=(), kwds=None):
    """Call the __prepare__ method of the appropriate metaclass.

    Returns (metaclass, namespace, kwds) as a 3-tuple

    *metaclass* is the appropriate metaclass
    *namespace* is the prepared class namespace
    *kwds* is an updated copy of the passed in kwds argument with any
    'metaclass' entry removed. If no kwds argument is passed in, this will
    be an empty dict.
    """
    # Never mutate the caller's mapping.
    kwds = {} if kwds is None else dict(kwds)
    if 'metaclass' in kwds:
        meta = kwds.pop('metaclass')
    else:
        meta = type(bases[0]) if bases else type
    if isinstance(meta, type):
        # when meta is a type, we first determine the most-derived metaclass
        # instead of invoking the initial candidate directly
        meta = _calculate_meta(meta, bases)
    ns = meta.__prepare__(name, bases, **kwds) if hasattr(meta, '__prepare__') else {}
    return meta, ns, kwds
def _calculate_meta(meta, bases):
"""Calculate the most derived metaclass."""
winner = meta
for base in bases:
base_meta = type(base)
if issubclass(winner, base_meta):
continue
if issubclass(base_meta, winner):
winner = base_meta
continue
# else:
raise TypeError("metaclass conflict: "
"the metaclass of a derived class "
"must be a (non-strict) subclass "
"of the metaclasses of all its bases")
return winner
| agpl-3.0 |
mjschultz/django-password-policies | setup.py | 1 | 1293 | from setuptools import setup, find_packages
install_requires=['django>=1.5', 'django-easysettings', 'pytz']
# importlib is in the stdlib from Python 2.7 on; add the backport otherwise.
try:
    import importlib
except ImportError:
    install_requires.append('importlib')

setup(
    name='django-password-policies',
    # Single-source the version number from the package itself.
    version=__import__('password_policies').__version__,
    description='A Django application to implent password policies.',
    long_description="""\
django-password-policies is an application for the Django framework that
provides unicode-aware password policies on password changes and resets
and a mechanism to force password changes.
""",
    author='Tarak Blah',
    author_email='halbkarat@gmail.com',
    url='https://github.com/tarak/django-password-policies',
    include_package_data=True,
    packages=find_packages(),
    zip_safe=False,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Django',
        'License :: OSI Approved :: BSD License',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities'
    ],
    install_requires=install_requires,
    test_suite='tests.runtests',
)
| bsd-3-clause |
strets123/acascraper | setup.py | 1 | 1573 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Long description is assembled from the README plus the changelog.
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')

requirements = [
    # TODO: put package requirements here
    'selenium',
    'django',
    'BeautifulSoup4',
    'requests',
]

test_requirements = [
    # TODO: put package test requirements here
    'selenium',
    'django',
    'BeautifulSoup4',
    'requests',
]

setup(
    name='acascraper',
    version='0.1.0',
    description='Academic social network scraper package',
    long_description=readme + '\n\n' + history,
    author='Andrew Stretton',
    author_email='andrew.stretton@sgc.ox.ac.uk',
    url='https://github.com/strets123/acascraper',
    packages=[
        'acascraper',
    ],
    package_dir={'acascraper':
                 'acascraper'},
    include_package_data=True,
    install_requires=requirements,
    license="BSD",
    zip_safe=False,
    keywords='acascraper',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
    ],
    test_suite='tests',
    tests_require=test_requirements
) | bsd-3-clause |
PaulWay/insights-core | insights/combiners/journald_conf.py | 1 | 10493 | """
Journald configuration
======================
Combiner for parsing of journald configuration. man journald.conf describes where various
journald config files can reside and how they take precedence one over another. The combiner
implements the logic and provides an interface for querying active settings.
The journald.conf file is a key=value file with hash comments.
The parsers this combiner uses process only active settings (lines that are not commented out). The
resulting settings (after being processed by the precedence evaluation algorithm) are then provided
by the `get_active_settings_value` method and `active_settings` dictionary and by the
`get_active_setting_value_and_file_name` method and `active_settings_with_file_name` dictionary.
Options that are commented out are not returned - a rule using this parser has to be aware of which
default value is assumed by systemd if the particular option is not specified.
Priority from lowest to highest:
* built-in defaults (the same as the default commented entries in /etc/systemd/journald.conf)
* /etc/systemd/journald.conf
* \*.conf in whatever directory in lexicographic order from lowest to highest
* if two \*.conf files with the same name are both in /usr/lib and /etc, the file in /etc wholly
overwrites the file in /usr/lib
from man journald.conf in RHEL 7.3:
CONFIGURATION DIRECTORIES AND PRECEDENCE
Default configuration is defined during compilation, so a configuration
file is only needed when it is necessary to deviate from those
defaults. By default the configuration file in /etc/systemd/ contains
commented out entries showing the defaults as a guide to the
administrator. This file can be edited to create local overrides.
When packages need to customize the configuration, they can install
configuration snippets in /usr/lib/systemd/\*.conf.d/. Files in /etc/
are reserved for the local administrator, who may use this logic to
override the configuration files installed by vendor packages. The main
configuration file is read before any of the configuration directories,
and has the lowest precedence; entries in a file in any configuration
directory override entries in the single configuration file. Files in
the \*.conf.d/ configuration subdirectories are sorted by their filename
in lexicographic order, regardless of which of the subdirectories they
reside in. If multiple files specify the same option, the entry in the
file with the lexicographically latest name takes precedence. It is
recommended to prefix all filenames in those subdirectories with a
two-digit number and a dash, to simplify the ordering of the files.
To disable a configuration file supplied by the vendor, the recommended
way is to place a symlink to /dev/null in the configuration directory
in /etc/, with the same filename as the vendor configuration file.
Examples:
>>> conf = shared[JournaldConfAll]
>>> conf.get_active_setting_value('Storage')
'auto'
>>> 'Storage' in conf.active_settings_with_file_name
True
>>> conf.get_active_setting_value_and_file_name('Storage')
('auto', '/etc/systemd/journald.conf')
"""
from insights.core.plugins import combiner
from insights.parsers.journald_conf import EtcJournaldConf, EtcJournaldConfD, UsrJournaldConfD
# TODO - further insights work - convert this to a generic option & file priority evaluator for
# other combiners.
@combiner(requires=[EtcJournaldConf], optional=[EtcJournaldConfD, UsrJournaldConfD])
class JournaldConfAll(object):
    """
    Combiner for accessing files from the parsers EtcJournaldConf, EtcJournaldConfD, UsrJournaldConfD
    and evaluating effective active settings based on the rules of file priority and file shadowing
    as described in man journald.conf.

    Can be later refactored to a combiner for parsing all configuration files with key=option lines,
    like journald files.

    Rules of evaluation:

    * Files from EtcJournaldConfD wholly shadow/overwrite files from UsrJournaldConfD with identical
      names.
    * Files ordered by name from lowest priority to highest (a.conf has lower priority than b.conf).
    * Option values overwritten by the file with the highest priority.
    * The one central file has either the lowest priority or the highest priority, based on the
      central_file_lowest_prio argument.

    That is:

    * An entire file in UsrJournaldConfD is overwritten by a same-named file from EtcJournaldConfD.
    * A single option value is overwritten when another file with a higher priority has an option
      with the same option name.

    Example of file precedence::

        /etc/systemd/journald.conf:
        key0=value0
        key1=value1

        /usr/lib/systemd/journald.conf.d/a.conf:
        key2=value2
        key3=value3
        key4=value4
        key1=value5

        /usr/lib/systemd/journald.conf.d/b.conf:
        key5=value6
        key6=value7
        key1=value8
        key2=value9
        key4=value10

        /usr/lib/systemd/journald.conf.d/c.conf:
        key7=value11
        key5=value12
        key1=value13

        /etc/systemd/journald.conf.d/b.conf:
        key1=value14
        key5=value15

        the resulting configuration:
        key0=value0
        key1=value13 # c.conf has highest priority
        key2=value2 # b.conf from /usr is shadowed by b.conf from /etc so value from a.conf is used
        key3=value3
        key4=value4 # b.conf from /usr is shadowed by b.conf from /etc so value from a.conf is used
        key5=value12 # c.conf has higher priority than b.conf
        # key6 doesn't exist because b.conf from /usr is shadowed by b.conf from /etc
        key7=value11
    """
    def __init__(self, local, shared):
        # preparation for future possible refactoring into a more general combiner
        central_file_lowest_prio = True

        # comments in this method describe journald configuration; it should work for similar ones
        etc_confd = {}  # parser instances indexed by file name
        usr_confd = {}  # parser instances indexed by file name
        if EtcJournaldConfD in shared:
            for parser_instance in shared[EtcJournaldConfD]:
                etc_confd[parser_instance.file_name] = parser_instance
        if UsrJournaldConfD in shared:
            for parser_instance in shared[UsrJournaldConfD]:
                usr_confd[parser_instance.file_name] = parser_instance

        files_shadowed_not_used = set()  # full file paths of files that are shadowed by others
        effective_confd = {}  # deduplicated *.conf files, taking shadowing /usr by /etc into account
        # FIX: dict.iteritems() is Python 2 only; .items() behaves the same on
        # both Python 2 and 3 for this read-only iteration.
        for file_name, parser_instance in usr_confd.items():
            effective_confd[file_name] = parser_instance
        # /etc/systemd/journald.conf.d/*.conf shadow /usr/lib/systemd/journald.conf.d/*.conf files
        # with the same name. The following loop overwrites these same-named files by their /etc
        # counterparts:
        for file_name, parser_instance in etc_confd.items():
            if file_name in effective_confd:
                shadowed_file_name = effective_confd[file_name].file_path
                if shadowed_file_name:
                    # empty and None file names are not added (that is invalid anyway)
                    files_shadowed_not_used.add(shadowed_file_name)
            effective_confd[file_name] = parser_instance

        files_shadowed_not_used = sorted(files_shadowed_not_used)  # deterministic behavior, sorted paths

        sorted_file_names = sorted(effective_confd.keys())
        central_parser = shared[EtcJournaldConf][0]
        parsers_list = [effective_confd[file_name] for file_name in sorted_file_names]
        if central_file_lowest_prio:
            parsers_list = [central_parser] + parsers_list
        else:
            parsers_list = parsers_list + [central_parser]

        files_used_priority_order = []  # from lowest to highest priority, not including empty files
        # storing only the active values as (val, file_name), taking precedence rules into account
        resulting_options_with_file_name = {}
        # *.conf files from the lowest priority to the highest, so that the last value stays
        for parser_instance in parsers_list:
            if parser_instance.active_settings:  # do not iterate if empty or None
                if parser_instance.file_path:
                    # empty and None file names are not added (that is invalid anyway), see test_11
                    files_used_priority_order.append(parser_instance.file_path)
                for k, v in parser_instance.active_settings.items():
                    resulting_options_with_file_name[k] = (v, parser_instance.file_path)

        # not named simply as `active_settings` so as not to confuse the contents with the
        # `active_settings` dictionary in parsers/journald_conf.py
        # (dict[str, str] vs. dict[str, tuple[str, str]])
        self.active_settings_with_file_name = resulting_options_with_file_name
        # Not saving directly to self so that if this function fails in the middle, nothing is saved
        # for the offshoot chance that an exception would be swallowed and the invalid instance
        # used - prevents incomplete data from being used.
        self.files_shadowed_not_used = files_shadowed_not_used
        self.files_used_priority_order = files_used_priority_order
        super(JournaldConfAll, self).__init__()

    def get_active_setting_value(self, setting_name):
        """
        Access active setting value by setting name.

        Args:
            setting_name (string): Setting name
        """
        return self.active_settings_with_file_name[setting_name][0]

    def get_active_setting_value_and_file_name(self, setting_name):
        """
        Access active setting value by setting name. Returns the active setting value and file name
        of the file in which it is defined. Other files that also specify the setting but are
        shadowed are ignored and not reported.

        Args:
            setting_name (string): Setting name

        Returns:
            tuple[str, str]: setting value, file name
        """
        return self.active_settings_with_file_name[setting_name]
| apache-2.0 |
akash1808/nova_test_latest | nova/tests/unit/api/openstack/compute/test_plugin_framework.py | 46 | 1306 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from nova import test
from nova.tests.unit.api.openstack import fakes
CONF = cfg.CONF
class PluginTest(test.NoDBTestCase):
    """Sanity-check that the v2.1 API plugin framework routes a request to a
    test extension and returns its payload."""

    @mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace")
    def test_plugin_framework_index(self, mock_namespace):
        # Point extension loading at the test namespace instead of production.
        mock_namespace.return_value = 'nova.api.v3.test_extensions'
        app = fakes.wsgi_app_v21(init_only='test-basic')
        req = fakes.HTTPRequest.blank('/v2/fake/test')
        res = req.get_response(app)
        self.assertEqual(200, res.status_int)
        resp_json = jsonutils.loads(res.body)
        # 'param': 'val' is what the test extension is expected to emit.
        self.assertEqual('val', resp_json['param'])
| apache-2.0 |
magehost/magento-malware-scanner | tools/convert_yar_to_text.py | 3 | 1286 | #!/usr/bin/env python3
import re
import time

SECTIONS = ('backend', 'frontend')
# Raw strings: '\S' and '\$' in plain literals are invalid escape sequences
# on modern Python and only worked by accident before.
YARA_RULE = r'(rule (\S+) \{.+?\n\})'
NEEDLE = r'\$ = (.+)\n'

for section in SECTIONS:
    print("Parsing {}".format(section))
    with open('rules/{}.yar'.format(section)) as fh:
        content = fh.read()
    # Context managers guarantee both output files are flushed and closed
    # (the original never closed the standard output file at all).
    with open('rules/{}-custom.yar'.format(section), 'w') as customfh, \
         open('rules/{}.txt'.format(section), 'w') as standardfh:
        standardfh.write('''
# This file contains the {} malware signatures in two forms:
# - String literals (without backslash escaping)
# - Regexes (enclosed in / / characters)
#
# NB: Regex is slower! So literals are preferred.\n\n\n'''.lstrip().format(section.upper()))
        # print("got content with length {}".format(len(content)))
        for block, rulename in re.findall(YARA_RULE, content, flags=re.DOTALL):
            # print("{} {}".format(rulename, block))
            if 'condition: any of them' not in block:
                # Rules with bespoke conditions go to the "custom" file as-is.
                customfh.write(block.strip() + '\n')
                continue
            standardfh.write('# {}\n'.format(rulename))
            for needle in re.findall(NEEDLE, block):
                # print("found needle in rulename", rulename, needle)
                if needle.startswith('"'):  # otherwise, regex
                    # BUG FIX: str.decode('string_escape') is Python 2 only and
                    # crashes under this file's python3 shebang.  A latin-1
                    # round-trip through 'unicode_escape' is the py3 equivalent
                    # for the (byte-oriented) signature strings.
                    needle = needle[1:-1].encode('latin-1', 'backslashreplace').decode('unicode_escape')
                standardfh.write('{}\n'.format(needle))
            standardfh.write('\n')
| gpl-3.0 |
pombredanne/http-repo.gem5.org-gem5- | src/dev/x86/X86IntPin.py | 69 | 2483 | # Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.params import *
from m5.SimObject import SimObject
# A generic pin to drive an interrupt signal generated by a device.
class X86IntSourcePin(SimObject):
    """Output pin that drives an interrupt signal generated by a device."""
    type = 'X86IntSourcePin'                # gem5 SimObject type name
    cxx_class = 'X86ISA::IntSourcePin'      # backing C++ class
    cxx_header = "dev/x86/intdev.hh"
# A generic pin to receive an interrupt signal generated by another device.
class X86IntSinkPin(SimObject):
    """Input pin that receives an interrupt signal from another device."""
    type = 'X86IntSinkPin'
    cxx_class = 'X86ISA::IntSinkPin'
    cxx_header = "dev/x86/intdev.hh"

    device = Param.SimObject("Device this pin belongs to")
    number = Param.Int("The pin number on the device")
# An interrupt line which is driven by a source pin and drives a sink pin.
class X86IntLine(SimObject):
    """Interrupt line connecting a source pin to a sink pin."""
    type = 'X86IntLine'
    cxx_class = 'X86ISA::IntLine'
    cxx_header = "dev/x86/intdev.hh"

    source = Param.X86IntSourcePin("Pin driving this line")
    sink = Param.X86IntSinkPin("Pin driven by this line")
| bsd-3-clause |
groutr/numpy | numpy/distutils/fcompiler/pathf95.py | 229 | 1209 | from __future__ import division, absolute_import, print_function
from numpy.distutils.fcompiler import FCompiler
compilers = ['PathScaleFCompiler']
class PathScaleFCompiler(FCompiler):
    """numpy.distutils description of the PathScale pathf95 Fortran compiler."""
    compiler_type = 'pathf95'
    description = 'PathScale Fortran Compiler'
    # Matches e.g. "PathScale(TM) Compiler Suite: Version 3.2"
    version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P<version>[\d.]+)'

    executables = {
        'version_cmd'  : ["pathf95", "-version"],
        'compiler_f77' : ["pathf95", "-fixedform"],
        'compiler_fix' : ["pathf95", "-fixedform"],
        'compiler_f90' : ["pathf95"],
        'linker_so'    : ["pathf95", "-shared"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }
    pic_flags = ['-fPIC']
    module_dir_switch = '-module ' # Don't remove ending space!
    module_include_switch = '-I'

    def get_flags_opt(self):
        # Highest standard optimization level.
        return ['-O3']
    def get_flags_debug(self):
        return ['-g']
if __name__ == '__main__':
    # Manual smoke test: detect pathf95 and report its version.
    from distutils import log
    log.set_verbosity(2)
    #compiler = PathScaleFCompiler()
    from numpy.distutils.fcompiler import new_fcompiler
    compiler = new_fcompiler(compiler='pathf95')
    compiler.customize()
    print(compiler.get_version())
| bsd-3-clause |
todd-x86/tkplus | exception.py | 1 | 1522 | from form import Form
from button import Button
from label import Label
from image import Image
from memo import Memo
import os
import Tkinter as tk
import traceback
def handle_exception(ex, stacktrace=None):
    """Show a modal error dialog for *ex* with the stack trace in a memo box."""
    err_icon = os.path.join(os.path.dirname(__file__), 'graphics', 'icon_error.gif')
    frm = Form(caption='Exception: {}'.format(ex.__class__.__name__),
               left=100, top=100, width=350, height=180)
    frm.resizable = False
    # NOTE(review): ex.message is Python 2 era; str(ex) would be the
    # Python 3 spelling - confirm the target interpreter.
    msg = Label(frm, left=45, top=5, width=305, height=40, caption=ex.message)
    msg.wordwrap = True
    img = Image(frm, left=5, top=15, width=32, height=32, file=err_icon)
    trace = Memo(frm, left=5, top=55, width=335, height=90)
    trace.text = stacktrace
    def close_form():
        frm.close()
    btn = Button(frm, left=140, top=148, width=65, height=27, caption="Close")
    btn.on_click = close_form
    frm.show_modal()
def enable_handler():
    # Install ExceptionHandler as Tkinter's callback wrapper so every widget
    # callback exception is routed through handle_exception().
    tk.CallWrapper = ExceptionHandler
class ExceptionHandler(object):
    """Tkinter CallWrapper replacement that funnels uncaught callback
    exceptions into the handle_exception() error dialog.

    Tkinter instantiates this for every widget callback with the target
    function, an optional argument-substitution function and the widget.
    """
    def __init__(self, func, subst, widget):
        self._func = func
        self._subst = subst
        self._widget = widget

    def __call__(self, *args):
        try:
            if self._subst:
                return self._subst(*args)
            else:
                return self._func(*args)
        except SystemExit:
            # FIX: was the Python 2 only form "except SystemExit, msg:
            # raise SystemExit, msg".  A bare re-raise preserves the original
            # exception (including its code) on both Python 2 and 3.
            raise
        except Exception as ex:
            # TODO: Figure out how to ignore this section of code in stacktrace
            handle_exception(ex, traceback.format_exc())
| apache-2.0 |
ml-lab/pylearn2 | pylearn2/testing/skip.py | 49 | 1363 | """
Helper functions for determining which tests to skip.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from nose.plugins.skip import SkipTest
import os

from theano.sandbox import cuda

# Feature-availability flags, probed once at import time.  Each optional
# dependency leaves its *_works flag False when the import fails.
scipy_works = True
try:
    import scipy
except ImportError:
    # pyflakes gets mad if you set scipy to None here
    scipy_works = False

sklearn_works = True
try:
    import sklearn
except ImportError:
    sklearn_works = False

h5py_works = True
try:
    import h5py
except ImportError:
    h5py_works = False

matplotlib_works = True
try:
    from matplotlib import pyplot
except ImportError:
    matplotlib_works = False
def skip_if_no_data():
    """Skip the calling test unless PYLEARN2_DATA_PATH is set."""
    if 'PYLEARN2_DATA_PATH' not in os.environ:
        raise SkipTest()
def skip_if_no_scipy():
    """Skip the calling test when scipy failed to import."""
    if not scipy_works:
        raise SkipTest()
def skip_if_no_sklearn():
    """Skip the calling test when sklearn failed to import."""
    if not sklearn_works:
        raise SkipTest()
def skip_if_no_gpu():
    """Skip the calling test when Theano's CUDA backend is unavailable."""
    # Explicit "== False" kept as-is: it distinguishes False from other
    # falsy values cuda_available might take.
    if cuda.cuda_available == False:
        raise SkipTest('Optional package cuda disabled.')
def skip_if_no_h5py():
    """Skip the calling test when h5py failed to import."""
    if not h5py_works:
        raise SkipTest()
def skip_if_no_matplotlib():
    """Skip the calling test when matplotlib/pyplot failed to import."""
    if not matplotlib_works:
        raise SkipTest("matplotlib and pyplot are not available")
| bsd-3-clause |
lingfliu/smart_tuwa | twrt/testbed/massive_scene_test.py | 1 | 3298 | import socket
import time
import sys
import random
import math
import threading

# --- TuWa gateway protocol constants --------------------------------------
# Frame layout: 'AADD' header + 4-byte stamp + gateway id + device id +
# device type + 2-byte datatype + 2-byte length + payload.
msg_header = 'AADD'
msg_stamp = '\x00\x00\x00\x00'
msg_id_gw = '2016A008'
msg_id_dev = '00000000'
msg_devtype = '\x01\x00'
msg_auth_key = '88888888'
msg_auth_datatype = '\x1c\x00'
msg_auth = msg_header+msg_stamp+msg_id_gw+msg_id_dev+msg_devtype+msg_auth_datatype+'\x00\x08'+msg_auth_key

#serverAddress = ('192.168.20.104', 9091)
serverAddress = ('localhost', 9091)

# Authenticate against the gateway before pushing scenes (Python 2 script).
skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
skt.connect(serverAddress)
length = skt.send(msg_auth)
msg_bak = skt.recv(1024)
print msg_bak

#scene set
for i in range(0,20):
    print('create scene' + str(i))
    sce_type_val = int(math.ceil(random.random()*3))
    sce_type = '%c'%sce_type_val
    sce_id_major_val = i #round(random.random()*1000)
    sce_id_major = '%08d'%sce_id_major_val
    sce_id_minor_val = i #round(random.random()*1000)
    sce_id_minor = '%08d'%sce_id_minor_val
    sce_mac_val= round(random.random()*1000)
    sce_mac = '%08d'%sce_mac_val
    sce_name_val = round(random.random()*100)
    sce_name = 'scene'+'%04d'%sce_name_val + '\x00'*51
    # NOTE(review): sce_type is recomputed here (1..4) overriding the 1..3
    # value above - presumably intentional; confirm against the protocol.
    sce_type_val = int(math.ceil(random.random()*4))
    sce_type = '%c'%sce_type_val
    sce_type +='\x00'*3
    sce_trigger_num = 100 #int(random.random()*100)
    sce_trigger = ''
    for m in range(0, sce_trigger_num):
        sce_trigger_val = round(random.random()*100)
        sce_trigger += ('%08d'%sce_trigger_val)*6
    sce_item_num = int(random.random()*100)
    sce_item = ''
    for m in range(0, sce_item_num):
        sce_item_val = round(random.random()*100)
        sce_item += ('%08d'%sce_item_val)*6
    # Body length = 48 bytes per trigger + 48 per item + 96 fixed fields,
    # encoded big-endian into two bytes.
    body_len_val = 48*sce_item_num + 48*sce_trigger_num + 96
    body_len = ''
    body_len +='%c'%(int(body_len_val/256))
    body_len +='%c'%(body_len_val%256)
    msg_set_scene = msg_header+msg_stamp+msg_id_gw+msg_id_dev+msg_devtype+'\x0f\x00'+ body_len + sce_id_major +sce_id_minor+sce_mac+sce_type+sce_name+'%c'%sce_trigger_num + '\x00'*3+'%c'%sce_item_num+'\x00'*3+sce_trigger+sce_item
    #print('message length=' + str(len(msg_set_scene)))
    #print('body length=' + str(body_len_val))
    print (sce_id_major + ' ' + sce_id_minor + ' ' + sce_mac + ' ' + sce_name + ' ' + str(sce_trigger_num) + ' ' + str(sce_item_num) )
    #print(str('%c'%sce_trigger_num))
    #print(body_len)
    #print('msg = ' + msg_set_scene)
    # Send the frame in 256-byte chunks with a short pacing delay.
    m = 0
    while(True):
        if m+256 < len(msg_set_scene):
            pkt = msg_set_scene[m:m+256]
            length = skt.send(pkt)
            print length
            m += 256
            time.sleep(0.01)
            continue
        else:
            pkt = msg_set_scene[m:]
            length = skt.send(pkt)
            time.sleep(0.01)
            print length
            break
    #length = skt.send(msg_set_scene())
    msg_bak = skt.recv(1024)
    print msg_bak
    time.sleep(0.01)

# Tell the gateway the scene upload is finished.
msg_finish_scene = msg_header+msg_stamp+msg_id_gw+msg_id_dev+msg_devtype+'\x11\x00'+'\x00\x01' + '\x00'
print('msg finish = ' + msg_finish_scene)
length = skt.send(msg_finish_scene)
print length
msg_bak = skt.recv(1024)
print msg_bak

#while(True):
    #msg_bak = skt.recv(1024)
    #print msg_bak
    #pass
| apache-2.0 |
petteyg/intellij-community | python/helpers/pydev/pydevd_attach_to_process/_test_attach_to_process_linux.py | 82 | 2779 | '''
This module is just for testing concepts. It should be erased later on.
Experiments:
// gdb -p 4957
// call dlopen("/home/fabioz/Desktop/dev/PyDev.Debugger/pydevd_attach_to_process/linux/attach_linux.so", 2)
// call dlsym($1, "hello")
// call hello()
// call open("/home/fabioz/Desktop/dev/PyDev.Debugger/pydevd_attach_to_process/linux/attach_linux.so", 2)
// call mmap(0, 6672, 1 | 2 | 4, 1, 3 , 0)
// add-symbol-file
// cat /proc/pid/maps
// call dlopen("/home/fabioz/Desktop/dev/PyDev.Debugger/pydevd_attach_to_process/linux/attach_linux.so", 1|8)
// call dlsym($1, "hello")
// call hello()
'''
import subprocess
import sys
import os
import time

if __name__ == '__main__':
    # Experimental scratch script (see module docstring): rebuild the attach
    # helper .so, start a long-lived target process, then drive gdb to inject
    # the library into it.  Paths are hard-coded to the author's machine.
    linux_dir = os.path.join(os.path.dirname(__file__), 'linux')
    os.chdir(linux_dir)
    so_location = os.path.join(linux_dir, 'attach_linux.so')
    try:
        os.remove(so_location)
    except:
        pass
    subprocess.call('g++ -shared -o attach_linux.so -fPIC -nostartfiles attach_linux.c'.split())
    print('Finished compiling')
    assert os.path.exists('/home/fabioz/Desktop/dev/PyDev.Debugger/pydevd_attach_to_process/linux/attach_linux.so')
    os.chdir(os.path.dirname(linux_dir))
    # import attach_pydevd
    # attach_pydevd.main(attach_pydevd.process_command_line(['--pid', str(p.pid)]))
    p = subprocess.Popen([sys.executable, '-u', '_always_live_program.py'])
    print('Size of file: %s' % (os.stat(so_location).st_size))

    #(gdb) set architecture
    # Requires an argument. Valid arguments are i386, i386:x86-64, i386:x64-32, i8086, i386:intel, i386:x86-64:intel, i386:x64-32:intel, i386:nacl, i386:x86-64:nacl, i386:x64-32:nacl, auto.
    cmd = [
        'gdb',
        '--pid',
        str(p.pid),
        '--batch',
    ]

    arch = 'i386:x86-64'
    if arch:
        cmd.extend(["--eval-command='set architecture %s'" % arch])

    cmd.extend([
        "--eval-command='call dlopen(\"/home/fabioz/Desktop/dev/PyDev.Debugger/pydevd_attach_to_process/linux/attach_linux.so\", 2)'",
        "--eval-command='call DoAttach(1, \"print(\\\"check11111check\\\")\", 0)'",
        #"--eval-command='call SetSysTraceFunc(1, 0)'", -- never call this way, always use "--command='...gdb_threads_settrace.py'",
        #So that threads are all stopped!
        "--command='/home/fabioz/Desktop/dev/PyDev.Debugger/pydevd_attach_to_process/linux/gdb_threads_settrace.py'",
    ])

    cmd.extend(['--command=/home/fabioz/Desktop/dev/PyDev.Debugger/pydevd_attach_to_process/linux/gdb_threads_settrace.py'])
    print(' '.join(cmd))
    time.sleep(.5)
    env = os.environ.copy()
    # Strip Python-specific variables so gdb's embedded python isn't confused.
    env.pop('PYTHONIOENCODING', None)
    env.pop('PYTHONPATH', None)
    p2 = subprocess.call(' '.join(cmd), env=env, shell=True)
    time.sleep(1)
    p.kill()
| apache-2.0 |
chffelix/arvore | languages/zh-cn.py | 142 | 10465 | # coding: utf8
{
'!langcode!': 'zh-cn',
'!langname!': '中文',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" 应为选择表达式, 格式如 "field1=\'value\'". 但是对 JOIN 的结果不可以使用 update 或者 delete"',
'%s %%{row} deleted': '已删除 %s 笔',
'%s %%{row} updated': '已更新 %s 笔',
'%s selected': '%s 已选择',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(something like "it-it")': '(格式类似 "zh-tw")',
'A new version of web2py is available': '新版 web2py 已推出',
'A new version of web2py is available: %s': '新版 web2py 已推出: %s',
'about': '关于',
'About': '关于',
'About application': '关于本应用程序',
'Access Control': 'Access Control',
'Admin is disabled because insecure channel': '管理功能(Admin)在非安全连接环境下自动关闭',
'Admin is disabled because unsecure channel': '管理功能(Admin)在非安全连接环境下自动关闭',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': '点击进入管理界面',
'Administrator Password:': '管理员密码:',
'Ajax Recipes': 'Ajax Recipes',
'An error occured, please %s the page': 'An error occured, please %s the page',
'appadmin is disabled because insecure channel': '管理界面在非安全通道下被禁用',
'Are you sure you want to delete file "%s"?': '确定要删除文件"%s"?',
'Are you sure you want to delete this object?': '确定要删除该对象么?',
'Are you sure you want to uninstall application "%s"': '确定要删除应用程序 "%s"',
'Are you sure you want to uninstall application "%s"?': '确定要删除应用程序 "%s"',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': '注意: 登录管理账号需要安全连接(HTTPS)或是在本地连接(localhost).',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': '注意: 因为在测试模式不保证多线程安全性,所以不可同时执行多个测试案例',
'ATTENTION: you cannot edit the running application!': '注意:不可编辑正在执行的应用程序!',
'Authentication': '验证',
'Available Databases and Tables': '可提供的数据库和数据表',
'Buy this book': '购买本书',
'cache': '高速缓存',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': '不可空白',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': '编译失败:应用程序有错误,请排除错误后再尝试编译.',
'Change Password': '修改密码',
'change password': '修改密码',
'Check to delete': '打勾以示删除',
'Check to delete:': '打勾以示删除:',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': '客户端网址(IP)',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': '控件',
'Controllers': '控件',
'Copyright': '版权所有',
'Create new application': '创建应用程序',
'Created By': 'Created By',
'Created On': 'Created On',
'Current request': '当前网络要求(request)',
'Current response': '当前网络响应(response)',
'Current session': '当前网络连接信息(session)',
'customize me!': '请调整我!',
'data uploaded': '数据已上传',
'Database': '数据库',
'Database %s select': '已选择 %s 数据库',
'Date and Time': '日期和时间',
'db': 'db',
'DB Model': '数据库模型',
'Delete': '删除',
'Delete:': '删除:',
'Demo': 'Demo',
'Deploy on Google App Engine': '发布到 Google App Engine',
'Deployment Recipes': 'Deployment Recipes',
'Description': '描述',
'DESIGN': '设计',
'design': '设计',
'Design for': '设计用于',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': '完成!',
'Download': '下载',
'E-mail': '电子邮件',
'EDIT': '编辑',
'Edit': '编辑',
'Edit application': '编辑应用程序',
'Edit current record': '编辑当前记录',
'edit profile': '编辑配置文件',
'Edit Profile': '编辑配置文件',
'Edit This App': '编辑本应用程序',
'Editing file': '编辑文件',
'Editing file "%s"': '编辑文件"%s"',
'Email and SMS': 'Email and SMS',
'enter an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g',
'Error logs for "%(app)s"': '"%(app)s"的错误记录',
'Errors': 'Errors',
'export as csv file': '以CSV格式导出',
'FAQ': 'FAQ',
'First name': '名',
'Forgot username?': '忘记用户名?',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Functions with no doctests will result in [passed] tests.': '沒有 doctests 的函数会显示 [passed].',
'Group ID': '群组编号',
'Groups': 'Groups',
'Hello World': 'Hello World',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': '导入/导出',
'Index': '索引',
'insert new': '插入新纪录',
'insert new %s': '插入新纪录 %s',
'Installed applications': '已安裝应用程序',
'Internal State': '內部状态',
'Introduction': 'Introduction',
'Invalid action': '非法操作(action)',
'Invalid email': '不符合电子邮件格式',
'Invalid Query': '无效的查询请求',
'invalid request': '网络要求无效',
'Is Active': 'Is Active',
'Key': 'Key',
'Language files (static strings) updated': '语言文件已更新',
'Languages': '各国语言',
'Last name': '姓',
'Last saved on:': '最后保存时间:',
'Layout': '网页布局',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'License for': '软件授权',
'Live Chat': 'Live Chat',
'login': '登录',
'Login': '登录',
'Login to the Administrative Interface': '登录到管理员界面',
'logout': '登出',
'Logout': '登出',
'Lost Password': '忘记密码',
'Lost password?': '忘记密码?',
'Main Menu': '主菜单',
'Manage Cache': 'Manage Cache',
'Menu Model': '菜单模型(menu)',
'Models': '数据模型',
'Modified By': '修改者',
'Modified On': '修改时间',
'Modules': '程序模块',
'My Sites': 'My Sites',
'Name': '名字',
'New Record': '新记录',
'new record inserted': '已插入新记录',
'next 100 rows': '往后 100 笔',
'NO': '否',
'No databases in this application': '该应用程序不含数据库',
'Object or table name': 'Object or table name',
'Online examples': '点击进入在线例子',
'or import from csv file': '或导入CSV文件',
'Origin': '原文',
'Original/Translation': '原文/翻译',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': '概览',
'Password': '密码',
"Password fields don't match": '密码不匹配',
'Peeking at file': '选择文件',
'Plugins': 'Plugins',
'Powered by': '基于下列技术构建:',
'Preface': 'Preface',
'previous 100 rows': '往前 100 笔',
'Python': 'Python',
'Query:': '查询:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': '记录',
'record does not exist': '记录不存在',
'Record ID': '记录编号',
'Record id': '记录编号',
'Register': '注册',
'register': '注册',
'Registration identifier': 'Registration identifier',
'Registration key': '注册密钥',
'reload': 'reload',
'Remember me (for 30 days)': '记住我(30 天)',
'Reset Password key': '重置密码',
'Resolve Conflict file': '解决冲突文件',
'Role': '角色',
'Rows in Table': '在数据表里的记录',
'Rows selected': '笔记录被选择',
'Saved file hash:': '已保存文件的哈希值:',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': '状态',
'Static files': '静态文件',
'Statistics': '统计数据',
'Stylesheet': '网页样式表',
'submit': '提交',
'Submit': '提交',
'Support': 'Support',
'Sure you want to delete this object?': '确定要删除此对象?',
'Table': '数据表',
'Table name': '数据表名称',
'Testing application': '测试中的应用程序',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"query"应是类似 "db.table1.field1==\'value\'" 的条件表达式. "db.table1.field1==db.table2.field2"的形式则代表执行 JOIN SQL.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': '视图',
'There are no controllers': '沒有控件(controllers)',
'There are no models': '沒有数据库模型(models)',
'There are no modules': '沒有程序模块(modules)',
'There are no static files': '沒有静态文件',
'There are no translators, only default language is supported': '沒有对应的语言文件,仅支持原始语言',
'There are no views': '沒有视图',
'This App': '该应用',
'This is the %(filename)s template': '这是%(filename)s文件的模板(template)',
'Ticket': '问题清单',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': '时间戳',
'Twitter': 'Twitter',
'Unable to check for upgrades': '查询新版本失败',
'Unable to download': '无法下载',
'Unable to download app': '无法下载应用程序',
'unable to parse csv file': '无法解析CSV文件',
'Update:': '更新:',
'Upload existing application': '上传已有应用程序',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '使用下列方式可得到更复杂的条件表达式, (...)&(...) 代表必须都满足, (...)|(...) 代表其一, ~(...)则代表否.',
'User %(id)s Logged-in': '用户 %(id)s 已登录',
'User %(id)s Registered': '用户 %(id)s 已注册',
'User ID': '用户编号',
'Verify Password': '验证密码',
'Videos': '视频',
'View': '查看',
'Views': '视图',
'Welcome': '欢迎',
'Welcome %s': '欢迎 %s',
'Welcome to web2py': '欢迎使用 web2py',
'Welcome to web2py!': '欢迎使用 web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'YES': '是',
'You are successfully running web2py': '您已成功运行 web2py',
'You can modify this application and adapt it to your needs': '请根据您的需要修改本程序',
'You visited the url %s': 'You visited the url %s',
}
| gpl-3.0 |
sublime-ycmd/sublime-ycmd | tests/lib/subtest.py | 1 | 1883 | #!/usr/bin/env python3
'''
tests/lib/subtest.py
Utility functions for running sub-tests within a test case. Includes additional
logging to add context during sub-test execution.
'''
import logging
import unittest
from tests.lib.decorator import log_function
logger = logging.getLogger('sublime-ycmd.' + __name__)
def _is_args_kwargs(test_case):
if not isinstance(test_case, (tuple, list)):
return False
if len(test_case) != 2:
return False
if not isinstance(test_case[1], dict):
return False
return True
def map_test_function(test_instance, test_function, test_cases):
    """Run *test_function* once per entry of *test_cases* as unittest sub-tests.

    Each test case may be expressed in one of three shapes:
      * ``(args, kwargs)`` - a 2-item tuple/list whose second item is a dict,
      * a bare ``dict`` - treated as keyword arguments only,
      * any other iterable - treated as positional arguments only.

    Every invocation is wrapped with ``log_function`` (labelled with the
    1-based case index) and executed inside ``test_instance.subTest`` so one
    failing case does not abort the remaining ones.
    """
    assert isinstance(test_instance, unittest.TestCase), \
        'test instance must be a unittest.TestCase: %r' % (test_instance)
    assert callable(test_function), \
        'test function must be callable: %r' % (test_function)
    assert hasattr(test_cases, '__iter__'), \
        'test cases must be iterable: %r' % (test_cases)

    for test_index, test_case in enumerate(test_cases, start=1):
        # Classify the shape of this test case; exactly one flag ends up True.
        is_args_kwargs = _is_args_kwargs(test_case)
        is_kwargs = isinstance(test_case, dict)
        is_args = not (is_args_kwargs or is_kwargs)

        if is_args_kwargs:
            test_args, test_kwargs = test_case
        elif is_kwargs:
            test_args = tuple()
            test_kwargs = test_case
        elif is_args:
            # NOTE(review): a plain string lands here and is splatted
            # character-by-character - presumably callers never pass one.
            test_args = test_case
            test_kwargs = dict()

        # Only log the argument forms that were actually supplied.
        log_args = is_args_kwargs or is_args
        log_kwargs = is_args_kwargs or is_kwargs

        wrapped_test_function = log_function(
            desc='[%d]' % (test_index),
            include_args=log_args, include_kwargs=log_kwargs,
        )(test_function)

        with test_instance.subTest(num=test_index,
                                   args=test_args, kwargs=test_kwargs):
            wrapped_test_function(*test_args, **test_kwargs)
| mit |
saguziel/incubator-airflow | airflow/ti_deps/deps/trigger_rule_dep.py | 22 | 8261 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import case, func
import airflow
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.db import provide_session
from airflow.utils.state import State
class TriggerRuleDep(BaseTIDep):
    """
    Determines if a task's upstream tasks are in a state that allows a given task instance
    to run.
    """
    NAME = "Trigger Rule"
    IGNOREABLE = True
    IS_TASK_DEP = True

    @provide_session
    def _get_dep_statuses(self, ti, session, dep_context):
        # Aggregates the states of this task's upstream task instances (for
        # the same execution_date) in a single query and hands the counts to
        # _evaluate_trigger_rule() for the actual rule decision.
        TI = airflow.models.TaskInstance
        TR = airflow.models.TriggerRule

        # Checking that all upstream dependencies have succeeded
        if not ti.task.upstream_list:
            yield self._passing_status(
                reason="The task instance did not have any upstream tasks.")
            return

        if ti.task.trigger_rule == TR.DUMMY:
            yield self._passing_status(reason="The task had a dummy trigger rule set.")
            return

        # TODO(unknown): this query becomes quite expensive with dags that have many
        # tasks. It should be refactored to let the task report to the dag run and get the
        # aggregates from there.
        qry = (
            session
            .query(
                # One aggregate column per terminal state (SUCCESS, SKIPPED,
                # FAILED, UPSTREAM_FAILED), plus the total finished count.
                func.coalesce(func.sum(
                    case([(TI.state == State.SUCCESS, 1)], else_=0)), 0),
                func.coalesce(func.sum(
                    case([(TI.state == State.SKIPPED, 1)], else_=0)), 0),
                func.coalesce(func.sum(
                    case([(TI.state == State.FAILED, 1)], else_=0)), 0),
                func.coalesce(func.sum(
                    case([(TI.state == State.UPSTREAM_FAILED, 1)], else_=0)), 0),
                func.count(TI.task_id),
            )
            .filter(
                TI.dag_id == ti.dag_id,
                TI.task_id.in_(ti.task.upstream_task_ids),
                TI.execution_date == ti.execution_date,
                TI.state.in_([
                    State.SUCCESS, State.FAILED,
                    State.UPSTREAM_FAILED, State.SKIPPED]),
            )
        )

        successes, skipped, failed, upstream_failed, done = qry.first()
        for dep_status in self._evaluate_trigger_rule(
                ti=ti,
                successes=successes,
                skipped=skipped,
                failed=failed,
                upstream_failed=upstream_failed,
                done=done,
                flag_upstream_failed=dep_context.flag_upstream_failed,
                session=session):
            yield dep_status

    @provide_session
    def _evaluate_trigger_rule(
            self,
            ti,
            successes,
            skipped,
            failed,
            upstream_failed,
            done,
            flag_upstream_failed,
            session):
        """
        Yields a dependency status that indicate whether the given task instance's trigger
        rule was met.

        :param ti: the task instance to evaluate the trigger rule of
        :type ti: TaskInstance
        :param successes: Number of successful upstream tasks
        :type successes: int
        :param skipped: Number of skipped upstream tasks
        :type skipped: int
        :param failed: Number of failed upstream tasks
        :type failed: int
        :param upstream_failed: Number of upstream_failed upstream tasks
        :type upstream_failed: int
        :param done: Number of completed upstream tasks
        :type done: int
        :param flag_upstream_failed: This is a hack to generate
            the upstream_failed state creation while checking to see
            whether the task instance is runnable. It was the shortest
            path to add the feature
        :type flag_upstream_failed: boolean
        :param session: database session
        :type session: Session
        """
        TR = airflow.models.TriggerRule

        task = ti.task
        upstream = len(task.upstream_task_ids)
        tr = task.trigger_rule
        # True once every upstream task instance has reached a terminal state.
        upstream_done = done >= upstream
        upstream_tasks_state = {
            "successes": successes, "skipped": skipped, "failed": failed,
            "upstream_failed": upstream_failed, "done": done
        }
        # TODO(aoen): Ideally each individual trigger rules would be it's own class, but
        # this isn't very feasible at the moment since the database queries need to be
        # bundled together for efficiency.
        # handling instant state assignment based on trigger rules
        if flag_upstream_failed:
            # Side effect: may flip this TI to UPSTREAM_FAILED/SKIPPED in the
            # DB before (and independently of) the failing statuses below.
            if tr == TR.ALL_SUCCESS:
                if upstream_failed or failed:
                    ti.set_state(State.UPSTREAM_FAILED, session)
                elif skipped:
                    ti.set_state(State.SKIPPED, session)
            elif tr == TR.ALL_FAILED:
                if successes or skipped:
                    ti.set_state(State.SKIPPED, session)
            elif tr == TR.ONE_SUCCESS:
                if upstream_done and not successes:
                    ti.set_state(State.SKIPPED, session)
            elif tr == TR.ONE_FAILED:
                if upstream_done and not (failed or upstream_failed):
                    ti.set_state(State.SKIPPED, session)

        # Evaluate whether the rule is currently satisfied; yield a failing
        # status with a human-readable reason when it is not.
        if tr == TR.ONE_SUCCESS:
            if successes <= 0:
                yield self._failing_status(
                    reason="Task's trigger rule '{0}' requires one upstream "
                           "task success, but none were found. "
                           "upstream_tasks_state={1}, upstream_task_ids={2}"
                    .format(tr, upstream_tasks_state, task.upstream_task_ids))
        elif tr == TR.ONE_FAILED:
            if not failed and not upstream_failed:
                yield self._failing_status(
                    reason="Task's trigger rule '{0}' requires one upstream "
                           "task failure, but none were found. "
                           "upstream_tasks_state={1}, upstream_task_ids={2}"
                    .format(tr, upstream_tasks_state, task.upstream_task_ids))
        elif tr == TR.ALL_SUCCESS:
            # Note: anything that is not SUCCESS (including skips) counts as a
            # failure for ALL_SUCCESS.
            num_failures = upstream - successes
            if num_failures > 0:
                yield self._failing_status(
                    reason="Task's trigger rule '{0}' requires all upstream "
                           "tasks to have succeeded, but found {1} non-success(es). "
                           "upstream_tasks_state={2}, upstream_task_ids={3}"
                    .format(tr, num_failures, upstream_tasks_state,
                            task.upstream_task_ids))
        elif tr == TR.ALL_FAILED:
            num_successes = upstream - failed - upstream_failed
            if num_successes > 0:
                yield self._failing_status(
                    reason="Task's trigger rule '{0}' requires all upstream "
                           "tasks to have failed, but found {1} non-failure(s). "
                           "upstream_tasks_state={2}, upstream_task_ids={3}"
                    .format(tr, num_successes, upstream_tasks_state,
                            task.upstream_task_ids))
        elif tr == TR.ALL_DONE:
            if not upstream_done:
                yield self._failing_status(
                    reason="Task's trigger rule '{0}' requires all upstream "
                           "tasks to have completed, but found {1} task(s) that "
                           "weren't done. upstream_tasks_state={2}, "
                           "upstream_task_ids={3}"
                    .format(tr, upstream-done, upstream_tasks_state,
                            task.upstream_task_ids))
        else:
            yield self._failing_status(
                reason="No strategy to evaluate trigger rule '{0}'.".format(tr))
| apache-2.0 |
ycl2045/nova-master | nova/tests/objects/test_aggregate.py | 11 | 6013 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova import exception
from nova.objects import aggregate
from nova.openstack.common import timeutils
from nova.tests.objects import test_objects
NOW = timeutils.utcnow().replace(microsecond=0)
fake_aggregate = {
'created_at': NOW,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'id': 123,
'name': 'fake-aggregate',
'hosts': ['foo', 'bar'],
'metadetails': {'this': 'that'},
}
SUBS = {'metadata': 'metadetails'}
class _TestAggregateObject(object):
    """Shared test cases for the Aggregate and AggregateList objects.

    The nova db API is stubbed out with mox; the two concrete subclasses
    below run the very same cases through the local and the remote
    (RPC-backed) object code paths.
    """

    def test_get_by_id(self):
        self.mox.StubOutWithMock(db, 'aggregate_get')
        db.aggregate_get(self.context, 123).AndReturn(fake_aggregate)
        self.mox.ReplayAll()
        agg = aggregate.Aggregate.get_by_id(self.context, 123)
        self.compare_obj(agg, fake_aggregate, subs=SUBS)

    def test_create(self):
        self.mox.StubOutWithMock(db, 'aggregate_create')
        db.aggregate_create(self.context, {'name': 'foo'},
                            metadata={'one': 'two'}).AndReturn(fake_aggregate)
        self.mox.ReplayAll()
        agg = aggregate.Aggregate()
        agg.name = 'foo'
        agg.metadata = {'one': 'two'}
        agg.create(self.context)
        self.compare_obj(agg, fake_aggregate, subs=SUBS)

    def test_recreate_fails(self):
        self.mox.StubOutWithMock(db, 'aggregate_create')
        db.aggregate_create(self.context, {'name': 'foo'},
                            metadata={'one': 'two'}).AndReturn(fake_aggregate)
        self.mox.ReplayAll()
        agg = aggregate.Aggregate()
        agg.name = 'foo'
        agg.metadata = {'one': 'two'}
        agg.create(self.context)
        # A second create() on an already-created object must be rejected.
        self.assertRaises(exception.ObjectActionError, agg.create,
                          self.context)

    def test_save(self):
        self.mox.StubOutWithMock(db, 'aggregate_update')
        db.aggregate_update(self.context, 123, {'name': 'baz'}).AndReturn(
            fake_aggregate)
        self.mox.ReplayAll()
        agg = aggregate.Aggregate()
        agg.id = 123
        agg.name = 'baz'
        agg.save(self.context)
        self.compare_obj(agg, fake_aggregate, subs=SUBS)

    def test_save_and_create_no_hosts(self):
        # 'hosts' may not be set directly through create()/save().
        agg = aggregate.Aggregate()
        agg.id = 123
        agg.hosts = ['foo', 'bar']
        self.assertRaises(exception.ObjectActionError,
                          agg.create, self.context)
        self.assertRaises(exception.ObjectActionError,
                          agg.save, self.context)

    def test_update_metadata(self):
        self.mox.StubOutWithMock(db, 'aggregate_metadata_delete')
        self.mox.StubOutWithMock(db, 'aggregate_metadata_add')
        db.aggregate_metadata_delete(self.context, 123, 'todelete')
        db.aggregate_metadata_add(self.context, 123, {'toadd': 'myval'})
        self.mox.ReplayAll()
        agg = aggregate.Aggregate()
        agg._context = self.context
        agg.id = 123
        agg.metadata = {'foo': 'bar'}
        agg.obj_reset_changes()
        # A None value requests deletion; any other value is added/updated.
        agg.update_metadata({'todelete': None, 'toadd': 'myval'})
        self.assertEqual({'foo': 'bar', 'toadd': 'myval'}, agg.metadata)

    def test_destroy(self):
        self.mox.StubOutWithMock(db, 'aggregate_delete')
        db.aggregate_delete(self.context, 123)
        self.mox.ReplayAll()
        agg = aggregate.Aggregate()
        agg.id = 123
        agg.destroy(self.context)

    def test_add_host(self):
        self.mox.StubOutWithMock(db, 'aggregate_host_add')
        db.aggregate_host_add(self.context, 123, 'bar'
                              ).AndReturn({'host': 'bar'})
        self.mox.ReplayAll()
        agg = aggregate.Aggregate()
        agg.id = 123
        agg.hosts = ['foo']
        agg._context = self.context
        agg.add_host('bar')
        self.assertEqual(agg.hosts, ['foo', 'bar'])

    def test_delete_host(self):
        self.mox.StubOutWithMock(db, 'aggregate_host_delete')
        db.aggregate_host_delete(self.context, 123, 'foo')
        self.mox.ReplayAll()
        agg = aggregate.Aggregate()
        agg.id = 123
        agg.hosts = ['foo', 'bar']
        agg._context = self.context
        agg.delete_host('foo')
        self.assertEqual(agg.hosts, ['bar'])

    def test_availability_zone(self):
        # availability_zone is derived from the metadata entry of that name.
        agg = aggregate.Aggregate()
        agg.metadata = {'availability_zone': 'foo'}
        self.assertEqual('foo', agg.availability_zone)

    def test_get_all(self):
        self.mox.StubOutWithMock(db, 'aggregate_get_all')
        db.aggregate_get_all(self.context).AndReturn([fake_aggregate])
        self.mox.ReplayAll()
        aggs = aggregate.AggregateList.get_all(self.context)
        self.assertEqual(1, len(aggs))
        self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)

    def test_by_host(self):
        self.mox.StubOutWithMock(db, 'aggregate_get_by_host')
        db.aggregate_get_by_host(self.context, 'fake-host', key=None,
                                 ).AndReturn([fake_aggregate])
        self.mox.ReplayAll()
        aggs = aggregate.AggregateList.get_by_host(self.context, 'fake-host')
        self.assertEqual(1, len(aggs))
        self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
class TestAggregateObject(test_objects._LocalTest,
                          _TestAggregateObject):
    # Runs the shared cases against the local (in-process) object backend.
    pass
class TestRemoteAggregateObject(test_objects._RemoteTest,
                                _TestAggregateObject):
    # Runs the same cases through the remote (RPC-backed) object path.
    pass
| apache-2.0 |
kashefy/caffe_sandbox | nideep/iow/to_hdf5.py | 3 | 1574 | '''
Created on Jan 15, 2016
@author: kashefy
'''
import os
import numpy as np
import h5py
from nideep.blobs.mat_utils import expand_dims
def arrays_to_h5_fixed(arrs, key, path_dst):
    '''
    Save a list of arrays (all the same size) to an HDF5 file as one dataset
    stored under a single key. Each array is expanded to 3 dimensions first.
    '''
    expanded = [expand_dims(arr, 3) for arr in arrs]
    with h5py.File(path_dst, 'w') as h5_file:
        h5_file[key] = expanded
def split_hdf5(fpath_src, dir_dst, tot_floats=(20 * 1024 * 1024)):
    '''
    Split a large HDF5 file into several smaller ones in dir_dst.

    The split size is chosen so that the largest dataset contributes at most
    tot_floats elements per output file; every key is sliced along axis 0 by
    the same row ranges so the keys stay aligned across splits.

    Returns the list of paths to the files written
    (named <srcname>_000<ext>, <srcname>_001<ext>, ...).

    Raises IOError if dir_dst is not an existing directory.
    '''
    if not os.path.isdir(dir_dst):
        # was: raise(IOError, "...") which raises a tuple -- a TypeError on
        # Python 3 and a message-less IOError on Python 2.
        raise IOError("%s is not a directory." % (dir_dst,))
    name_, ext = os.path.splitext(os.path.basename(fpath_src))
    dst_paths = []
    with h5py.File(fpath_src, 'r') as h_src:
        # list() because h5py returns a non-indexable view on Python 3.
        keys = list(h_src.keys())
        # Find the dataset with the most elements per row; it dictates how
        # many rows fit into one split of at most tot_floats elements.
        argmax_shape = np.argmax(np.prod([h_src[key].shape for key in keys], axis=1))
        max_shape = h_src[keys[argmax_shape]].shape
        split_sz = int(tot_floats / np.prod(max_shape[1:]))  # rows per split
        split_count = 0
        num_saved = 0
        while num_saved < max_shape[0]:
            fpath_dst = os.path.join(dir_dst, '%s_%03d%s' % (name_,
                                                             split_count,
                                                             ext))
            # First split creates the file, later ones would append -- each
            # destination is only written once, so 'a' just matches the
            # original behavior defensively.
            mode = 'w' if num_saved == 0 else 'a'
            with h5py.File(fpath_dst, mode) as h_dst:
                for key in keys:
                    h_dst[key] = h_src[key][num_saved:num_saved + split_sz]
            dst_paths.append(fpath_dst)
            num_saved += split_sz
            split_count += 1
    return dst_paths
| bsd-2-clause |
alexanderturner/ansible | lib/ansible/modules/storage/netapp/netapp_e_storagepool.py | 45 | 39823 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: netapp_e_storagepool
short_description: Manage disk groups and disk pools
version_added: '2.2'
description:
- Create or remove disk groups and disk pools for NetApp E-series storage arrays.
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
example:
- https://prod-1.wahoo.acme.com/devmgr/v2
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
required: true
description:
- The ID of the array to manage (as configured on the web services proxy).
state:
required: true
description:
- Whether the specified storage pool should exist or not.
- Note that removing a storage pool currently requires the removal of all defined volumes first.
choices: ['present', 'absent']
name:
required: true
description:
- The name of the storage pool to manage
criteria_drive_count:
description:
- The number of disks to use for building the storage pool. The pool will be expanded if this number exceeds the number of disks already in place
criteria_drive_type:
description:
- The type of disk (hdd or ssd) to use when searching for candidates to use.
choices: ['hdd','ssd']
criteria_size_unit:
description:
- The unit used to interpret size parameters
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
criteria_drive_min_size:
description:
- The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool.
criteria_min_usable_capacity:
description:
      - The minimum size of the storage pool (in size_unit). The pool will be expanded if this value exceeds its current size.
criteria_drive_interface_type:
description:
- The interface type to use when selecting drives for the storage pool (no value means all interface types will be considered)
choices: ['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata']
criteria_drive_require_fde:
description:
- Whether full disk encryption ability is required for drives to be added to the storage pool
raid_level:
required: true
choices: ['raidAll', 'raid0', 'raid1', 'raid3', 'raid5', 'raid6', 'raidDiskPool']
description:
- "Only required when the requested state is 'present'. The RAID level of the storage pool to be created."
erase_secured_drives:
required: false
choices: ['true', 'false']
description:
- Whether to erase secured disks before adding to storage pool
secure_pool:
required: false
choices: ['true', 'false']
description:
- Whether to convert to a secure storage pool. Will only work if all drives in the pool are security capable.
reserve_drive_count:
required: false
description:
      - Set the number of drives reserved by the storage pool for reconstruction operations. Only valid on RAID disk pools.
remove_volumes:
required: false
default: False
description:
- Prior to removing a storage pool, delete all volumes in the pool.
author: Kevin Hulquest (@hulquest)
'''
EXAMPLES = '''
- name: No disk groups
netapp_e_storagepool:
ssid: "{{ ssid }}"
name: "{{ item }}"
state: absent
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
'''
RETURN = '''
msg:
description: Success message
returned: success
type: string
sample: Json facts for the pool that was created.
'''
import json
import logging
from traceback import format_exc
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP(S) request and parse the JSON response body.

    Returns a ``(status_code, data)`` tuple where ``data`` is the decoded
    JSON body (or None for an empty body). Unless ``ignore_errors`` is set,
    raises Exception on an unparsable body or on a status code >= 400.
    Arguments mirror ``ansible.module_utils.urls.open_url``.
    """
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        # The HTTPError doubles as a response object; keep its file handle so
        # the error body can still be read and parsed below.
        err = get_exception()
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except Exception:
        # was a bare 'except:' which also swallowed SystemExit and
        # KeyboardInterrupt; only read/JSON-decode failures belong here.
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data
def select(predicate, iterable):
    """Lazily yield the items of *iterable* that satisfy *predicate*.

    A predicate of None filters on truthiness, mirroring filter(None, ...)
    on both Python 2 and 3 (but always lazily).
    """
    test = bool if predicate is None else predicate
    for item in iterable:
        if test(item):
            yield item
class groupby(object):
    """Python 2/3 clone of itertools.groupby.

    Iterating yields ``(key, group_iterator)`` pairs for consecutive items
    sharing the same key. Like itertools.groupby, each group iterator is only
    valid until the parent iterator advances, and unconsumed group items are
    skipped automatically.

    Fixes over the original: defines ``__next__`` (the original only had
    ``next``, so it was broken on Python 3), and stops ``StopIteration`` from
    escaping the ``_grouper`` generator (a RuntimeError under PEP 479 on
    Python 3.7+).
    """
    def __init__(self, iterable, key=None):
        if key is None:
            key = lambda x: x
        self.keyfunc = key
        self.it = iter(iterable)
        # A fresh object() sentinel can never equal a real key, forcing the
        # first next() call to advance the underlying iterator.
        self.tgtkey = self.currkey = self.currvalue = object()

    def __iter__(self):
        return self

    def next(self):
        # Skip any items left over from a previous, possibly unconsumed group.
        while self.currkey == self.tgtkey:
            self.currvalue = next(self.it)  # Exit on StopIteration
            self.currkey = self.keyfunc(self.currvalue)
        self.tgtkey = self.currkey
        return (self.currkey, self._grouper(self.tgtkey))

    # Python 3 iterator protocol.
    __next__ = next

    def _grouper(self, tgtkey):
        while self.currkey == tgtkey:
            yield self.currvalue
            try:
                self.currvalue = next(self.it)
            except StopIteration:
                # PEP 479: StopIteration must not escape a generator body.
                return
            self.currkey = self.keyfunc(self.currvalue)
self.currkey = self.keyfunc(self.currvalue)
class NetAppESeriesStoragePool(object):
    def __init__(self):
        """Parse module arguments and cache the storage-pool settings."""
        # Lazily populated cache of the pool's current drives (set elsewhere).
        self._sp_drives_cached = None

        # Multipliers turning the user-facing criteria_size_unit into bytes.
        self._size_unit_map = dict(
            bytes=1,
            b=1,
            kb=1024,
            mb=1024 ** 2,
            gb=1024 ** 3,
            tb=1024 ** 4,
            pb=1024 ** 5,
            eb=1024 ** 6,
            zb=1024 ** 7,
            yb=1024 ** 8
        )

        argument_spec = basic_auth_argument_spec()
        argument_spec.update(dict(
            api_username=dict(type='str', required=True),
            api_password=dict(type='str', required=True, no_log=True),
            api_url=dict(type='str', required=True),
            state=dict(required=True, choices=['present', 'absent'], type='str'),
            ssid=dict(required=True, type='str'),
            name=dict(required=True, type='str'),
            criteria_size_unit=dict(default='gb', type='str'),
            criteria_drive_count=dict(type='int'),
            criteria_drive_interface_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'],
                                               type='str'),
            criteria_drive_type=dict(choices=['ssd', 'hdd'], type='str'),
            criteria_drive_min_size=dict(type='int'),
            criteria_drive_require_fde=dict(type='bool'),
            criteria_min_usable_capacity=dict(type='int'),
            raid_level=dict(
                choices=['raidUnsupported', 'raidAll', 'raid0', 'raid1', 'raid3', 'raid5', 'raid6', 'raidDiskPool']),
            erase_secured_drives=dict(type='bool'),
            log_path=dict(type='str'),
            remove_drives=dict(type='list'),
            secure_pool=dict(type='bool', default=False),
            reserve_drive_count=dict(type='int'),
            remove_volumes=dict(type='bool', default=False)
        ))

        self.module = AnsibleModule(
            argument_spec=argument_spec,
            required_if=[
                ('state', 'present', ['raid_level'])
            ],
            mutually_exclusive=[

            ],
            # TODO: update validation for various selection criteria
            supports_check_mode=True
        )

        p = self.module.params

        log_path = p['log_path']

        # logging setup
        self._logger = logging.getLogger(self.__class__.__name__)
        self.debug = self._logger.debug

        if log_path:
            logging.basicConfig(level=logging.DEBUG, filename=log_path)

        self.state = p['state']
        self.ssid = p['ssid']
        self.name = p['name']
        # NOTE(review): 'validate_certs' is not in the spec above -- presumably
        # contributed by basic_auth_argument_spec(); verify.
        self.validate_certs = p['validate_certs']

        self.criteria_drive_count = p['criteria_drive_count']
        self.criteria_drive_type = p['criteria_drive_type']
        self.criteria_size_unit = p['criteria_size_unit']
        self.criteria_drive_min_size = p['criteria_drive_min_size']
        self.criteria_min_usable_capacity = p['criteria_min_usable_capacity']
        self.criteria_drive_interface_type = p['criteria_drive_interface_type']
        self.criteria_drive_require_fde = p['criteria_drive_require_fde']

        self.raid_level = p['raid_level']
        self.erase_secured_drives = p['erase_secured_drives']
        self.remove_drives = p['remove_drives']
        self.secure_pool = p['secure_pool']
        self.reserve_drive_count = p['reserve_drive_count']
        self.remove_volumes = p['remove_volumes']

        try:
            self.api_usr = p['api_username']
            self.api_pwd = p['api_password']
            self.api_url = p['api_url']
        except KeyError:
            self.module.fail_json(msg="You must pass in api_username "
                                      "and api_password and api_url to the module.")

        self.post_headers = dict(Accept="application/json")
        self.post_headers['Content-Type'] = 'application/json'
# Quick and dirty drive selector, since the one provided by web service proxy is broken for min_disk_size as of 2016-03-12.
# Doesn't really need to be a class once this is in module_utils or retired- just groups everything together so we
# can copy/paste to other modules more easily.
# Filters all disks by specified criteria, then groups remaining disks by capacity, interface and disk type, and selects
# the first set that matches the specified count and/or aggregate capacity.
# class DriveSelector(object):
def filter_drives(
        self,
        drives,                   # raw drives resp
        interface_type=None,      # sas, sata, fibre, etc
        drive_type=None,          # ssd/hdd
        spindle_speed=None,       # 7200, 10000, 15000, ssd (=0)
        min_drive_size=None,
        max_drive_size=None,
        fde_required=None,
        size_unit='gb',
        min_total_capacity=None,
        min_drive_count=None,
        exact_drive_count=None,
        raid_level=None
):
    """Filter the raw drive list by the given criteria, then return the first
    homogeneous drive set (same capacity, interface and media type) that
    satisfies the requested count and/or aggregate capacity.

    Raises Exception when neither a capacity nor a count criterion was given,
    or when no candidate set can be assembled.
    """
    if min_total_capacity is None and exact_drive_count is None:
        raise Exception("One of criteria_min_total_capacity or criteria_drive_count must be specified.")

    # convert the caller's size unit to bytes once, up front
    if min_total_capacity:
        min_total_capacity = min_total_capacity * self._size_unit_map[size_unit]

    # filter clearly invalid/unavailable drives first
    drives = select(lambda d: self._is_valid_drive(d), drives)
    if interface_type:
        drives = select(lambda d: d['phyDriveType'] == interface_type, drives)
    if drive_type:
        drives = select(lambda d: d['driveMediaType'] == drive_type, drives)
    if spindle_speed is not None:  # 0 is valid for ssds
        drives = select(lambda d: d['spindleSpeed'] == spindle_speed, drives)
    if min_drive_size:
        min_drive_size_bytes = min_drive_size * self._size_unit_map[size_unit]
        drives = select(lambda d: int(d['rawCapacity']) >= min_drive_size_bytes, drives)
    if max_drive_size:
        max_drive_size_bytes = max_drive_size * self._size_unit_map[size_unit]
        drives = select(lambda d: int(d['rawCapacity']) <= max_drive_size_bytes, drives)
    if fde_required:
        drives = select(lambda d: d['fdeCapable'], drives)

    # initial implementation doesn't have a preference for any of these values...
    # just return the first set we find that matches the requested disk count and/or minimum total capacity
    # NOTE(review): itertools-style groupby only groups *consecutive* items; this
    # assumes the drives response arrives ordered by capacity/interface/type --
    # TODO confirm the API ordering (or pre-sort before grouping).
    for (cur_capacity, drives_by_capacity) in groupby(drives, lambda d: int(d['rawCapacity'])):
        for (cur_interface_type, drives_by_interface_type) in groupby(drives_by_capacity,
                                                                      lambda d: d['phyDriveType']):
            for (cur_drive_type, drives_by_drive_type) in groupby(drives_by_interface_type,
                                                                  lambda d: d['driveMediaType']):
                # listify so we can consume more than once
                drives_by_drive_type = list(drives_by_drive_type)
                candidate_set = list()  # reset candidate list on each iteration of the innermost loop
                if exact_drive_count:
                    if len(drives_by_drive_type) < exact_drive_count:
                        continue  # we know this set is too small, move on
                # grow the candidate set one drive at a time until it passes
                for drive in drives_by_drive_type:
                    candidate_set.append(drive)
                    if self._candidate_set_passes(candidate_set, min_capacity_bytes=min_total_capacity,
                                                  min_drive_count=min_drive_count,
                                                  exact_drive_count=exact_drive_count, raid_level=raid_level):
                        return candidate_set
    raise Exception("couldn't find an available set of disks to match specified criteria")
def _is_valid_drive(self, d):
is_valid = d['available'] \
and d['status'] == 'optimal' \
and not d['pfa'] \
and not d['removed'] \
and not d['uncertified'] \
and not d['invalidDriveData'] \
and not d['nonRedundantAccess']
return is_valid
def _candidate_set_passes(self, candidate_set, min_capacity_bytes=None, min_drive_count=None,
                          exact_drive_count=None, raid_level=None):
    """Return True when ``candidate_set`` satisfies both the drive-count rules
    and (when requested) the minimum usable capacity for ``raid_level``."""
    count_ok = self._is_drive_count_valid(len(candidate_set),
                                          min_drive_count=min_drive_count,
                                          exact_drive_count=exact_drive_count,
                                          raid_level=raid_level)
    if not count_ok:
        return False
    if min_capacity_bytes is None:
        return True
    # TODO: this assumes candidate_set is all the same size- if we want to
    # allow wastage, need to update to use min size of set
    usable = self._calculate_usable_capacity(int(candidate_set[0]['rawCapacity']),
                                             len(candidate_set),
                                             raid_level=raid_level)
    return usable >= min_capacity_bytes
def _calculate_usable_capacity(self, disk_size_bytes, disk_count, raid_level=None):
if raid_level in [None, 'raid0']:
return disk_size_bytes * disk_count
if raid_level == 'raid1':
return (disk_size_bytes * disk_count) / 2
if raid_level in ['raid3', 'raid5']:
return (disk_size_bytes * disk_count) - disk_size_bytes
if raid_level in ['raid6', 'raidDiskPool']:
return (disk_size_bytes * disk_count) - (disk_size_bytes * 2)
raise Exception("unsupported raid_level: %s" % raid_level)
def _is_drive_count_valid(self, drive_count, min_drive_count=0, exact_drive_count=None, raid_level=None):
if exact_drive_count and exact_drive_count != drive_count:
return False
if raid_level == 'raidDiskPool':
if drive_count < 11:
return False
if raid_level == 'raid1':
if drive_count % 2 != 0:
return False
if raid_level in ['raid3', 'raid5']:
if drive_count < 3:
return False
if raid_level == 'raid6':
if drive_count < 4:
return False
if min_drive_count and drive_count < min_drive_count:
return False
return True
def get_storage_pool(self, storage_pool_name):
    """Return the detail dict of the storage pool named ``storage_pool_name``,
    or None when no pool by that name exists on the array.

    On a request failure the module exits: with a friendly message when the
    array reports 404 and the requested state is 'absent', otherwise with an
    error message.
    """
    # global ifilter
    self.debug("fetching storage pools")
    # map the storage pool name to its id
    try:
        (rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
                             headers=dict(Accept="application/json"), url_username=self.api_usr,
                             url_password=self.api_pwd, validate_certs=self.validate_certs)
    except Exception:
        err = get_exception()
        # NOTE(review): assumes the exception's first positional arg is the
        # HTTP status code -- TODO confirm against request()'s contract
        rc = err.args[0]
        if rc == 404 and self.state == 'absent':
            self.module.exit_json(
                msg="Storage pool [%s] did not exist." % (self.name))
        else:
            err = get_exception()
            # NOTE(review): this error path uses exit_json (success) rather
            # than fail_json -- confirm that is intentional
            self.module.exit_json(
                msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]. RC[%s]." %
                    (self.ssid, str(err), self.state, rc))
    self.debug("searching for storage pool '%s'" % storage_pool_name)
    # select() yields matching pools; take the first match, or None
    pool_detail = next(select(lambda a: a['name'] == storage_pool_name, resp), None)
    if pool_detail:
        found = 'found'
    else:
        found = 'not found'
    self.debug(found)
    return pool_detail
def get_candidate_disks(self):
    """Return a list of disk ids matching the module's selection criteria.

    The drive list is fetched from the array and filtered client-side via
    filter_drives() -- per NetApp's request, because the web service proxy's
    own candidate selection (POST /drives with driveCapacityMin) was broken
    as of 2016-03-12; switch back to a server-side POST if/when it is fixed.
    Exits/fails the module when the fetch fails or no candidate set exists.
    """
    self.debug("getting candidate disks...")
    try:
        (rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), method='GET',
                                    url_username=self.api_usr, url_password=self.api_pwd,
                                    validate_certs=self.validate_certs)
    except Exception:  # narrowed from a bare except so SystemExit/KeyboardInterrupt propagate
        err = get_exception()
        self.module.exit_json(
            msg="Failed to fetch disk drives. Array id [%s]. Error[%s]." % (self.ssid, str(err)))

    try:
        candidate_set = self.filter_drives(drives_resp,
                                           exact_drive_count=self.criteria_drive_count,
                                           drive_type=self.criteria_drive_type,
                                           min_drive_size=self.criteria_drive_min_size,
                                           raid_level=self.raid_level,
                                           size_unit=self.criteria_size_unit,
                                           min_total_capacity=self.criteria_min_usable_capacity,
                                           interface_type=self.criteria_drive_interface_type,
                                           fde_required=self.criteria_drive_require_fde
                                           )
    except Exception:  # filter_drives raises a plain Exception when no set matches
        err = get_exception()
        self.module.fail_json(
            msg="Failed to allocate adequate drive count. Id [%s]. Error [%s]." % (self.ssid, str(err)))

    disk_ids = [d['id'] for d in candidate_set]
    return disk_ids
def create_storage_pool(self):
    """Create the storage pool from self.disk_ids, then (optionally) enable
    drive security on it when secure_pool is requested.

    Exits the module with an error message on any request failure.
    """
    self.debug("creating storage pool...")
    sp_add_req = dict(
        raidLevel=self.raid_level,
        diskDriveIds=self.disk_ids,
        name=self.name
    )
    if self.erase_secured_drives:
        sp_add_req['eraseSecuredDrives'] = self.erase_secured_drives
    try:
        (rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
                             data=json.dumps(sp_add_req), headers=self.post_headers, method='POST',
                             url_username=self.api_usr, url_password=self.api_pwd,
                             validate_certs=self.validate_certs,
                             timeout=120)
    except Exception:  # narrowed from a bare except
        err = get_exception()
        # BUG FIX: the pool does not exist yet when creation fails, so
        # self.pool_detail is None here -- the original dereferenced
        # self.pool_detail['id'] and raised TypeError, masking the real
        # error. Report the pool by name instead.
        self.module.exit_json(
            msg="Failed to create storage pool. Pool name [%s]. Array id [%s]. Error[%s]." % (self.name,
                                                                                              self.ssid,
                                                                                              str(err)))

    # re-fetch so self.pool_detail reflects the newly created pool
    self.pool_detail = self.get_storage_pool(self.name)

    if self.secure_pool:
        secure_pool_data = dict(securePool=True)
        try:
            (retc, r) = request(
                self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail['id']),
                data=json.dumps(secure_pool_data), headers=self.post_headers, method='POST',
                url_username=self.api_usr,
                url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120, ignore_errors=True)
        except Exception:  # narrowed from a bare except
            err = get_exception()
            pool_id = self.pool_detail['id']
            self.module.exit_json(
                msg="Failed to update storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
                                                                                                self.ssid,
                                                                                                str(err)))
@property
def needs_raid_level_migration(self):
    """True when the pool's current raid level differs from the requested one.

    Fails the module immediately for disk pools, whose raid level cannot be
    changed -- checked here so the error also surfaces under check mode.
    """
    current = self.pool_detail['raidLevel']
    if current == self.raid_level:
        return False
    if current == 'raidDiskPool':
        self.module.fail_json(msg="raid level cannot be changed for disk pools")
    return True
def migrate_raid_level(self):
    """POST a raid-type migration request to move the pool to self.raid_level.

    NOTE(review): the URL interpolates self.name where every sibling pool
    endpoint uses self.pool_detail['id'] -- confirm the proxy accepts the
    pool *name* on the raid-type-migration endpoint.
    """
    self.debug("migrating storage pool to raid level '%s'..." % self.raid_level)
    sp_raid_migrate_req = dict(
        raidLevel=self.raid_level
    )
    try:
        (rc, resp) = request(
            self.api_url + "/storage-systems/%s/storage-pools/%s/raid-type-migration" % (self.ssid,
                                                                                         self.name),
            data=json.dumps(sp_raid_migrate_req), headers=self.post_headers, method='POST',
            url_username=self.api_usr,
            url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
    except:
        err = get_exception()
        pool_id = self.pool_detail['id']
        self.module.exit_json(
            msg="Failed to change the raid level of storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
                pool_id, self.ssid, str(err)))
@property
def sp_drives(self, exclude_hotspares=True):
    """Drives currently assigned to this storage pool, fetched once and cached
    in self._sp_drives_cached.

    NOTE(review): ``exclude_hotspares`` is unreachable -- a property getter is
    always invoked with only ``self``, so the parameter is permanently True
    and the else branch below is dead code.
    """
    if not self._sp_drives_cached:
        self.debug("fetching drive list...")
        try:
            (rc, resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), method='GET',
                                 url_username=self.api_usr, url_password=self.api_pwd,
                                 validate_certs=self.validate_certs)
        except:
            err = get_exception()
            pool_id = self.pool_detail['id']
            self.module.exit_json(
                msg="Failed to fetch disk drives. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id, self.ssid, str(err)))

        sp_id = self.pool_detail['id']
        # keep only drives whose volume-group ref points at this pool
        if exclude_hotspares:
            self._sp_drives_cached = [d for d in resp if d['currentVolumeGroupRef'] == sp_id and not d['hotSpare']]
        else:
            self._sp_drives_cached = [d for d in resp if d['currentVolumeGroupRef'] == sp_id]

    return self._sp_drives_cached
@property
def reserved_drive_count_differs(self):
    """True when the disk pool's reconstruction reserved drive count differs
    from the requested reserve_drive_count."""
    current = int(self.pool_detail['volumeGroupData']['diskPoolData']['reconstructionReservedDriveCount'])
    return current != self.reserve_drive_count
@property
def needs_expansion(self):
    """True when the pool has fewer drives than requested, or (when a minimum
    usable capacity was given) less raided space than requested."""
    if self.criteria_drive_count > len(self.sp_drives):
        return True
    if self.criteria_min_usable_capacity:
        requested_bytes = self.criteria_min_usable_capacity * self._size_unit_map[self.criteria_size_unit]
        # TODO: is totalRaidedSpace the best attribute for "how big is this SP"?
        if requested_bytes > int(self.pool_detail['totalRaidedSpace']):
            return True
    return False
def get_expansion_candidate_drives(self):
    """Accumulate candidate drive sets from the pool's /expand endpoint until
    both the requested drive count and the requested capacity increase are
    covered, and return the selected drive ids as a list.

    Fails the module when called while no expansion is needed, or when the
    available candidates cannot satisfy the criteria.
    """
    # sanity checks; don't call this if we can't/don't need to expand
    if not self.needs_expansion:
        self.module.fail_json(msg="can't get expansion candidates when pool doesn't need expansion")

    self.debug("fetching expansion candidate drives...")
    try:
        (rc, resp) = request(
            self.api_url + "/storage-systems/%s/storage-pools/%s/expand" % (self.ssid,
                                                                            self.pool_detail['id']),
            method='GET', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
            timeout=120)
    except:
        err = get_exception()
        pool_id = self.pool_detail['id']
        self.module.exit_json(
            msg="Failed to fetch candidate drives for storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
                pool_id, self.ssid, str(err)))

    current_drive_count = len(self.sp_drives)
    current_capacity_bytes = int(self.pool_detail['totalRaidedSpace'])  # TODO: is this the right attribute to use?

    # how much capacity (in bytes) the caller asked for, if any
    if self.criteria_min_usable_capacity:
        requested_capacity_bytes = self.criteria_min_usable_capacity * self._size_unit_map[self.criteria_size_unit]
    else:
        requested_capacity_bytes = current_capacity_bytes

    # how many disks we still need to add (at least one, or we wouldn't be here)
    if self.criteria_drive_count:
        minimum_disks_to_add = max((self.criteria_drive_count - current_drive_count), 1)
    else:
        minimum_disks_to_add = 1

    minimum_bytes_to_add = max(requested_capacity_bytes - current_capacity_bytes, 0)

    # FUTURE: allow more control over expansion candidate selection?
    # loop over candidate disk sets and add until we've met both criteria

    added_drive_count = 0
    added_capacity_bytes = 0

    drives_to_add = set()

    for s in resp:
        # don't trust the API not to give us duplicate drives across candidate sets, especially in multi-drive sets
        candidate_drives = s['drives']
        if len(drives_to_add.intersection(candidate_drives)) != 0:
            # duplicate, skip
            continue
        drives_to_add.update(candidate_drives)
        added_drive_count += len(candidate_drives)
        added_capacity_bytes += int(s['usableCapacity'])
        if added_drive_count >= minimum_disks_to_add and added_capacity_bytes >= minimum_bytes_to_add:
            break

    if (added_drive_count < minimum_disks_to_add) or (added_capacity_bytes < minimum_bytes_to_add):
        self.module.fail_json(
            msg="unable to find at least %s drives to add that would add at least %s bytes of capacity" % (
                minimum_disks_to_add, minimum_bytes_to_add))

    return list(drives_to_add)
def expand_storage_pool(self):
    """Add drives (chosen by get_expansion_candidate_drives) to the pool via
    the /expand endpoint. Exits the module on request failure.
    """
    drives_to_add = self.get_expansion_candidate_drives()

    self.debug("adding %s drives to storage pool..." % len(drives_to_add))
    sp_expand_req = dict(
        drives=drives_to_add
    )
    try:
        request(
            self.api_url + "/storage-systems/%s/storage-pools/%s/expand" % (self.ssid,
                                                                            self.pool_detail['id']),
            data=json.dumps(sp_expand_req), headers=self.post_headers, method='POST', url_username=self.api_usr,
            url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
    except Exception:  # narrowed from a bare except so SystemExit/KeyboardInterrupt propagate
        err = get_exception()
        pool_id = self.pool_detail['id']
        self.module.exit_json(
            msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
                                                                                                   self.ssid,
                                                                                                   str(err)))

    # TODO: check response
    # TODO: support blocking wait?
def reduce_drives(self, drive_list):
    """Remove ``drive_list`` drives from the pool via the /reduction endpoint.

    NOTE(review): the guard below checks that every drive *currently in the
    pool* appears in ``drive_list`` -- i.e. it only passes when the caller
    lists the pool's full membership. The error message suggests the intent
    was the opposite (verify each requested drive exists in the pool);
    confirm the intended direction. It also compares caller-supplied entries
    against the drive detail dicts from sp_drives -- verify the two
    representations actually match.
    """
    if all(drive in drive_list for drive in self.sp_drives):
        # all the drives passed in are present in the system
        pass
    else:
        self.module.fail_json(
            msg="One of the drives you wish to remove does not currently exist in the storage pool you specified")

    try:
        (rc, resp) = request(
            self.api_url + "/storage-systems/%s/storage-pools/%s/reduction" % (self.ssid,
                                                                               self.pool_detail['id']),
            data=json.dumps(drive_list), headers=self.post_headers, method='POST', url_username=self.api_usr,
            url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
    except:
        err = get_exception()
        pool_id = self.pool_detail['id']
        self.module.exit_json(
            msg="Failed to remove drives from storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
                pool_id, self.ssid, str(err)))
def update_reserve_drive_count(self, qty):
    """POST an update setting the disk pool's reserved (reconstruction) drive
    count to ``qty``. Exits the module on request failure.
    """
    data = dict(reservedDriveCount=qty)
    try:
        (rc, resp) = request(
            self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail['id']),
            data=json.dumps(data), headers=self.post_headers, method='POST', url_username=self.api_usr,
            url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
    except Exception:  # narrowed from a bare except so SystemExit/KeyboardInterrupt propagate
        err = get_exception()
        pool_id = self.pool_detail['id']
        self.module.exit_json(
            msg="Failed to update reserve drive count. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
                                                                                                   self.ssid,
                                                                                                   str(err)))
def apply(self):
    """Reconcile the array with the requested state.

    Phase 1 (runs under check mode too): fetch the pool, decide whether any
    change is needed, and pre-validate (candidate disk selection, immutable
    attributes, raid-level migratability). Phase 2 (skipped in check mode):
    create/expand/reduce/migrate/secure or delete the pool. Always finishes
    via module.exit_json.
    """
    changed = False
    pool_exists = False

    self.pool_detail = self.get_storage_pool(self.name)

    if self.pool_detail:
        pool_exists = True
        pool_id = self.pool_detail['id']

        if self.state == 'absent':
            self.debug("CHANGED: storage pool exists, but requested state is 'absent'")
            changed = True

        elif self.state == 'present':
            # sanity checks first- we can't change these, so we'll bomb if they're specified
            if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail['driveMediaType']:
                self.module.fail_json(
                    msg="drive media type %s cannot be changed to %s" % (self.pool_detail['driveMediaType'],
                                                                         self.criteria_drive_type))

            # now the things we can change...
            if self.needs_expansion:
                self.debug("CHANGED: storage pool needs expansion")
                changed = True

            if self.needs_raid_level_migration:
                self.debug(
                    "CHANGED: raid level migration required; storage pool uses '%s', requested is '%s'" % (
                        self.pool_detail['raidLevel'], self.raid_level))
                changed = True

            # if self.reserved_drive_count_differs:
            # changed = True

            # TODO: validate other state details? (pool priority, alert threshold)

            # per FPoole and others, pool reduce operations will not be supported. Automatic "smart" reduction
            # presents a difficult parameter issue, as the disk count can increase due to expansion, so we
            # can't just use disk count > criteria_drive_count.

    else:  # pool does not exist
        if self.state == 'present':
            self.debug("CHANGED: storage pool does not exist, but requested state is 'present'")
            changed = True

            # ensure we can get back a workable set of disks
            # (doing this early so candidate selection runs under check mode)
            self.disk_ids = self.get_candidate_disks()
        else:
            self.module.exit_json(msg="Storage pool [%s] did not exist." % (self.name))

    if changed and not self.module.check_mode:
        # apply changes
        if self.state == 'present':
            if not pool_exists:
                self.create_storage_pool()
            else:  # pool exists but differs, modify...
                if self.needs_expansion:
                    self.expand_storage_pool()

                if self.remove_drives:
                    self.reduce_drives(self.remove_drives)

                if self.needs_raid_level_migration:
                    self.migrate_raid_level()

                # if self.reserved_drive_count_differs:
                #     self.update_reserve_drive_count(self.reserve_drive_count)

                if self.secure_pool:
                    secure_pool_data = dict(securePool=True)
                    try:
                        (retc, r) = request(
                            self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid,
                                                                                     self.pool_detail[
                                                                                         'id']),
                            data=json.dumps(secure_pool_data), headers=self.post_headers, method='POST',
                            url_username=self.api_usr, url_password=self.api_pwd,
                            validate_certs=self.validate_certs, timeout=120, ignore_errors=True)
                    except:
                        err = get_exception()
                        # NOTE(review): message text says "delete" but this is the
                        # secure-pool update path -- looks like a copy/paste slip
                        self.module.exit_json(
                            msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
                                pool_id, self.ssid, str(err)))

                    if int(retc) == 422:
                        self.module.fail_json(
                            msg="Error in enabling secure pool. One of the drives in the specified storage pool is likely not security capable")

        elif self.state == 'absent':
            # delete the storage pool
            try:
                remove_vol_opt = ''
                if self.remove_volumes:
                    remove_vol_opt = '?delete-volumes=true'
                (rc, resp) = request(
                    self.api_url + "/storage-systems/%s/storage-pools/%s%s" % (self.ssid, pool_id,
                                                                               remove_vol_opt),
                    method='DELETE',
                    url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
                    timeout=120)
            except:
                err = get_exception()
                self.module.exit_json(
                    msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
                                                                                                    self.ssid,
                                                                                                    str(err)))

    # NOTE(review): when the pool does not yet exist (create path, or check
    # mode for a to-be-created pool), self.pool_detail may be None here and
    # **None would raise TypeError -- TODO confirm
    self.module.exit_json(changed=changed, **self.pool_detail)
def main():
    """Module entry point: build the storage pool object and apply the
    requested state, logging any unexpected traceback before re-raising."""
    pool = NetAppESeriesStoragePool()
    try:
        pool.apply()
    except Exception:
        # capture the traceback into the debug log, then let Ansible report it
        exc = get_exception()
        pool.debug("Exception in apply(): \n%s" % format_exc(exc))
        raise


if __name__ == '__main__':
    main()
| gpl-3.0 |
oleksa-pavlenko/gae-django-project-template | django/db/models/sql/expressions.py | 56 | 4487 | import copy
from django.core.exceptions import FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import FieldDoesNotExist
class SQLEvaluator(object):
    """Compile a Django ExpressionNode tree (e.g. F() expressions) into SQL.

    The expression is walked twice: once at construction time ("prepare") to
    resolve field references into (node, column) pairs via the query's join
    machinery, and once at as_sql() time ("evaluate") to render the SQL
    string and its parameters.
    """

    def __init__(self, expression, query, allow_joins=True, reuse=None):
        self.expression = expression
        self.opts = query.get_meta()
        self.reuse = reuse
        # list of (node, col) pairs; col is either an object exposing
        # as_sql() (e.g. an aggregate) or an (alias, column) tuple
        self.cols = []

        self.expression.prepare(self, query, allow_joins)

    def relabeled_clone(self, change_map):
        """Return a copy of this evaluator with table aliases renamed
        according to *change_map*."""
        clone = copy.copy(self)
        clone.cols = []
        for node, col in self.cols:
            if hasattr(col, 'relabeled_clone'):
                clone.cols.append((node, col.relabeled_clone(change_map)))
            else:
                clone.cols.append((node,
                                   (change_map.get(col[0], col[0]), col[1])))
        return clone

    def get_group_by_cols(self):
        """Return the (alias, column) tuples this expression contributes to
        a GROUP BY clause."""
        cols = []
        for node, col in self.cols:
            if hasattr(node, 'get_group_by_cols'):
                cols.extend(node.get_group_by_cols())
            elif isinstance(col, tuple):
                cols.append(col)
        return cols

    def prepare(self):
        return self

    def as_sql(self, qn, connection):
        return self.expression.evaluate(self, qn, connection)

    #####################################################
    # Visitor methods for initial expression preparation #
    #####################################################

    def prepare_node(self, node, query, allow_joins):
        for child in node.children:
            if hasattr(child, 'prepare'):
                child.prepare(self, query, allow_joins)

    def prepare_leaf(self, node, query, allow_joins):
        """Resolve a leaf's field reference into column(s), setting up any
        joins required; raises FieldError on an unresolvable name."""
        if not allow_joins and LOOKUP_SEP in node.name:
            raise FieldError("Joined field references are not permitted in this query")

        field_list = node.name.split(LOOKUP_SEP)
        if node.name in query.aggregates:
            self.cols.append((node, query.aggregate_select[node.name]))
        else:
            try:
                field, sources, opts, join_list, path = query.setup_joins(
                    field_list, query.get_meta(),
                    query.get_initial_alias(), self.reuse)
                self._used_joins = join_list
                targets, _, join_list = query.trim_joins(sources, join_list, path)
                if self.reuse is not None:
                    self.reuse.update(join_list)
                for t in targets:
                    self.cols.append((node, (join_list[-1], t.column)))
            except FieldDoesNotExist:
                # BUG FIX: the original interpolated self.name, but the
                # evaluator has no 'name' attribute -- the offending keyword
                # lives on the leaf node, so the except clause raised
                # AttributeError instead of the intended FieldError.
                raise FieldError("Cannot resolve keyword %r into field. "
                                 "Choices are: %s" % (node.name,
                                                      [f.name for f in self.opts.fields]))

    ##################################################
    # Visitor methods for final expression evaluation #
    ##################################################

    def evaluate_node(self, node, qn, connection):
        """Render an internal node by combining its rendered children with
        the node's connector (e.g. '+', '*')."""
        expressions = []
        expression_params = []
        for child in node.children:
            if hasattr(child, 'evaluate'):
                sql, params = child.evaluate(self, qn, connection)
            else:
                # plain value: emit a placeholder and parameterize it
                sql, params = '%s', (child,)

            # parenthesize compound children to preserve precedence
            if len(getattr(child, 'children', [])) > 1:
                format = '(%s)'
            else:
                format = '%s'

            if sql:
                expressions.append(format % sql)
                expression_params.extend(params)

        return connection.ops.combine_expression(node.connector, expressions), expression_params

    def evaluate_leaf(self, node, qn, connection):
        """Render a leaf as its resolved column reference (or delegate to the
        column object's own as_sql)."""
        col = None
        for n, c in self.cols:
            if n is node:
                col = c
                break
        if col is None:
            raise ValueError("Given node not found")
        if hasattr(col, 'as_sql'):
            return col.as_sql(qn, connection)
        else:
            return '%s.%s' % (qn(col[0]), qn(col[1])), []

    def evaluate_date_modifier_node(self, node, qn, connection):
        """Render date +/- timedelta: temporarily pop the timedelta child,
        render the date part, then wrap with the backend's interval SQL."""
        timedelta = node.children.pop()
        sql, params = self.evaluate_node(node, qn, connection)
        node.children.append(timedelta)

        if (timedelta.days == timedelta.seconds == timedelta.microseconds == 0):
            return sql, params

        return connection.ops.date_interval_sql(sql, node.connector, timedelta), params
| mit |
aimas/TuniErp-8.0 | addons/account_bank_statement_extensions/__init__.py | 442 | 1153 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_bank_statement
import res_partner_bank
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
birdonwheels5/p2pool-dgbsha | wstools/tests/test_wsdl.py | 289 | 5568 | #!/usr/bin/env python
############################################################################
# Joshua R. Boverhof, David W. Robertson, LBNL
# See LBNLCopyright for copyright notice!
###########################################################################
import sys, unittest
import ConfigParser
import os
from wstools.Utility import DOM
from wstools.WSDLTools import WSDLReader
from wstools.TimeoutSocket import TimeoutError
from wstools import tests
cwd = os.path.dirname(tests.__file__)
class WSDLToolsTestCase(unittest.TestCase):
    """Loads one WSDL (path or URL supplied by the module-level nameGenerator,
    installed by makeTestSuite) and cross-checks every parsed WSDL/XSD
    collection against the raw DOM."""

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName)

    def setUp(self):
        # pull the next WSDL path/URL from the shared generator
        self.path = nameGenerator.next()
        print self.path
        sys.stdout.flush()

    def __str__(self):
        # include the WSDL path in the test's name for readable reports
        teststr = unittest.TestCase.__str__(self)
        if hasattr(self, "path"):
            return "%s: %s" % (teststr, self.path )
        else:
            return "%s" % (teststr)

    def checkWSDLCollection(self, tag_name, component, key='name'):
        """Assert every <tag_name> element in the WSDL document is present in
        the parsed collection under its `key` attribute."""
        if self.wsdl is None:
            return
        definition = self.wsdl.document.documentElement
        version = DOM.WSDLUriToVersion(definition.namespaceURI)
        nspname = DOM.GetWSDLUri(version)
        for node in DOM.getElements(definition, tag_name, nspname):
            name = DOM.getAttr(node, key)
            comp = component[name]
            self.failUnlessEqual(eval('comp.%s' %key), name)

    def checkXSDCollection(self, tag_name, component, node, key='name'):
        """Assert every <tag_name> child of `node` exists in `component`
        (a missing entry raises KeyError and fails the test)."""
        for cnode in DOM.getElements(node, tag_name):
            name = DOM.getAttr(cnode, key)
            component[name]

    def test_all(self):
        """Load the WSDL then check each collection in turn, tagging self.path
        with the failing stage so the report pinpoints it."""
        try:
            if self.path[:7] == 'http://':
                self.wsdl = WSDLReader().loadFromURL(self.path)
            else:
                self.wsdl = WSDLReader().loadFromFile(self.path)
        except TimeoutError:
            # network fetch timed out: treat as a skip, not a failure
            print "connection timed out"
            sys.stdout.flush()
            return
        except:
            self.path = self.path + ": load failed, unable to start"
            raise

        try:
            self.checkWSDLCollection('service', self.wsdl.services)
        except:
            self.path = self.path + ": wsdl.services"
            raise

        try:
            self.checkWSDLCollection('message', self.wsdl.messages)
        except:
            self.path = self.path + ": wsdl.messages"
            raise

        try:
            self.checkWSDLCollection('portType', self.wsdl.portTypes)
        except:
            self.path = self.path + ": wsdl.portTypes"
            raise

        try:
            self.checkWSDLCollection('binding', self.wsdl.bindings)
        except:
            self.path = self.path + ": wsdl.bindings"
            raise

        try:
            self.checkWSDLCollection('import', self.wsdl.imports, key='namespace')
        except:
            self.path = self.path + ": wsdl.imports"
            raise

        try:
            # each schema must be registered under its target namespace, and
            # every declared XSD component must appear in the parsed schema
            for key in self.wsdl.types.keys():
                schema = self.wsdl.types[key]
                self.failUnlessEqual(key, schema.getTargetNamespace())

            definition = self.wsdl.document.documentElement
            version = DOM.WSDLUriToVersion(definition.namespaceURI)
            nspname = DOM.GetWSDLUri(version)
            for node in DOM.getElements(definition, 'types', nspname):
                for snode in DOM.getElements(node, 'schema'):
                    tns = DOM.findTargetNS(snode)
                    schema = self.wsdl.types[tns]
                    self.schemaAttributesDeclarations(schema, snode)
                    self.schemaAttributeGroupDeclarations(schema, snode)
                    self.schemaElementDeclarations(schema, snode)
                    self.schemaTypeDefinitions(schema, snode)
        except:
            self.path = self.path + ": wsdl.types"
            raise

        if self.wsdl.extensions:
            print 'No check for WSDLTools(%s) Extensions:' %(self.wsdl.name)
            for ext in self.wsdl.extensions: print '\t', ext

    def schemaAttributesDeclarations(self, schema, node):
        self.checkXSDCollection('attribute', schema.attr_decl, node)

    def schemaAttributeGroupDeclarations(self, schema, node):
        self.checkXSDCollection('group', schema.attr_groups, node)

    def schemaElementDeclarations(self, schema, node):
        self.checkXSDCollection('element', schema.elements, node)

    def schemaTypeDefinitions(self, schema, node):
        self.checkXSDCollection('complexType', schema.types, node)
        self.checkXSDCollection('simpleType', schema.types, node)
def setUpOptions(section):
    """Read config.txt (located next to the tests package) and return a
    (ConfigParser, option_count) pair for `section`.

    NOTE(review): both failure paths exit with status 0, even though the
    first one prints "fatal error" -- confirm whether a non-zero exit code
    was intended.
    """
    cp = ConfigParser.ConfigParser()
    cp.read(cwd+'/config.txt')
    if not cp.sections():
        print 'fatal error: configuration file config.txt not present'
        sys.exit(0)
    if not cp.has_section(section):
        print '%s section not present in configuration file, exiting' % section
        sys.exit(0)
    return cp, len(cp.options(section))
def getOption(cp, section):
    """Yield the value of every option in `section`, in declaration order."""
    for _name, value in cp.items(section):
        yield value
def makeTestSuite(section='services_by_file'):
    """Build a TestSuite with one WSDLToolsTestCase run per option in
    `section`, and install the module-level nameGenerator that each test's
    setUp() consumes to obtain its WSDL path."""
    global nameGenerator
    cp, numTests = setUpOptions(section)
    nameGenerator = getOption(cp, section)
    suite = unittest.TestSuite()
    for _ in range(numTests):
        suite.addTest(unittest.makeSuite(WSDLToolsTestCase, 'test_'))
    return suite
def main():
    """Run the suite returned by makeTestSuite via unittest's CLI driver."""
    unittest.main(defaultTest="makeTestSuite")

if __name__ == "__main__" : main()
| gpl-3.0 |
ducminhn/three.js | utils/converters/msgpack/msgpack/__init__.py | 659 | 1385 | # coding: utf-8
from msgpack._version import version
from msgpack.exceptions import *
from collections import namedtuple
class ExtType(namedtuple('ExtType', 'code data')):
    """Immutable (code, data) pair representing a msgpack ext type value."""

    def __new__(cls, code, data):
        # validate before constructing: code must be an int in 0..127,
        # data must be a bytes payload (check order is part of the contract)
        if not isinstance(code, int):
            raise TypeError("code must be int")
        if not isinstance(data, bytes):
            raise TypeError("data must be bytes")
        if code < 0 or code > 127:
            raise ValueError("code must be 0~127")
        return super(ExtType, cls).__new__(cls, code, data)
import os
# Prefer the C-extension Packer/Unpacker; fall back to the pure-Python
# implementations when the extensions fail to import, or when the user
# forces pure Python via the MSGPACK_PUREPYTHON environment variable.
if os.environ.get('MSGPACK_PUREPYTHON'):
    from msgpack.fallback import Packer, unpack, unpackb, Unpacker
else:
    try:
        from msgpack._packer import Packer
        from msgpack._unpacker import unpack, unpackb, Unpacker
    except ImportError:
        from msgpack.fallback import Packer, unpack, unpackb, Unpacker
def pack(o, stream, **kwargs):
    """
    Pack object `o` and write it to `stream`

    See :class:`Packer` for options.
    """
    data = Packer(**kwargs).pack(o)
    stream.write(data)
def packb(o, **kwargs):
    """
    Pack object `o` and return packed bytes

    See :class:`Packer` for options.
    """
    packer = Packer(**kwargs)
    return packer.pack(o)
# alias for compatibility to simplejson/marshal/pickle.
load = unpack    # read one object from a stream
loads = unpackb  # read one object from bytes
dump = pack      # write one object to a stream
dumps = packb    # serialize one object to bytes
| mit |
keedio/hue | desktop/core/ext-py/cx_Oracle-5.1.2/setup.py | 31 | 12987 | """Distutils script for cx_Oracle.
Windows platforms:
python setup.py build --compiler=mingw32 install
Unix platforms
python setup.py build install
"""
import distutils.command
try:
import distutils.command.bdist_msi
except ImportError:
distutils.command.bdist_msi = None
try:
import distutils.command.bdist_wininst
except ImportError:
distutils.command.bdist_wininst = None
import distutils.command.bdist_rpm
import distutils.command.build
import distutils.core
import distutils.dist
import distutils.util
import os
import re
import struct
import sys
from distutils.errors import DistutilsSetupError
# if setuptools is detected, use it to add support for eggs
try:
from setuptools import setup, Extension
except:
from distutils.core import setup
from distutils.extension import Extension
# define build constants
BUILD_VERSION = "5.1.2"

# define the list of files to be included as documentation for Windows
# (a list of (target_dir, [files]) tuples as expected by distutils data_files)
dataFiles = None
if sys.platform in ("win32", "cygwin"):
    baseName = "cx_Oracle-doc"
    dataFiles = [ (baseName,
            [ "BUILD.txt", "LICENSE.TXT", "README.TXT", "HISTORY.txt"]) ]
    # NOTE(review): 'dir' shadows the builtin of the same name
    for dir in ("html", "html/_static", "samples", "test"):
        files = []
        fullDirName = "%s/%s" % (baseName, dir)
        for name in os.listdir(dir):
            # skip hidden files and nested directories; only plain files ship
            if name.startswith("."):
                continue
            if os.path.isdir(os.path.join(dir, name)):
                continue
            fullName = "%s/%s" % (dir, name)
            files.append(fullName)
        dataFiles.append((fullDirName, files))

# define the list of files to be included as documentation for bdist_rpm
docFiles = "LICENSE.txt README.txt BUILD.txt HISTORY.txt html samples test"
# method for checking a potential Oracle home
def CheckOracleHome(directoryToCheck):
    """Probe `directoryToCheck` for an Oracle 9i/10g/11g client installation.

    On success sets the module globals oracleHome, oracleVersion and
    oracleLibDir and returns True; on failure resets all three to None and
    returns False. Newer releases are preferred (their marker files are
    checked first), and the probe order is significant: the directory
    itself, then each platform library subdirectory, then those same
    subdirectories of the parent directory.
    """
    global oracleHome, oracleVersion, oracleLibDir
    import os
    import struct
    import sys
    # choose the library subdirectories and marker files per platform;
    # struct.calcsize("P") distinguishes 32-bit from 64-bit Python builds
    if sys.platform in ("win32", "cygwin"):
        subDirs = ["bin"]
        filesToCheck = [
            ("11g", "oraocci11.dll"),
            ("10g", "oraocci10.dll"),
            ("9i", "oraclient9.dll")
        ]
    elif sys.platform == "darwin":
        subDirs = ["lib"]
        filesToCheck = [
            ("11g", "libclntsh.dylib.11.1"),
            ("10g", "libclntsh.dylib.10.1"),
            ("9i", "libclntsh.dylib.9.0")
        ]
    else:
        if struct.calcsize("P") == 4:
            subDirs = ["lib", "lib32"]
        else:
            subDirs = ["lib", "lib64"]
        filesToCheck = [
            ("11g", "libclntsh.so.11.1"),
            ("10g", "libclntsh.so.10.1"),
            ("9i", "libclntsh.so.9.0")
        ]
    for version, baseFileName in filesToCheck:
        # 1) marker file directly in the given directory (e.g. an instant
        #    client dir, or a Windows bin directory)
        fileName = os.path.join(directoryToCheck, baseFileName)
        if os.path.exists(fileName):
            if os.path.basename(directoryToCheck).lower() == "bin":
                oracleHome = os.path.dirname(directoryToCheck)
            else:
                oracleHome = directoryToCheck
            oracleLibDir = directoryToCheck
            oracleVersion = version
            return True
        for subDir in subDirs:
            # 2) marker file in a library subdirectory of the given directory
            fileName = os.path.join(directoryToCheck, subDir, baseFileName)
            if os.path.exists(fileName):
                oracleHome = directoryToCheck
                oracleLibDir = os.path.join(directoryToCheck, subDir)
                oracleVersion = version
                return True
            # 3) marker file in a library subdirectory of the parent
            #    (covers being handed a bin/ directory from PATH)
            dirName = os.path.dirname(directoryToCheck)
            fileName = os.path.join(dirName, subDir, baseFileName)
            if os.path.exists(fileName):
                oracleHome = dirName
                oracleLibDir = os.path.join(dirName, subDir)
                oracleVersion = version
                return True
    oracleHome = oracleVersion = oracleLibDir = None
    return False
# try to determine the Oracle home: an explicit environment variable wins,
# otherwise every PATH entry is probed with CheckOracleHome()
userOracleHome = os.environ.get("ORACLE_HOME", os.environ.get("ORACLE_INSTANTCLIENT_HOME"))
if userOracleHome is not None:
    if not CheckOracleHome(userOracleHome):
        messageFormat = "Oracle home (%s) does not refer to an " \
                "9i, 10g or 11g installation."
        raise DistutilsSetupError(messageFormat % userOracleHome)
else:
    for path in os.environ["PATH"].split(os.pathsep):
        if CheckOracleHome(path):
            break
if oracleHome is None:
    # exit code 0 on purpose: a missing client skips the build rather
    # than failing it (this file is Python 2 -- print statement syntax)
    print >>sys.stderr, "cannot locate an Oracle software installation. skipping"
    sys.exit(0)
# define some variables: per-platform library/include search paths
if sys.platform == "win32":
    libDirs = [os.path.join(oracleHome, "bin"), oracleHome,
            os.path.join(oracleHome, "oci", "lib", "msvc"),
            os.path.join(oracleHome, "sdk", "lib", "msvc")]
    possibleIncludeDirs = ["oci/include", "rdbms/demo", "sdk/include"]
    includeDirs = []
    for dir in possibleIncludeDirs:
        path = os.path.normpath(os.path.join(oracleHome, dir))
        if os.path.isdir(path):
            includeDirs.append(path)
    if not includeDirs:
        message = "cannot locate Oracle include files in %s" % oracleHome
        raise DistutilsSetupError(message)
    libs = ["oci"]
elif sys.platform == "cygwin":
    includeDirs = ["/usr/include", "rdbms/demo", "rdbms/public", \
            "network/public", "oci/include"]
    libDirs = ["bin", "lib"]
    # turn the relative entries into absolute paths under the Oracle home
    for i in range(len(includeDirs)):
        includeDirs[i] = os.path.join(oracleHome, includeDirs[i])
    for i in range(len(libDirs)):
        libDirs[i] = os.path.join(oracleHome, libDirs[i])
    libs = ["oci"]
else:
    # generic Unix: full client install first, then instant client layouts
    libDirs = [oracleLibDir]
    libs = ["clntsh"]
    possibleIncludeDirs = ["rdbms/demo", "rdbms/public", "network/public",
            "sdk/include"]
    if sys.platform == "darwin":
        possibleIncludeDirs.append("plsql/public")
    includeDirs = []
    for dir in possibleIncludeDirs:
        path = os.path.join(oracleHome, dir)
        if os.path.isdir(path):
            includeDirs.append(path)
    if not includeDirs:
        # instant client RPM layout: headers next to the libraries
        path = os.path.join(oracleLibDir, "include")
        if os.path.isdir(path):
            includeDirs.append(path)
    if not includeDirs:
        # instant client zip layout: .../lib{,64} replaced by .../include
        path = re.sub("lib(64)?", "include", oracleHome)
        if os.path.isdir(path):
            includeDirs.append(path)
    if not includeDirs:
        raise DistutilsSetupError("cannot locate Oracle include files")
# NOTE: on HP-UX Itanium with Oracle 10g you need to add the library "ttsh10"
# to the list of libraries along with "clntsh"; since I am unable to test, I'll
# leave this as a comment until someone can verify when this is required
# without making other cases where sys.platform == "hp-ux11" stop working
# setup extra link and compile args
extraCompileArgs = ["-DBUILD_VERSION=%s" % BUILD_VERSION]
extraLinkArgs = []
if sys.platform == "aix4":
    extraCompileArgs.append("-qcpluscmt")
elif sys.platform == "aix5":
    extraCompileArgs.append("-DAIX5")
elif sys.platform == "cygwin":
    extraCompileArgs.append("-mno-cygwin")
    extraLinkArgs.append("-Wl,--enable-runtime-pseudo-reloc")
elif sys.platform == "darwin":
    extraLinkArgs.append("-shared-libgcc")
# force the inclusion of an RPATH linker directive if desired; this will
# eliminate the need for setting LD_LIBRARY_PATH but it also means that this
# location will be the only location searched for the Oracle client library
if "FORCE_RPATH" in os.environ:
    extraLinkArgs.append("-Wl,-rpath,%s" % oracleLibDir)
# tweak distribution full name to include the Oracle version
class Distribution(distutils.dist.Distribution):
    """Distribution that can report a name including the Oracle version."""

    def get_fullname_with_oracle_version(self):
        """Return '<name>-<version>-<oracleVersion>' for installer names."""
        return "%s-%s" % (self.metadata.get_fullname(), oracleVersion)
# tweak the RPM build command to include the Python and Oracle version
class bdist_rpm(distutils.command.bdist_rpm.bdist_rpm):
    """RPM build that renames the output to embed Python/Oracle versions."""

    def run(self):
        # build the RPM normally first, then ask rpm itself what file name
        # it produced for this spec file
        distutils.command.bdist_rpm.bdist_rpm.run(self)
        specFile = os.path.join(self.rpm_base, "SPECS",
                "%s.spec" % self.distribution.get_name())
        queryFormat = "%{name}-%{version}-%{release}.%{arch}.rpm"
        command = "rpm -q --qf '%s' --specfile %s" % (queryFormat, specFile)
        origFileName = os.popen(command).read()
        # splice the Oracle version and pyXY tag into the dash-separated
        # name, e.g. cx_Oracle-11g-py27-5.1.2-1.x86_64.rpm
        parts = origFileName.split("-")
        parts.insert(2, oracleVersion)
        parts.insert(3, "py%s%s" % sys.version_info[:2])
        newFileName = "-".join(parts)
        self.move_file(os.path.join("dist", origFileName),
               os.path.join("dist", newFileName))
# tweak the build directories to include the Oracle version
class build(distutils.command.build.build):
    """Build command whose build directories embed the platform, Python
    and Oracle client versions, so builds for different clients coexist."""

    def finalize_options(self):
        import distutils.util
        import os
        import sys
        # Use sys.version_info rather than slicing sys.version: the slice
        # sys.version[0:3] yields the wrong string ("3.1") for two-digit
        # minor versions such as Python 3.10.
        platSpecifier = ".%s-%s.%s-%s" % \
                (distutils.util.get_platform(), sys.version_info[0],
                 sys.version_info[1], oracleVersion)
        # only fill in the directories the user did not set explicitly
        if self.build_platlib is None:
            self.build_platlib = os.path.join(self.build_base,
                    "lib%s" % platSpecifier)
        if self.build_temp is None:
            self.build_temp = os.path.join(self.build_base,
                    "temp%s" % platSpecifier)
        distutils.command.build.build.finalize_options(self)
class test(distutils.core.Command):
    """Custom 'setup.py test' command: build, then run the test suite."""
    description = "run the test suite for the extension"
    user_options = []

    def finalize_options(self):
        # no options to validate
        pass

    def initialize_options(self):
        # no options to initialize
        pass

    def run(self):
        # build the extension first so the freshly built module is importable
        self.run_command("build")
        buildCommand = self.distribution.get_command_obj("build")
        sys.path.insert(0, os.path.abspath("test"))
        sys.path.insert(0, os.path.abspath(buildCommand.build_lib))
        # Python 2 uses execfile(); Python 3 reads and exec()s the file
        if sys.version_info[0] < 3:
            execfile(os.path.join("test", "test.py"))
        else:
            fileName = os.path.join("test", "test3k.py")
            exec(open(fileName).read())
# map of distutils command overrides registered with setup() below
commandClasses = dict(build = build, bdist_rpm = bdist_rpm, test = test)
# tweak the Windows installer names to include the Oracle version; each
# command temporarily swaps distribution.get_fullname for the Oracle-aware
# variant while the base command runs, then restores it
if distutils.command.bdist_msi is not None:
    class bdist_msi(distutils.command.bdist_msi.bdist_msi):
        def run(self):
            origMethod = self.distribution.get_fullname
            self.distribution.get_fullname = \
                self.distribution.get_fullname_with_oracle_version
            distutils.command.bdist_msi.bdist_msi.run(self)
            self.distribution.get_fullname = origMethod
    commandClasses["bdist_msi"] = bdist_msi
if distutils.command.bdist_wininst is not None:
    class bdist_wininst(distutils.command.bdist_wininst.bdist_wininst):
        def run(self):
            origMethod = self.distribution.get_fullname
            self.distribution.get_fullname = \
                self.distribution.get_fullname_with_oracle_version
            distutils.command.bdist_wininst.bdist_wininst.run(self)
            self.distribution.get_fullname = origMethod
    commandClasses["bdist_wininst"] = bdist_wininst
# define classifiers for the package index
classifiers = [
        "Development Status :: 6 - Mature",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Python Software Foundation License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: C",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "Topic :: Database"
]
# setup the extension; include/lib dirs and compile/link args were computed
# from the detected Oracle installation above
extension = Extension(
        name = "cx_Oracle",
        include_dirs = includeDirs,
        libraries = libs,
        library_dirs = libDirs,
        extra_compile_args = extraCompileArgs,
        extra_link_args = extraLinkArgs,
        sources = ["cx_Oracle.c"],
        # headers/sources whose change should trigger a rebuild
        depends = ["Buffer.c", "Callback.c", "Connection.c", "Cursor.c",
                "CursorVar.c", "DateTimeVar.c", "Environment.c", "Error.c",
                "ExternalLobVar.c", "ExternalObjectVar.c", "IntervalVar.c",
                "LobVar.c", "LongVar.c", "NumberVar.c", "ObjectType.c",
                "ObjectVar.c", "SessionPool.c", "StringVar.c",
                "Subscription.c", "TimestampVar.c", "Transforms.c",
                "Variable.c"])
# perform the setup using the Oracle-version-aware Distribution and the
# command overrides registered in commandClasses
setup(
        name = "cx_Oracle",
        version = BUILD_VERSION,
        distclass = Distribution,
        description = "Python interface to Oracle",
        data_files = dataFiles,
        cmdclass = commandClasses,
        options = dict(bdist_rpm = dict(doc_files = docFiles)),
        long_description = \
            "Python interface to Oracle conforming to the Python DB API 2.0 "
            "specification.\n"
            "See http://www.python.org/topics/database/DatabaseAPI-2.0.html.",
        author = "Anthony Tuininga",
        author_email = "anthony.tuininga@gmail.com",
        url = "http://cx-oracle.sourceforge.net",
        ext_modules = [extension],
        keywords = "Oracle",
        license = "Python Software Foundation License",
        classifiers = classifiers)
| apache-2.0 |
EduPepperPDTesting/pepper2013-testing | cms/djangoapps/contentstore/features/course-overview.py | 8 | 4543 | # pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from common import *
from nose.tools import assert_true, assert_false, assert_equal # pylint: disable=E0611
from logging import getLogger
logger = getLogger(__name__)
@step(u'I have a course with no sections$')
def have_a_course(step):
    """Start from a clean slate: one course, no sections."""
    world.clear_courses()
    world.CourseFactory.create()
@step(u'I have a course with 1 section$')
def have_a_course_with_1_section(step):
    """Create a course holding a single section with one subsection."""
    world.clear_courses()
    course = world.CourseFactory.create()
    section = world.ItemFactory.create(parent_location=course.location)
    world.ItemFactory.create(
        parent_location=section.location,
        category='sequential',
        display_name='Subsection One',
    )
@step(u'I have a course with multiple sections$')
def have_a_course_with_two_sections(step):
    """Create a course with two sections; the second has two subsections."""
    world.clear_courses()
    course = world.CourseFactory.create()
    first_section = world.ItemFactory.create(parent_location=course.location)
    world.ItemFactory.create(
        parent_location=first_section.location,
        category='sequential',
        display_name='Subsection One',
    )
    second_section = world.ItemFactory.create(
        parent_location=course.location,
        display_name='Section Two',
    )
    for subsection_name in ('Subsection Alpha', 'Subsection Beta'):
        world.ItemFactory.create(
            parent_location=second_section.location,
            category='sequential',
            display_name=subsection_name,
        )
@step(u'I navigate to the course overview page$')
def navigate_to_the_course_overview_page(step):
    """Log in as a staff user and open the first course on the dashboard."""
    create_studio_user(is_staff=True)
    log_into_studio()
    world.css_click('a.course-link')
@step(u'I navigate to the courseware page of a course with multiple sections')
def nav_to_the_courseware_page_of_a_course_with_multiple_sections(step):
    """Compose the two setup steps above; order matters (create, then open)."""
    step.given('I have a course with multiple sections')
    step.given('I navigate to the course overview page')
@step(u'I add a section')
def i_add_a_section(step):
    """Add one new, distinctly named section via the shared UI helper."""
    add_section(name='My New Section That I Just Added')
@step(u'I click the "([^"]*)" link$')
def i_click_the_text_span(step, text):
span_locator = '.toggle-button-sections span'
assert_true(world.browser.is_element_present_by_css(span_locator))
# first make sure that the expand/collapse text is the one you expected
assert_equal(world.browser.find_by_css(span_locator).value, text)
world.css_click(span_locator)
@step(u'I collapse the first section$')
def i_collapse_a_section(step):
    """Collapse the first course section via its collapse control."""
    world.css_click('section.courseware-section a.collapse')
@step(u'I expand the first section$')
def i_expand_a_section(step):
    """Expand the first course section via its expand control."""
    world.css_click('section.courseware-section a.expand')
@step(u'I see the "([^"]*)" link$')
def i_see_the_span_with_text(step, text):
span_locator = '.toggle-button-sections span'
assert_true(world.is_css_present(span_locator))
assert_equal(world.css_value(span_locator), text)
assert_true(world.css_visible(span_locator))
@step(u'I do not see the "([^"]*)" link$')
def i_do_not_see_the_span_with_text(step, text):
# Note that the span will exist on the page but not be visible
span_locator = '.toggle-button-sections span'
assert_true(world.is_css_present(span_locator))
assert_false(world.css_visible(span_locator))
@step(u'all sections are expanded$')
def all_sections_are_expanded(step):
    """Every subsection list on the page is visible."""
    locator = 'div.subsection-list'
    matches = world.css_find(locator)
    for index, _ in enumerate(matches):
        assert_true(world.css_visible(locator, index=index))
@step(u'all sections are collapsed$')
def all_sections_are_collapsed(step):
    """No subsection list on the page is visible."""
    locator = 'div.subsection-list'
    matches = world.css_find(locator)
    for index, _ in enumerate(matches):
        assert_false(world.css_visible(locator, index=index))
@step(u"I change an assignment's grading status")
def change_grading_status(step):
world.css_find('a.menu-toggle').click()
world.css_find('.menu li').first.click()
@step(u'I reorder subsections')
def reorder_subsections(_step):
    """Drag the first subsection handle 30px right to trigger a reorder."""
    draggable_css = 'a.drag-handle'
    ele = world.css_find(draggable_css).first
    # NOTE(review): reaches into splinter's private _element attribute to
    # hand the raw webdriver element to the action chain -- confirm this
    # still works when the splinter dependency is upgraded.
    ele.action_chains.drag_and_drop_by_offset(
        ele._element,
        30,
        0
    ).perform()
| agpl-3.0 |
charlesll/RamPy | legacy_code/IR_dec_comb.py | 1 | 6585 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 22 07:54:05 2014
@author: charleslelosq
Carnegie Institution for Science
"""
import sys
sys.path.append("/Users/charleslelosq/Documents/RamPy/lib-charles/")
import csv
import numpy as np
import scipy
import matplotlib
import matplotlib.gridspec as gridspec
from pylab import *
from StringIO import StringIO
from scipy import interpolate
# to fit spectra we use the lmfit software of Matt Newville, CARS, university of Chicago, available on the web
from lmfit import minimize, Minimizer, Parameters, Parameter, report_fit, fit_report
from spectratools import * #Charles' libraries and functions
from Tkinter import *
import tkMessageBox
from tkFileDialog import askopenfilename
#### We define a set of functions that will be used for fitting data
#### unfortunatly, as we use lmfit (which is convenient because it can fix or release
#### easily the parameters) we are not able to use arrays for parameters...
#### so it is a little bit long to write all the things, but in a way quite robust also...
#### gaussian and pseudovoigt functions are available in spectratools
#### if you need a voigt, fix the gaussian-to-lorentzian ratio to 1 in the parameter definition before
#### doing the data fit
def residual(pars, x, data=None, eps=None):
    """Two-Gaussian model residual for lmfit.

    When ``data`` is omitted, return ``(model, peak1, peak2)`` so callers
    can plot the individual components; otherwise return the (optionally
    error-weighted) difference between the model and the data.
    """
    # pull the fitted values out of the lmfit Parameters object
    a1, a2 = pars['a1'].value, pars['a2'].value
    f1, f2 = pars['f1'].value, pars['f2'].value
    l1, l2 = pars['l1'].value, pars['l2'].value

    # Gaussian model: sum of the two individual peaks
    peak1 = gaussian(x, a1, f1, l1)
    peak2 = gaussian(x, a2, f2, l2)
    model = peak1 + peak2

    if data is None:
        return model, peak1, peak2
    if eps is None:
        return model - data
    return (model - data) / eps
##### CORE OF THE CALCULATION BELOW
#### CALLING THE DATA NAMES
tkMessageBox.showinfo(
        "Open file",
        "Please open the list of spectra")
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
with open(filename) as inputfile:
    results = list(csv.reader(inputfile)) # we read the data list
#### LOOP FOR BEING ABLE TO TREAT MULTIPLE DATA
#### WARNING: OUTPUT ARE AUTOMATICALLY GENERATED IN A DIRECTORY CALLED "DECONV"
#### (see end) THAT SHOULD BE PRESENT !!!!!!!!!!
for lg in range(len(results)):
    name = str(results[lg]).strip('[]')
    name = name[1:-1] # to remove unwanted ""
    sample = np.genfromtxt(name) # get the sample to deconvolute
    # we set here the lower and higher bonds for the interest region
    lb = 4700 ### MAY NEED TO AJUST THAT
    hb = 6000
    interestspectra = sample[np.where((sample[:,0] > lb)&(sample[:,0] < hb))]
    ese0 = interestspectra[:,2]/abs(interestspectra[:,1]) #take ese as a percentage, we assume that the treatment was made correctly for error determination... if not, please put sigma = None
    interestspectra[:,1] = interestspectra[:,1]/np.amax(interestspectra[:,1])*100 # normalise spectra to maximum, easier to handle after
    sigma = abs(ese0*interestspectra[:,1]) #calculate good ese
    #sigma = None # you can activate that if you are not sure about the errors
    xfit = interestspectra[:,0] # region to be fitted
    data = interestspectra[:,1] # region to be fitted
    params = Parameters()
    ####################### FOR MELT:
    ####################### COMMENT IF NOT WANTED
    # (Name, Value, Vary, Min, Max, Expr)
    params.add_many(('a1', 1, True, 0, None, None),
            ('f1', 5200, True, 750, None, None),
            ('l1', 1, True, 0, None, None),
            ('a2', 1, True, 0, None, None),
            ('f2', 5400, True, None, None, None),
            ('l2', 1, True, None, None, None))
    # NOTE(review): residual_melt is not defined anywhere in this file --
    # only residual() exists above; this line raises NameError as written.
    result = minimize(residual_melt, params, args=(xfit, data)) # fit data with leastsq model from scipy
    model = fit_report(params) # the report
    yout, peak1,peak2,= residual_melt(params,xfit) # the different peaks
    #### We just calculate the different areas up to 4700 cmm-1 and those of the gaussians
    # Select interest areas for calculating the areas of OH and H2Omol peaks
    intarea45 = sample[np.where((sample[:,0]> 4100) & (sample[:,0]<4700))]
    area4500 = np.trapz(intarea45[:,1],intarea45[:,0])
    esearea4500 = 1/sqrt(area4500) # We assume that RELATIVE errors on areas are globally equal to 1/sqrt(Area)
    # now for the gaussians
    # unpack parameters:
    # extract .value attribute for each parameter
    # NOTE(review): 'pars' is undefined at this scope -- presumably this
    # should read result.params (or params); NameError as written.
    a1 = pars['a1'].value
    a2 = pars['a2'].value
    l1 = pars['l1'].value
    l2 = pars['l2'].value
    AireG1 = gaussianarea(a1,l1)
    AireG2 = gaussianarea(a2,l2)
    ##### WE DO A NICE FIGURE THAT CAN BE IMPROVED FOR PUBLICATION
    fig = figure()
    plot(sample[:,0],sample[:,1],'k-')
    plot(xfit,yout,'r-')
    plot(xfit,peak1,'b-')
    plot(xfit,peak2,'b-')
    xlim(lb,hb)
    ylim(0,np.max(sample[:,1]))
    xlabel("Wavenumber, cm$^{-1}$", fontsize = 18, fontweight = "bold")
    ylabel("Absorption, a. u.", fontsize = 18, fontweight = "bold")
    text(4000,np.max(intarea45[:,1])+0.03*np.max(intarea45[:,1]),('Area OH: \n'+'%.1f' % area4500),color='b',fontsize = 16)
    text(4650,a1 + 0.05*a1,('Area pic 1$: \n'+ '%.1f' % AireG1),color='b',fontsize = 16)
    # NOTE(review): ratioOH_H2O and eseratioOH_H2O are never computed in
    # this file -- NameError as written; confirm against the original script.
    text(5000,a2 + 0.05*a2,('OH/H$_2$O$_{mol}$: \n'+'%.3f' % ratioOH_H2O+'\n+/-'+'%.3f' % eseratioOH_H2O),color='r',fontsize = 16)
    ##### output of data, fitted peaks, parameters, and the figure in pdf
    ##### all goes into the ./deconv/ folder
    name.rfind('/')
    nameout = name[name.rfind('/')+1::]
    namesample = nameout[0:nameout.find('.')]
    pathint = str('/deconv/') # the output folder
    ext1 = '_ydec.txt'
    ext2 = '_params.txt'
    ext3 = '.pdf'
    # NOTE(review): pathbeg is undefined in this file; also 'os' is used
    # below but never imported here -- both need fixing for this to run.
    pathout1 = pathbeg+pathint+namesample+ext1
    pathout2 = pathbeg+pathint+namesample+ext2
    pathout3 = pathbeg+pathint+namesample+ext3
    matout = np.vstack((xfit,data,yout,peak1,peak2))
    matout = np.transpose(matout)
    np.savetxt(pathout1,matout) # saving the arrays of spectra
    fd = os.open( pathout2, os.O_RDWR|os.O_CREAT ) # Open a file and create it if it do not exist
    fo = os.fdopen(fd, "w+") # Now get a file object for the above file.
    fo.write(model) # write the parameters in it
    fo.close()
    savefig(pathout3) # save the figure
| gpl-2.0 |
queria/my-tempest | tempest/api_schema/response/compute/keypairs.py | 11 | 2237 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# JSON schema for the 'list keypairs' compute API response: a 200 with a
# body containing a list of {"keypair": {...}} wrappers.
list_keypairs = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'keypairs': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': {
                        'keypair': {
                            'type': 'object',
                            'properties': {
                                'public_key': {'type': 'string'},
                                'name': {'type': 'string'},
                                'fingerprint': {'type': 'string'}
                            },
                            'required': ['public_key', 'name', 'fingerprint']
                        }
                    },
                    'required': ['keypair']
                }
            }
        },
        'required': ['keypairs']
    }
}
# JSON schema for the body of a 'create keypair' response; note that
# 'private_key' is intentionally optional (see comment below).
create_keypair = {
    'type': 'object',
    'properties': {
        'keypair': {
            'type': 'object',
            'properties': {
                'fingerprint': {'type': 'string'},
                'name': {'type': 'string'},
                'public_key': {'type': 'string'},
                'user_id': {'type': 'string'},
                'private_key': {'type': 'string'}
            },
            # When create keypair API is being called with 'Public key'
            # (Importing keypair) then, response body does not contain
            # 'private_key' So it is not defined as 'required'
            'required': ['fingerprint', 'name', 'public_key', 'user_id']
        }
    },
    'required': ['keypair']
}
| apache-2.0 |
Glasgow2015/team-10 | env/lib/python2.7/site-packages/pip/commands/wheel.py | 328 | 7320 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import sys
from pip.basecommand import Command
from pip.index import PackageFinder
from pip.log import logger
from pip.exceptions import CommandError, PreviousBuildDirError
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.util import normalize_path
from pip.wheel import WheelBuilder
from pip import cmdoptions
DEFAULT_WHEEL_DIR = os.path.join(normalize_path(os.curdir), 'wheelhouse')
class WheelCommand(Command):
    """
    Build Wheel archives for your requirements and dependencies.
    Wheel is a built-package format, and offers the advantage of not recompiling your software during every install.
    For more details, see the wheel docs: http://wheel.readthedocs.org/en/latest.
    Requirements: setuptools>=0.8, and wheel.
    'pip wheel' uses the bdist_wheel setuptools extension from the wheel package to build individual wheels.
    """
    name = 'wheel'
    usage = """
      %prog [options] <requirement specifier> ...
      %prog [options] -r <requirements file> ...
      %prog [options] <vcs project url> ...
      %prog [options] <local project path> ...
      %prog [options] <archive url/path> ..."""
    summary = 'Build wheels from your requirements.'

    def __init__(self, *args, **kw):
        """Register the wheel-specific and shared index options."""
        super(WheelCommand, self).__init__(*args, **kw)
        cmd_opts = self.cmd_opts
        cmd_opts.add_option(
            '-w', '--wheel-dir',
            dest='wheel_dir',
            metavar='dir',
            default=DEFAULT_WHEEL_DIR,
            help="Build wheels into <dir>, where the default is '<cwd>/wheelhouse'.")
        cmd_opts.add_option(cmdoptions.use_wheel.make())
        cmd_opts.add_option(cmdoptions.no_use_wheel.make())
        cmd_opts.add_option(
            '--build-option',
            dest='build_options',
            metavar='options',
            action='append',
            help="Extra arguments to be supplied to 'setup.py bdist_wheel'.")
        cmd_opts.add_option(cmdoptions.requirements.make())
        cmd_opts.add_option(cmdoptions.download_cache.make())
        cmd_opts.add_option(cmdoptions.no_deps.make())
        cmd_opts.add_option(cmdoptions.build_dir.make())
        cmd_opts.add_option(
            '--global-option',
            dest='global_options',
            action='append',
            metavar='options',
            help="Extra global options to be supplied to the setup.py "
            "call before the 'bdist_wheel' command.")
        cmd_opts.add_option(
            '--pre',
            action='store_true',
            default=False,
            help="Include pre-release and development versions. By default, pip only finds stable versions.")
        cmd_opts.add_option(cmdoptions.no_clean.make())
        # index options are shared with other commands; insert both groups
        # at the front so they appear first in --help output
        index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser)
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)

    def run(self, options, args):
        """Resolve the requirements and build a wheel for each of them."""
        # confirm requirements: both 'wheel' and a dist-info capable
        # setuptools must be importable before any work starts
        try:
            import wheel.bdist_wheel
        except ImportError:
            raise CommandError("'pip wheel' requires the 'wheel' package. To fix this, run: pip install wheel")
        try:
            import pkg_resources
        except ImportError:
            raise CommandError(
                "'pip wheel' requires setuptools >= 0.8 for dist-info support."
                " To fix this, run: pip install --upgrade setuptools"
            )
        else:
            if not hasattr(pkg_resources, 'DistInfoDistribution'):
                raise CommandError(
                    "'pip wheel' requires setuptools >= 0.8 for dist-info "
                    "support. To fix this, run: pip install --upgrade "
                    "setuptools"
                )
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.notify('Ignoring indexes: %s' % ','.join(index_urls))
            index_urls = []
        # --use-mirrors/--mirrors are deprecated but still honored
        if options.use_mirrors:
            logger.deprecated("1.7",
                        "--use-mirrors has been deprecated and will be removed"
                        " in the future. Explicit uses of --index-url and/or "
                        "--extra-index-url is suggested.")
        if options.mirrors:
            logger.deprecated("1.7",
                        "--mirrors has been deprecated and will be removed in "
                        " the future. Explicit uses of --index-url and/or "
                        "--extra-index-url is suggested.")
            index_urls += options.mirrors
        session = self._build_session(options)
        finder = PackageFinder(find_links=options.find_links,
                               index_urls=index_urls,
                               use_wheel=options.use_wheel,
                               allow_external=options.allow_external,
                               allow_unverified=options.allow_unverified,
                               allow_all_external=options.allow_all_external,
                               allow_all_prereleases=options.pre,
                               process_dependency_links=
                                options.process_dependency_links,
                               session=session,
                            )
        options.build_dir = os.path.abspath(options.build_dir)
        # downloads land directly in the wheel dir; nothing is installed
        requirement_set = RequirementSet(
            build_dir=options.build_dir,
            src_dir=None,
            download_dir=None,
            download_cache=options.download_cache,
            ignore_dependencies=options.ignore_dependencies,
            ignore_installed=True,
            session=session,
            wheel_download_dir=options.wheel_dir
        )
        # make the wheelhouse
        if not os.path.exists(options.wheel_dir):
            os.makedirs(options.wheel_dir)
        #parse args and/or requirements files
        for name in args:
            requirement_set.add_requirement(
                InstallRequirement.from_line(name, None))
        for filename in options.requirements:
            for req in parse_requirements(
                    filename,
                    finder=finder,
                    options=options,
                    session=session):
                # editable requirements cannot be built into wheels
                if req.editable:
                    logger.notify("ignoring %s" % req.url)
                    continue
                requirement_set.add_requirement(req)
        #fail if no requirements
        if not requirement_set.has_requirements:
            opts = {'name': self.name}
            msg = ('You must give at least one requirement '
                   'to %(name)s (see "pip help %(name)s")' % opts)
            logger.error(msg)
            return
        try:
            #build wheels
            wb = WheelBuilder(
                requirement_set,
                finder,
                options.wheel_dir,
                build_options = options.build_options or [],
                global_options = options.global_options or []
            )
            wb.build()
        except PreviousBuildDirError:
            # keep the build dir around so the user can inspect/clean it
            options.no_clean = True
            raise
        finally:
            if not options.no_clean:
                requirement_set.cleanup_files()
| apache-2.0 |
jspricke/weechat-android | releases/publish_playstore.py | 2 | 2195 | #!/usr/bin/env python
import httplib2
import os
import sys
from apiclient.discovery import build
from oauth2client import client
from oauth2client.service_account import ServiceAccountCredentials
#TRACK = 'production'
# release channel to publish to on Google Play
TRACK = 'beta'
# service account email is injected by CI; missing variable raises KeyError
SERVICE_ACCT_EMAIL = os.environ['GOOGLEPLAY_ACCT_EMAIL']
KEY = 'releases/google-play-key.p12'
PKG_NAME = 'com.ubergeek42.WeechatAndroid.dev'
APK = "weechat-android/build/outputs/apk/weechat-android-devrelease.apk"
def main():
    """Upload the release APK to Google Play and assign it to TRACK.

    Performs the standard androidpublisher edit flow: open an edit,
    upload the APK, point the track at the new version code, commit.
    (This file is Python 2 -- print statement syntax.)
    """
    credentials = ServiceAccountCredentials.from_p12_keyfile(
        SERVICE_ACCT_EMAIL, KEY,
        scopes=['https://www.googleapis.com/auth/androidpublisher'])
    http = httplib2.Http()
    http = credentials.authorize(http)
    service = build('androidpublisher', 'v2', http=http)
    try:
        # every change happens inside one edit transaction
        edit_request = service.edits().insert(body={}, packageName=PKG_NAME)
        result = edit_request.execute()
        edit_id = result['id']
        apk_response = service.edits().apks().upload(
            editId=edit_id,
            packageName=PKG_NAME,
            media_body=APK).execute()
        print 'Version code %d has been uploaded' % apk_response['versionCode']
        track_response = service.edits().tracks().update(
            editId=edit_id,
            track=TRACK,
            packageName=PKG_NAME,
            body={u'versionCodes': [apk_response['versionCode']]}).execute()
        print 'Track %s is set for version code(s) %s' % (
            track_response['track'], str(track_response['versionCodes']))
        # nothing is live until the edit is committed
        commit_request = service.edits().commit(
            editId=edit_id, packageName=PKG_NAME).execute()
        print 'Edit "%s" has been committed' % (commit_request['id'])
    except client.AccessTokenRefreshError:
        print ('The credentials have been revoked/expired, please re-run the'
               ' application to re-authorize')
if __name__ == '__main__':
if os.environ.get('TRAVIS_BRANCH', 'undefined') != 'master':
print "Can't publish play store app for any branch except master"
sys.exit(0)
if os.environ.get('TRAVIS_PULL_REQUEST', None) != "false":
print "Can't publish play store app for pull requests"
sys.exit(0)
main()
| apache-2.0 |
subutai/nupic.research | tests/unit/frameworks/tensorflow/k_winner_test.py | 3 | 7243 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
#
import random
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import test_util
from tensorflow.python.keras.layers import Input
from tensorflow.python.keras.models import Model
from nupic.research.frameworks.tensorflow.layers.k_winners import KWinners, KWinners2d
# eager mode so tensors can be inspected directly in the tests
tf.enable_eager_execution()
SEED = 18
# Tensorflow configuration.
# Make sure to use one thread in order to keep the results deterministic
CONFIG = tf.ConfigProto(
    intra_op_parallelism_threads=1,
    inter_op_parallelism_threads=1,
    device_count={"CPU": 1},
)
class KWinnersTest(tf.test.TestCase):
    """Tests for the 1-D KWinners layer."""

    @classmethod
    def setUpClass(cls):
        # seed every RNG once so all tests in the class are reproducible
        tf.set_random_seed(SEED)
        np.random.seed(SEED)
        random.seed(SEED)

    def setUp(self):
        # two rows of 7 activations; each row has exactly three values > 1.0
        self.x = tf.constant(
            [[1.0, 1.2, 1.1, 1.3, 1.0, 1.5, 1.0],
             [1.1, 1.0, 1.2, 1.0, 1.3, 1.0, 1.2]]
        )
        self.duty_cycle = tf.constant(1.0 / 3.0, shape=(2, 7))

    @test_util.run_all_in_graph_and_eager_modes
    def test_inference(self):
        """boost factor 0, k=3, batch size 2"""
        # with boosting off, the k=3 largest entries per row survive and
        # everything else is zeroed
        expected = np.zeros(self.x.shape)
        expected[0, 1] = 1.2
        expected[0, 3] = 1.3
        expected[0, 5] = 1.5
        expected[1, 2] = 1.2
        expected[1, 4] = 1.3
        expected[1, 6] = 1.2

        batch_size = self.x.shape[0]
        input_shape = self.x.shape[1:]
        n = np.prod(input_shape)

        k_winners = KWinners(percent_on=3 / n, boost_strength=0.0)
        inp = Input(batch_size=batch_size, shape=input_shape)
        out = k_winners(inp, training=False)
        model = Model(inp, out)
        with self.test_session(config=CONFIG):
            y = model.predict(self.x, steps=1, batch_size=batch_size)

        self.assertAllClose(y, expected)
class KWinners2dTest(tf.test.TestCase):
    """Tests for the KWinners2d layer (k-winners-take-all over NCHW maps)."""

    @classmethod
    def setUpClass(cls):
        # Seed every RNG source once so winner selection is deterministic.
        tf.set_random_seed(SEED)
        np.random.seed(SEED)
        random.seed(SEED)

    def setUp(self):
        # Batch size 1
        # NCHW tensor of ones with four entries raised above 1.0 so the
        # top-k winners are unambiguous.
        x = np.ones((1, 3, 2, 2), dtype=np.float32)
        x[0, 0, 1, 0] = 1.1
        x[0, 0, 1, 1] = 1.2
        x[0, 1, 0, 1] = 1.2
        x[0, 2, 1, 0] = 1.3
        self.x = tf.constant(x)
        # Batch size 2
        # Same first sample; the second sample gets four larger winners.
        x = np.ones((2, 3, 2, 2), dtype=np.float32)
        x[0, 0, 1, 0] = 1.1
        x[0, 0, 1, 1] = 1.2
        x[0, 1, 0, 1] = 1.2
        x[0, 2, 1, 0] = 1.3
        x[1, 0, 0, 0] = 1.4
        x[1, 1, 0, 0] = 1.5
        x[1, 1, 0, 1] = 1.6
        x[1, 2, 1, 1] = 1.7
        self.x2 = tf.constant(x)

    # NOTE(review): run_all_in_graph_and_eager_modes is normally applied to a
    # test class rather than individual methods -- confirm intended usage.
    @test_util.run_all_in_graph_and_eager_modes
    def test_one(self):
        """Equal duty cycle, boost factor 0, k=4, batch size 1."""
        x = self.x
        # Only the 4 largest activations survive; everything else is zeroed.
        expected = np.zeros_like(x)
        expected[0, 0, 1, 0] = 1.1
        expected[0, 0, 1, 1] = 1.2
        expected[0, 1, 0, 1] = 1.2
        expected[0, 2, 1, 0] = 1.3
        input_shape = x.shape[1:]
        n = np.prod(input_shape)
        k = 4
        with self.cached_session(config=CONFIG):
            k_winners = KWinners2d(percent_on=k / n, boost_strength=0.0)
            k_winners.build(x.shape)
            y = k_winners(x, training=True)
            self.assertAllClose(y, expected)

    @test_util.run_all_in_graph_and_eager_modes
    def test_two(self):
        """Equal duty cycle, boost factor 0, k=3."""
        x = self.x
        # With k=3 the smallest of the four raised values (1.1) is dropped.
        expected = np.zeros_like(x)
        expected[0, 0, 1, 1] = 1.2
        expected[0, 1, 0, 1] = 1.2
        expected[0, 2, 1, 0] = 1.3
        input_shape = x.shape[1:]
        n = np.prod(input_shape)
        k = 3
        with self.cached_session(config=CONFIG):
            k_winners = KWinners2d(percent_on=k / n, boost_strength=0.0)
            k_winners.build(x.shape)
            y = k_winners(x, training=True)
            self.assertAllClose(y, expected)

    @test_util.run_all_in_graph_and_eager_modes
    def test_three(self):
        """Equal duty cycle, boost factor=0, k=4, batch size=2."""
        x = self.x2
        # k winners are selected independently per sample in the batch.
        expected = np.zeros_like(x)
        expected[0, 0, 1, 0] = 1.1
        expected[0, 0, 1, 1] = 1.2
        expected[0, 1, 0, 1] = 1.2
        expected[0, 2, 1, 0] = 1.3
        expected[1, 0, 0, 0] = 1.4
        expected[1, 1, 0, 0] = 1.5
        expected[1, 1, 0, 1] = 1.6
        expected[1, 2, 1, 1] = 1.7
        input_shape = x.shape[1:]
        n = np.prod(input_shape)
        k = 4
        with self.cached_session(config=CONFIG):
            k_winners = KWinners2d(percent_on=k / n, boost_strength=0.0)
            k_winners.build(x.shape)
            y = k_winners(x, training=True)
            self.assertAllClose(y, expected)

    @test_util.run_all_in_graph_and_eager_modes
    def test_four(self):
        """Equal duty cycle, boost factor=0, k=3, batch size=2."""
        x = self.x2
        # Per sample, the smallest raised value (1.1 and 1.4) is dropped.
        expected = np.zeros_like(x)
        expected[0, 0, 1, 1] = 1.2
        expected[0, 1, 0, 1] = 1.2
        expected[0, 2, 1, 0] = 1.3
        expected[1, 1, 0, 0] = 1.5
        expected[1, 1, 0, 1] = 1.6
        expected[1, 2, 1, 1] = 1.7
        input_shape = x.shape[1:]
        n = np.prod(input_shape)
        k = 3
        with self.cached_session(config=CONFIG):
            k_winners = KWinners2d(percent_on=k / n, boost_strength=0.0)
            k_winners.build(x.shape)
            y = k_winners(x, training=True)
            self.assertAllClose(y, expected)

    @test_util.run_all_in_graph_and_eager_modes
    def test_five(self):
        """Boosting enabled: output and updated duty cycles after one batch."""
        x = self.x2
        expected = np.zeros_like(x)
        expected[0, 0, 1, 0] = 1.1
        expected[0, 0, 1, 1] = 1.2
        expected[0, 1, 0, 1] = 1.2
        expected[0, 2, 1, 0] = 1.3
        expected[1, 0, 0, 0] = 1.4
        expected[1, 1, 0, 0] = 1.5
        expected[1, 1, 0, 1] = 1.6
        expected[1, 2, 1, 1] = 1.7
        # Expected per-channel duty cycles after a single training batch.
        expected_dutycycles = tf.constant([1.5000, 1.5000, 1.0000]) / 4.0
        with self.cached_session(config=CONFIG):
            k_winners = KWinners2d(
                percent_on=0.333,
                boost_strength=1.0,
                k_inference_factor=0.5,
                boost_strength_factor=0.5,
                duty_cycle_period=1000,
            )
            k_winners.build(x.shape)
            y = k_winners(x, training=True)
            self.assertAllClose(y, expected)
            self.assertAllClose(k_winners.duty_cycles, expected_dutycycles)
if __name__ == "__main__":
    # Allow running this test module directly.
    tf.test.main()
| agpl-3.0 |
tarikgwa/nfd | newfies/mod_registration/forms.py | 1 | 1850 | #
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <info@star2billing.com>
#
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.forms import SetPasswordForm
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, Div, Field
from crispy_forms.bootstrap import FormActions
class ForgotForm(forms.Form):
    """Forgot-password form: a single email field rendered via crispy-forms."""
    email = forms.EmailField(max_length=60, label=_('Email'), required=True)
    # Bootstrap styling on the input widget.
    email.widget.attrs['class'] = 'form-control'

    def __init__(self, *args, **kwargs):
        super(ForgotForm, self).__init__(*args, **kwargs)
        # crispy-forms helper controls the rendered HTML layout.
        self.helper = FormHelper()
        self.helper.form_class = 'well'
        self.helper.layout = Layout(
            Div(
                Div('email', css_class='col-md-4'),
                css_class='row'
            ),
            FormActions(Submit('submit', _('Reset my password')))
        )
class CustomSetPasswordForm(SetPasswordForm):
    """Password-reset form: Django's SetPasswordForm with a crispy layout.

    Fields (new_password1/new_password2) are inherited from SetPasswordForm;
    only the rendering is customized here.
    """
    def __init__(self, *args, **kwargs):
        super(CustomSetPasswordForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.layout = Layout(
            Div(
                Div(
                    Field('new_password1'),
                    Field('new_password2'),
                    Submit('submit', _('Change my password')),
                    css_class='col-md-4'
                ),
                css_class='well col-md-12'
            ),
        )
| mpl-2.0 |
xcgoner/dist-mxnet | docs/build_version_doc/AddPackageLink.py | 6 | 2930 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import argparse
from bs4 import BeautifulSoup as bs
# Command line interface: this script post-processes a generated install.html
# page, injecting download links for the versioned source release packages.
parser = argparse.ArgumentParser(description="Add download package link.",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--file_path', type=str, default='docs/_build/html/get_started/install.html',
                    help='file to be modified')
parser.add_argument('--current_version', type=str, default='master',
                    help='Current version')
if __name__ == '__main__':
    args = parser.parse_args()
    tag = args.current_version

    # Apache mirror URLs for the source tarball and its verification files.
    src_url = "http://www.apache.org/dyn/closer.cgi/incubator/" \
              "mxnet/%s-incubating/apache-mxnet-src-%s-incubating.tar.gz" % (tag, tag)
    pgp_url = "http://www.apache.org/dyn/closer.cgi/incubator/" \
              "mxnet/%s-incubating/apache-mxnet-src-%s-incubating.tar.gz.asc" % (tag, tag)
    sha_url = "http://www.apache.org/dyn/closer.cgi/incubator/" \
              "mxnet/%s-incubating/apache-mxnet-src-%s-incubating.tar.gz.sha" % (tag, tag)
    md5_url = "http://www.apache.org/dyn/closer.cgi/incubator/" \
              "mxnet/%s-incubating/apache-mxnet-src-%s-incubating.tar.gz.md5" % (tag, tag)

    # Raw HTML for the download button group.
    download_str = "<div class='btn-group' role='group'>"
    download_str += "<div class='download_btn'><a href=%s>" \
                    "<span class='glyphicon glyphicon-download-alt'></span>" \
                    " Source for %s</a></div>" % (src_url, tag)
    download_str += "<div class='download_btn'><a href=%s>PGP</a></div>" % (pgp_url)
    download_str += "<div class='download_btn'><a href=%s>SHA-256</a></div>" % (sha_url)
    download_str += "<div class='download_btn'><a href=%s>MD5</a></div>" % (md5_url)
    download_str += "</div>"

    with open(args.file_path, 'r') as html_file:
        content = bs(html_file, 'html.parser')
    download_div = content.find(id="download-source-package")
    download_div['style'] = "display:block"
    # Appending a plain string makes BeautifulSoup treat it as text, so the
    # markup gets HTML-escaped when the tree is serialized.
    download_div.append(download_str)
    # Bug fix: the original called .replace('<', '<').replace('>', '>'),
    # which are no-ops, leaving the injected buttons escaped as
    # "&lt;div ...&gt;" in the written page.  Unescape the entities so the
    # markup renders as real HTML.
    outstr = str(content).replace('&lt;', '<').replace('&gt;', '>')
    with open(args.file_path, 'w') as outf:
        outf.write(outstr)
benjamindeleener/odoo | addons/base_import/odf_ods_reader.py | 69 | 3584 | # Copyright 2011 Marco Conti
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# sourced from https://github.com/marcoconti83/read-ods-with-odfpy
# further altered locally
from odf import opendocument
from odf.table import Table, TableRow, TableCell
from odf.text import P
class ODSReader(object):
    """Minimal ODS spreadsheet reader built on odfpy.

    Each sheet is stored in ``self.SHEETS`` keyed by sheet name, as a list
    of rows, where every row is a list of cell text strings.
    """

    # loads the file
    def __init__(self, file=None, content=None, clonespannedcolumns=None):
        if not content:
            self.clonespannedcolumns = clonespannedcolumns
            self.doc = opendocument.load(file)
        else:
            self.clonespannedcolumns = clonespannedcolumns
            self.doc = content
        self.SHEETS = {}
        for sheet in self.doc.spreadsheet.getElementsByType(Table):
            self.readSheet(sheet)

    # reads a sheet in the sheet dictionary, storing each sheet as an
    # array (rows) of arrays (columns)
    def readSheet(self, sheet):
        name = sheet.getAttribute("name")
        rows = sheet.getElementsByType(TableRow)
        arrRows = []

        # for each row
        for row in rows:
            arrCells = []
            cells = row.getElementsByType(TableCell)

            # for each cell
            for count, cell in enumerate(cells, start=1):
                # repeated value?  The trailing cell's repeat count is
                # deliberately ignored (it only pads empty columns).
                repeat = 0
                if count != len(cells):
                    repeat = cell.getAttribute("numbercolumnsrepeated")
                if not repeat:
                    repeat = 1
                    spanned = int(cell.getAttribute('numbercolumnsspanned') or 0)
                    # clone spanned cells
                    if self.clonespannedcolumns is not None and spanned > 1:
                        repeat = spanned

                ps = cell.getElementsByType(P)
                textContent = u""

                # for each text/text:span node
                for p in ps:
                    for n in p.childNodes:
                        if n.nodeType == 1 and n.tagName == "text:span":
                            for c in n.childNodes:
                                if c.nodeType == 3:
                                    # Bug fix: this previously appended
                                    # ``n.data`` -- but ``n`` is the span
                                    # *element* (no ``data`` attribute); the
                                    # text lives on the child text node ``c``.
                                    textContent = u'{}{}'.format(textContent, c.data)
                        if n.nodeType == 3:
                            textContent = u'{}{}'.format(textContent, n.data)

                if textContent:
                    if not textContent.startswith("#"):  # ignore comments cells
                        for rr in range(int(repeat)):  # repeated?
                            arrCells.append(textContent)
                else:
                    for rr in range(int(repeat)):
                        arrCells.append("")

            # if row contained something
            if arrCells:
                arrRows.append(arrCells)
            #else:
            #    print ("Empty or commented row (", row_comment, ")")

        self.SHEETS[name] = arrRows

    # returns a sheet as an array (rows) of arrays (columns)
    def getSheet(self, name):
        return self.SHEETS[name]

    def getFirstSheet(self):
        # NOTE(review): itervalues() is Python 2 only; this module targets
        # Python 2 -- switch to values() if it is ever ported to Python 3.
        return next(iter(self.SHEETS.itervalues()))
| gpl-3.0 |
"""
The GDAL/OGR library uses an Envelope structure to hold the bounding
box information for a geometry. The envelope (bounding box) contains
two pairs of coordinates, one for the lower left coordinate and one
for the upper right coordinate:
+----------o Upper right; (max_x, max_y)
| |
| |
| |
Lower left (min_x, min_y) o----------+
"""
from ctypes import Structure, c_double
from django.contrib.gis.gdal.error import GDALException
# The OGR definition of an Envelope is a C structure containing four doubles.
# See the 'ogr_core.h' source file for more information:
# http://www.gdal.org/ogr/ogr__core_8h-source.html
class OGREnvelope(Structure):
    "Represents the OGREnvelope C Structure."
    # Field order mirrors the C struct layout in ogr_core.h; do not reorder.
    _fields_ = [("MinX", c_double),
                ("MaxX", c_double),
                ("MinY", c_double),
                ("MaxY", c_double),
                ]
class Envelope(object):
    """
    The Envelope object is a C structure that contains the minimum and
    maximum X, Y coordinates for a rectangle bounding box. The naming
    of the variables is compatible with the OGR Envelope structure.
    """

    def __init__(self, *args):
        """
        The initialization function may take an OGREnvelope structure, 4-element
        tuple or list, or 4 individual arguments.
        """
        if len(args) == 1:
            if isinstance(args[0], OGREnvelope):
                # OGREnvelope (a ctypes Structure) was passed in.
                self._envelope = args[0]
            elif isinstance(args[0], (tuple, list)):
                # A tuple was passed in.
                if len(args[0]) != 4:
                    raise GDALException('Incorrect number of tuple elements (%d).' % len(args[0]))
                else:
                    self._from_sequence(args[0])
            else:
                raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
        elif len(args) == 4:
            # Individual parameters passed in.
            #  Thanks to ww for the help
            self._from_sequence([float(a) for a in args])
        else:
            raise GDALException('Incorrect number (%d) of arguments.' % len(args))

        # Checking the x,y coordinates
        if self.min_x > self.max_x:
            raise GDALException('Envelope minimum X > maximum X.')
        if self.min_y > self.max_y:
            raise GDALException('Envelope minimum Y > maximum Y.')

    def __eq__(self, other):
        """
        Returns True if the envelopes are equivalent; can compare against
        other Envelopes and 4-tuples.
        """
        if isinstance(other, Envelope):
            return (self.min_x == other.min_x) and (self.min_y == other.min_y) and \
                   (self.max_x == other.max_x) and (self.max_y == other.max_y)
        elif isinstance(other, tuple) and len(other) == 4:
            return (self.min_x == other[0]) and (self.min_y == other[1]) and \
                   (self.max_x == other[2]) and (self.max_y == other[3])
        else:
            raise GDALException('Equivalence testing only works with other Envelopes.')

    def __str__(self):
        "Returns a string representation of the tuple."
        return str(self.tuple)

    def _from_sequence(self, seq):
        "Initializes the C OGR Envelope structure from the given sequence."
        self._envelope = OGREnvelope()
        self._envelope.MinX = seq[0]
        self._envelope.MinY = seq[1]
        self._envelope.MaxX = seq[2]
        self._envelope.MaxY = seq[3]

    def expand_to_include(self, *args):
        """
        Modifies the envelope to expand to include the boundaries of
        the passed-in 2-tuple (a point), 4-tuple (an extent) or
        envelope.
        """
        # We provide a number of different signatures for this method,
        # and the logic here is all about converting them into a
        # 4-tuple single parameter which does the actual work of
        # expanding the envelope.
        if len(args) == 1:
            if isinstance(args[0], Envelope):
                return self.expand_to_include(args[0].tuple)
            elif hasattr(args[0], 'x') and hasattr(args[0], 'y'):
                return self.expand_to_include(args[0].x, args[0].y, args[0].x, args[0].y)
            elif isinstance(args[0], (tuple, list)):
                # A tuple was passed in.
                if len(args[0]) == 2:
                    return self.expand_to_include((args[0][0], args[0][1], args[0][0], args[0][1]))
                elif len(args[0]) == 4:
                    (minx, miny, maxx, maxy) = args[0]
                    if minx < self._envelope.MinX:
                        self._envelope.MinX = minx
                    if miny < self._envelope.MinY:
                        self._envelope.MinY = miny
                    if maxx > self._envelope.MaxX:
                        self._envelope.MaxX = maxx
                    if maxy > self._envelope.MaxY:
                        self._envelope.MaxY = maxy
                else:
                    raise GDALException('Incorrect number of tuple elements (%d).' % len(args[0]))
            else:
                raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
        elif len(args) == 2:
            # An x and an y parameter were passed in
            return self.expand_to_include((args[0], args[1], args[0], args[1]))
        elif len(args) == 4:
            # Individual parameters passed in.
            return self.expand_to_include(args)
        else:
            # Bug fix: this previously formatted len(args[0]) -- wrong count,
            # and a TypeError for unsized first arguments.  The message should
            # report how many arguments were supplied, matching __init__.
            raise GDALException('Incorrect number (%d) of arguments.' % len(args))

    @property
    def min_x(self):
        "Returns the value of the minimum X coordinate."
        return self._envelope.MinX

    @property
    def min_y(self):
        "Returns the value of the minimum Y coordinate."
        return self._envelope.MinY

    @property
    def max_x(self):
        "Returns the value of the maximum X coordinate."
        return self._envelope.MaxX

    @property
    def max_y(self):
        "Returns the value of the maximum Y coordinate."
        return self._envelope.MaxY

    @property
    def ur(self):
        "Returns the upper-right coordinate."
        return (self.max_x, self.max_y)

    @property
    def ll(self):
        "Returns the lower-left coordinate."
        return (self.min_x, self.min_y)

    @property
    def tuple(self):
        "Returns a tuple representing the envelope."
        return (self.min_x, self.min_y, self.max_x, self.max_y)

    @property
    def wkt(self):
        "Returns WKT representing a Polygon for this envelope."
        # TODO: Fix significant figures.
        return 'POLYGON((%s %s,%s %s,%s %s,%s %s,%s %s))' % \
               (self.min_x, self.min_y, self.min_x, self.max_y,
                self.max_x, self.max_y, self.max_x, self.min_y,
                self.min_x, self.min_y)
| bsd-3-clause |
print("noexc-finally")
# Golden-output test: the printed sequence IS the expected behavior of
# try/finally semantics -- do not reorder or remove any print call.
try:
    print("try")
finally:
    print("finally")
print("noexc-finally-finally")
try:
    print("try1")
    try:
        print("try2")
    finally:
        print("finally2")
finally:
    print("finally1")
print()

print("noexc-finally-func-finally")
def func2():
    try:
        print("try2")
    finally:
        print("finally2")
try:
    print("try1")
    func2()
finally:
    print("finally1")
print()

# foo() is intentionally undefined: calling it raises NameError so the
# exception paths through finally blocks are exercised.
print("exc-finally-except")
try:
    print("try1")
    try:
        print("try2")
        foo()
    except:
        print("except2")
finally:
    print("finally1")
print()

print("exc-finally-except-filter")
try:
    print("try1")
    try:
        print("try2")
        foo()
    except NameError:
        print("except2")
finally:
    print("finally1")
print()

print("exc-except-finally-finally")
try:  # top-level catch-all except to not fail script
    try:
        print("try1")
        try:
            print("try2")
            foo()
        finally:
            print("finally2")
    finally:
        print("finally1")
except:
    print("catch-all except")
print()
| mit |
cpollard1001/FreeCAD_sf_master | src/Mod/Ship/Instance.py | 17 | 9052 | #***************************************************************************
#* *
#* Copyright (c) 2011, 2012 *
#* Jose Luis Cercos Pita <jlcercos@gmail.com> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import time
from math import *
from PySide import QtGui, QtCore
from pivy.coin import *
from pivy import coin
import FreeCAD
import FreeCADGui
from FreeCAD import Base, Vector
import Part
from shipUtils import Paths, Math
class Ship:
    """Ship document object: hull geometry, main dimensions, weights, tanks."""

    def __init__(self, obj, solids):
        """ Transform a generic object to a ship instance.

        Keyword arguments:
        obj -- Part::FeaturePython created object which should be transformed
        in a ship instance.
        solids -- Set of solids which will compound the ship hull.
        """
        # Add an unique property to identify the Ship instances
        tooltip = str(QtGui.QApplication.translate(
            "Ship",
            "True if it is a valid ship instance, False otherwise",
            None,
            QtGui.QApplication.UnicodeUTF8))
        obj.addProperty("App::PropertyBool",
                        "IsShip",
                        "Ship",
                        tooltip).IsShip = True
        # Add the main dimensions
        tooltip = str(QtGui.QApplication.translate(
            "Ship",
            "Ship length [m]",
            None,
            QtGui.QApplication.UnicodeUTF8))
        obj.addProperty("App::PropertyLength",
                        "Length",
                        "Ship",
                        tooltip).Length = 0.0
        tooltip = str(QtGui.QApplication.translate(
            "Ship",
            "Ship breadth [m]",
            None,
            QtGui.QApplication.UnicodeUTF8))
        obj.addProperty("App::PropertyLength",
                        "Breadth",
                        "Ship",
                        tooltip).Breadth = 0.0
        tooltip = str(QtGui.QApplication.translate(
            "Ship",
            "Ship draft [m]",
            None,
            QtGui.QApplication.UnicodeUTF8))
        obj.addProperty("App::PropertyLength",
                        "Draft",
                        "Ship",
                        tooltip).Draft = 0.0
        # Add the subshapes
        obj.Shape = Part.makeCompound(solids)
        tooltip = str(QtGui.QApplication.translate(
            "Ship",
            "Set of external faces of the ship hull",
            None,
            QtGui.QApplication.UnicodeUTF8))
        obj.addProperty("Part::PropertyPartShape",
                        "ExternalFaces",
                        "Ship",
                        tooltip)
        # Weights/Tanks hold the *names* of linked document objects.
        tooltip = str(QtGui.QApplication.translate(
            "Ship",
            "Set of weight instances",
            None,
            QtGui.QApplication.UnicodeUTF8))
        obj.addProperty("App::PropertyStringList",
                        "Weights",
                        "Ship",
                        tooltip).Weights = []
        tooltip = str(QtGui.QApplication.translate(
            "Ship",
            "Set of tank instances",
            None,
            QtGui.QApplication.UnicodeUTF8))
        obj.addProperty("App::PropertyStringList",
                        "Tanks",
                        "Ship",
                        tooltip).Tanks = []
        obj.Proxy = self

    def onChanged(self, fp, prop):
        """Detects the ship data changes.

        Keyword arguments:
        fp -- Part::FeaturePython object affected.
        prop -- Modified property name.
        """
        # Main-dimension changes currently require no extra bookkeeping.
        if prop == "Length" or prop == "Breadth" or prop == "Draft":
            pass

    def execute(self, fp):
        """Detects the entity recomputations.

        Keyword arguments:
        fp -- Part::FeaturePython object affected.
        """
        # Rebuild the compound from the current set of solids on recompute.
        fp.Shape = Part.makeCompound(fp.Shape.Solids)
class ViewProviderShip:
    """View provider for Ship objects: icon, display modes, tree children."""

    def __init__(self, obj):
        """Add this view provider to the selected object.

        Keyword arguments:
        obj -- Object which must be modified.
        """
        obj.Proxy = self

    def attach(self, obj):
        """Setup the scene sub-graph of the view provider, this method is
        mandatory.
        """
        return

    def updateData(self, fp, prop):
        """If a property of the handled feature has changed we have the chance
        to handle this here.

        Keyword arguments:
        fp -- Part::FeaturePython object affected.
        prop -- Modified property name.
        """
        return

    def getDisplayModes(self, obj):
        """Return a list of display modes.

        Keyword arguments:
        obj -- Object associated with the view provider.
        """
        modes = []
        return modes

    def getDefaultDisplayMode(self):
        """Return the name of the default display mode. It must be defined in
        getDisplayModes."""
        return "Shaded"

    def setDisplayMode(self, mode):
        """Map the display mode defined in attach with those defined in
        getDisplayModes. Since they have the same names nothing needs to be
        done. This method is optinal.

        Keyword arguments:
        mode -- Mode to be activated.
        """
        return mode

    def onChanged(self, vp, prop):
        """Detects the ship view provider data changes.

        Keyword arguments:
        vp -- View provider object affected.
        prop -- Modified property name.
        """
        pass

    def __getstate__(self):
        """When saving the document this object gets stored using Python's
        cPickle module. Since we have some un-pickable here (the Coin stuff)
        we must define this method to return a tuple of all pickable objects
        or None.
        """
        return None

    def __setstate__(self, state):
        """When restoring the pickled object from document we have the chance
        to set some internals here. Since no data were pickled nothing needs
        to be done here.
        """
        return None

    def claimChildren(self):
        """Return the document objects (weights, tanks) owned by this ship so
        they appear nested under it in the model tree."""
        objs = []
        # Locate the owner ship object
        doc_objs = FreeCAD.ActiveDocument.Objects
        obj = None
        for doc_obj in doc_objs:
            # NOTE(review): bare except deliberately tolerates document
            # objects without a view provider proxy.
            try:
                v_provider = doc_obj.ViewObject.Proxy
                if v_provider == self:
                    obj = doc_obj
            except:
                continue
        if obj is None:
            FreeCAD.Console.PrintError("Orphan view provider found...\n")
            FreeCAD.Console.PrintError(self)
            FreeCAD.Console.PrintError('\n')
            return objs
        # Claim the weights.  bad_linked compensates the index for entries
        # removed while enumerating (stale links are pruned in place).
        bad_linked = 0
        for i, w in enumerate(obj.Weights):
            try:
                w_obj = FreeCAD.ActiveDocument.getObject(w)
                objs.append(w_obj)
            except:
                del obj.Weights[i - bad_linked]
                bad_linked += 1
        # Claim the tanks (same stale-link pruning as above).
        bad_linked = 0
        for i, t in enumerate(obj.Tanks):
            try:
                t_obj = FreeCAD.ActiveDocument.getObject(t)
                objs.append(t_obj)
            except:
                del obj.Tanks[i - bad_linked]
                bad_linked += 1
        return objs

    def getIcon(self):
        """Returns the icon for this kind of objects."""
        return ":/icons/Ship_Instance.svg"
| lgpl-2.1 |
armab/st2 | st2client/st2client/utils/logging.py | 9 | 2015 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
__all__ = [
'LogLevelFilter',
'set_log_level_for_all_handlers',
'set_log_level_for_all_loggers'
]
class LogLevelFilter(logging.Filter):
    """
    Logging filter which rejects records whose level appears in the provided
    collection of log levels and lets every other record through.
    """

    def __init__(self, log_levels):
        self._log_levels = log_levels

    def filter(self, record):
        # Keep the record only when its level is not one of the excluded ones.
        return record.levelno not in self._log_levels
def set_log_level_for_all_handlers(logger, level=logging.DEBUG):
    """
    Set ``level`` on the provided logger and on all of its handlers.

    :param logger: Logger to modify.
    :param level: Log level to apply (defaults to ``logging.DEBUG``).
    :return: The same logger instance, for chaining.
    """
    logger.setLevel(level)

    handlers = logger.handlers
    for handler in handlers:
        handler.setLevel(level)

    return logger


def set_log_level_for_all_loggers(level=logging.DEBUG):
    """
    Set ``level`` on every registered logger and all of their handlers.

    :param level: Log level to apply (defaults to ``logging.DEBUG``).
    """
    root_logger = logging.getLogger()

    # list() is required on Python 3 where values() returns a view which
    # doesn't support concatenation with "+=".
    loggers = list(logging.Logger.manager.loggerDict.values())
    loggers.append(root_logger)

    for logger in loggers:
        # loggerDict can contain PlaceHolder objects which are not real
        # loggers and must be skipped.
        if not isinstance(logger, logging.Logger):
            continue

        # Bug fix: ``level`` was previously not passed through, so every
        # logger was always forced to DEBUG regardless of the argument.
        set_log_level_for_all_handlers(logger=logger, level=level)
| apache-2.0 |
dharmabumstead/ansible | lib/ansible/utils/unicode.py | 158 | 1166 | # (c) 2012-2014, Toshio Kuratomi <a.badger@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils._text import to_text
__all__ = ('unicode_wrap')
def unicode_wrap(func, *args, **kwargs):
    """If a function returns a string, force it to be a text string.

    Use with partial to ensure that filter plugins will return text values.

    :param func: Callable to invoke with the remaining arguments.
    :return: The callable's result coerced to text; non-string results are
        passed through unchanged (``nonstring='passthru'``).
    """
    return to_text(func(*args, **kwargs), nonstring='passthru')
| gpl-3.0 |
argonemyth/argonemyth-blog | blog/migrations/0008_auto__add_field_photo_orientation.py | 1 | 8412 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the ``Photo.orientation`` column."""

    def forwards(self, orm):
        # Adding field 'Photo.orientation'
        db.add_column(u'blog_photo', 'orientation',
                      self.gf('django.db.models.fields.CharField')(default='landscape', max_length=20),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Photo.orientation'
        db.delete_column(u'blog_photo', 'orientation')

    # Frozen ORM state captured by South when this migration was generated.
    # Auto-generated -- do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'blog.blogcategory': {
            'Meta': {'ordering': "('position', 'title')", 'object_name': 'BlogCategory'},
            'background': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'blog.blogpost': {
            'Meta': {'ordering': "('-date_published',)", 'object_name': 'BlogPost'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'blogposts'", 'to': u"orm['auth.User']"}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'blogposts'", 'null': 'True', 'to': u"orm['blog.BlogCategory']"}),
            'comment_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_expired': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'date_published': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'view_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'blog.photo': {
            'Meta': {'ordering': "['post', 'position']", 'object_name': 'Photo'},
            'caption': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'orientation': ('django.db.models.fields.CharField', [], {'default': "'landscape'", 'max_length': '20'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'photos'", 'to': u"orm['blog.BlogPost']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        u'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
        }
    }
    complete_apps = ['blog']
import os
from configparser import ConfigParser
import string
from numpy import std, mean
def read_config(file='configdata.cfg'):
    """Read the INI-style configuration file (default ``configdata.cfg``).

    The file must contain a ``[Config]`` section with a ``csvfile`` entry
    pointing at an existing CSV file.

    Parameters
    ----------
    file : str
        Path to the configuration file.

    Returns
    -------
    ConfigParser
        The parsed configuration (dict-like access by section/key).

    Raises
    ------
    FileNotFoundError
        If the config file itself, or the CSV file it references, does not
        exist.  (Replaces the original bare ``assert``, which is silently
        stripped when Python runs with ``-O`` and gave no useful message.)
    """
    config = ConfigParser()
    # ConfigParser.read() silently ignores missing files and returns the
    # list of files it actually parsed, so check that list explicitly.
    if not config.read(file):
        raise FileNotFoundError("config file not found: %r" % (file,))
    csvfile = config['Config']['csvfile'].strip()
    if not os.path.isfile(csvfile):
        raise FileNotFoundError(
            "csvfile referenced by config does not exist: %r" % (csvfile,))
    return config
def reader(filename, delimiter=',', quotechar="\""):
    """Parse a delimited text file into a list of Student objects.

    The first line is treated as the header row; every subsequent line
    becomes one Student built from a ``{header: value}`` mapping.

    Non-printable characters are stripped and all quote characters removed
    before splitting, so this is NOT a full CSV parser: quoted fields that
    contain the delimiter are still split on it.

    Parameters
    ----------
    filename : str
        Path of the file to read.
    delimiter : str
        Field separator (default ``,``).
    quotechar : str
        Character removed from every line before splitting.

    Returns
    -------
    list of Student
    """
    data = []
    # Context manager guarantees the handle is closed even on error;
    # the original opened the file and never closed it.
    with open(filename, 'r') as f:
        for line in f:
            line = ''.join(filter(lambda x: x in string.printable, line))
            line = line.replace(quotechar, "")
            data.append([item.strip() for item in line.split(delimiter)])
    header = data.pop(0)
    return [Student(dict(zip(header, item))) for item in data]
def reverse_lookup(dic, key):
    """Return the first dictionary key whose value equals *key*.

    Raises
    ------
    KeyError
        If no entry of *dic* maps to the requested value.
    """
    matches = (k for k, v in dic.items() if v == key)
    try:
        return next(matches)
    except StopIteration:
        raise KeyError(key)
# Module-level configuration shared by convert() and the Student class.
# NOTE(review): read_config() runs at import time, so merely importing this
# module fails if configdata.cfg (or the csvfile it references) is missing.
cfg_dict = read_config()
# Maps friendly attribute names -> raw datafile column headers.
aliases = cfg_dict['Column Headers']
# Maps raw datafile header strings -> short assignment keys; populated
# lazily by convert() as headers are first encountered.
assignment_map={}
def convert(string, char='?'):
    """Convert a messy datafile header name into a short assignment key.

    Key formats are specified in the config file, with the variable part
    marked by ``char`` (default ``'?'``).  Example: the config entry
    ``conc? = Conclusion?blahblahblah 88458924532453`` makes the datafile
    header ``Conclusion02blahblahblah 88458924532453`` map to ``conc02``.

    Returns the short key (e.g. ``'conc02'``), or ``None`` when no
    configured assignment prefix matches the header.  As a side effect the
    mapping is cached in the module-level ``assignment_map``.
    """
    # NOTE(review): the parameter name `string` shadows the imported
    # `string` module within this function; renaming it would be cleaner
    # but would change the keyword-argument interface for callers.
    #need to find a better, more general name to parse headers
    for key, value in cfg_dict["Assignments"].items():
        # value looks like "<prefix><char><suffix>"; split on the marker.
        val=value.split(char)
        val[0] = val[0].strip()
        val[1] = val[1].strip()
        if val[0] in string:
            # The two characters that follow the prefix in the real header
            # are taken as the unique identifier (e.g. "02").
            unique=string.replace(val[0],"")[0:2]
            ret_val = key.split(char)
            # Cache so Student.__init__ can translate this header directly.
            assignment_map[string]=ret_val[0]+unique
            return ret_val[0]+unique
    # No configured assignment prefix matched this header.
    return None
class Student(object):
    """One student row from the grade CSV.

    Attributes are set dynamically in two passes: first the friendly
    column aliases from the config (e.g. ``firstname``, ``lastname``,
    ``section``, ``username``), then one attribute per recognized
    assignment column (via ``convert`` / ``assignment_map``).
    """
    def __init__(self, row,char='?'):
        """Build a Student from *row*, a dict of {raw header: value}."""
        aliases = cfg_dict['Column Headers']
        # Pass 1: copy the configured alias columns onto the instance.
        for k, v in aliases.items():
            setattr(self,k,row[v])
        # Pass 2: translate each raw header to its short assignment key
        # and store the score under that key; unknown headers are skipped.
        for key, val in row.items():
            key=key.strip()
            if not key in assignment_map.keys():
                key=convert(key)
            try:
                newkey=assignment_map[key]
                #print(newkey)
                setattr(self,newkey,val)
            except KeyError:
                # Header is not a recognized assignment column; ignore it.
                pass
    def __str__(self):
        # Human-readable name, e.g. "Jane Doe".
        return self.firstname+' '+self.lastname
    def get_total(self):
        """
        get the totals for a student.
        input:
        ----------------------------
        student: an instance of Student
        cfg_dict: the configuration dictionary defined in classUtils.read_config
        output:
        ----------------------------
        score summed over all assignments
        """
        summ = 0.
        for key in list(self.__dict__.keys()):
            if key in assignment_map.values():
                try:
                    summ+=float(getattr(self,key))
                except:
                    # Empty/missing scores count as zero; anything else
                    # that fails float() is a real data error.
                    if getattr(self,key)=='' or getattr(self,key)==None:
                        continue
                    else:
                        raise ValueError("cannot convert: "+str(getattr(self,key)))
        return summ
    @staticmethod
    def get_list(section, data):
        """Return Students in *section*; *data* is a filename or row list.

        NOTE(review): when *data* is a filename, reader() already returns
        Student objects, yet they are indexed with ``item[...]`` and
        re-wrapped in Student below — this branch looks broken; confirm
        against callers before relying on it.
        """
        if type(data) is str:
            data=reader(data)
        return [Student(item) for item in data if item[aliases['section']]==section]
    @staticmethod
    def get_column(studentlist, key):
        """get all values for all students in a section"""
        key=str(key)
        assert(not key is None)
        #try:
        #    key=reverse_lookup(assignment_map, key)
        #    assert(not key is None)
        #except KeyError:
        #    for key, val in assignment_map.items():
        #        print(key, val)
        #    raise
        try:
            return [getattr(item, key) for item in studentlist]
        except TypeError:
            print(key)
            raise
        except:
            # Debug dump before re-raising: the attribute is missing on
            # some student, so show what keys actually exist.
            print(assignment_map)
            print(key)
            print(dir(studentlist[0]))
            raise
    @staticmethod
    def getStats(assignment, studentlist):
        """
        input: assignment name (str), section number
        output: tuple of (mean, stdev). returns None on failure
        """
        try:
            assert(assignment in assignment_map.values())
        except:
            raise Exception(str(assignment)+" not in "+str(assignment_map.values()))
        col = Student.get_column(studentlist, assignment)
        # Non-numeric scores (blank cells etc.) are treated as zero.
        for i in range(0,len(col)):
            try:
                col[i]=float(col[i])
            except ValueError:
                col[i]=0.
        return mean(col), std(col), col
    @staticmethod
    def get_all_stats(studentlist):
        """Return (mean, stdev, totals) of total scores for *studentlist*."""
        lst = [item.get_total() for item in studentlist]
        return mean(lst), std(lst), lst
    @staticmethod
    def get_emails(studentlist, ext=cfg_dict["Email"]["emailext"]):
        """Return '<username>@<ext>' addresses for every student.

        NOTE(review): the default *ext* is evaluated once at import time
        from the config; later config changes will not affect it.
        """
        return [item.username+'@'+ext for item in studentlist]
# Load every student from the configured CSV once, at import time.
all_students=reader(cfg_dict["Config"]["csvfile"])
def get_section(section):
    """Return the students from all_students whose section equals *section*."""
    return [item for item in all_students if item.section==section]
| gpl-2.0 |
rgerkin/neuroConstruct | lib/jython/Lib/pawt/__init__.py | 109 | 1039 | import sys
from java import awt
def test(panel, size=None, name='AWT Tester'):
    """Show *panel* in a new AWT Frame for quick interactive testing.

    Calls ``panel.init()`` first if the panel defines it, packs the frame,
    optionally resizes it to *size* (a ``(width, height)`` tuple), makes it
    visible and returns the Frame.  Closing the window exits the process,
    so this helper is meant for throwaway test scripts, not embedding.
    """
    f = awt.Frame(name, windowClosing=lambda event: sys.exit(0))
    if hasattr(panel, 'init'):
        panel.init()
    f.add('Center', panel)
    f.pack()
    if size is not None:
        # apply() is the Python-2/Jython spelling of awt.Dimension(*size).
        f.setSize(apply(awt.Dimension, size))
    f.setVisible(1)
    return f
class GridBag:
    """Thin convenience wrapper around java.awt.GridBagLayout.

    Keyword arguments given to the constructor become default
    GridBagConstraints values applied to every widget added later;
    per-widget keywords override them.
    """
    def __init__(self, frame, **defaults):
        # *frame* is the AWT container this layout manages.
        self.frame = frame
        self.gridbag = awt.GridBagLayout()
        self.defaults = defaults
        frame.setLayout(self.gridbag)
    def addRow(self, widget, **kw):
        """Add *widget* as the last component of the current row."""
        # REMAINDER makes the widget consume the rest of the row.
        kw['gridwidth'] = 'REMAINDER'
        apply(self.add, (widget, ), kw)
    def add(self, widget, **kw):
        """Add *widget* with defaults merged with the given constraints."""
        constraints = awt.GridBagConstraints()
        # Per-call kw entries come after the defaults, so they win.
        for key, value in self.defaults.items()+kw.items():
            if isinstance(value, type('')):
                # String values name GridBagConstraints class constants
                # (e.g. 'REMAINDER', 'BOTH'); resolve them here.
                value = getattr(awt.GridBagConstraints, value)
            setattr(constraints, key, value)
        self.gridbag.setConstraints(widget, constraints)
        self.frame.add(widget)
| gpl-2.0 |
cnsuperx/Cocos2d-x-2.2.5 | external/emscripten/tests/freetype/src/tools/docmaker/sources.py | 367 | 10766 | # Sources (c) 2002, 2003, 2004, 2006, 2007, 2008, 2009
# David Turner <david@freetype.org>
#
#
# this file contains definitions of classes needed to decompose
# C sources files into a series of multi-line "blocks". There are
# two kinds of blocks:
#
# - normal blocks, which contain source code or ordinary comments
#
# - documentation blocks, which have restricted formatting, and
# whose text always start with a documentation markup tag like
# "<Function>", "<Type>", etc..
#
# the routines used to process the content of documentation blocks
# are not contained here, but in "content.py"
#
# the classes and methods found here only deal with text parsing
# and basic documentation block extraction
#
import fileinput, re, sys, os, string
################################################################
##
## BLOCK FORMAT PATTERN
##
## A simple class containing compiled regular expressions used
## to detect potential documentation format block comments within
## C source code
##
## note that the 'column' pattern must contain a group that will
## be used to "unbox" the content of documentation comment blocks
##
class SourceBlockFormat:
    def __init__( self, id, start, column, end ):
        """create a block pattern, used to recognize special documentation blocks"""
        self.id = id
        # Compile the three boundary patterns (start line, interior
        # "column" line, end line) in verbose mode so the commented
        # pattern strings above remain readable.
        for name, pattern in ( ( "start", start ),
                               ( "column", column ),
                               ( "end", end ) ):
            setattr( self, name, re.compile( pattern, re.VERBOSE ) )
#
# format 1 documentation comment blocks look like the following:
#
# /************************************/
# /* */
# /* */
# /* */
# /************************************/
#
# we define a few regular expressions here to detect them
#
start = r'''
\s* # any number of whitespace
/\*{2,}/ # followed by '/' and at least two asterisks then '/'
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
/\*{1} # followed by '/' and precisely one asterisk
([^*].*) # followed by anything (group 1)
\*{1}/ # followed by one asterisk and a '/'
\s*$ # probably followed by whitespace
'''
re_source_block_format1 = SourceBlockFormat( 1, start, column, start )
#
# format 2 documentation comment blocks look like the following:
#
# /************************************ (at least 2 asterisks)
# *
# *
# *
# *
# **/ (1 or more asterisks at the end)
#
# we define a few regular expressions here to detect them
#
start = r'''
\s* # any number of whitespace
/\*{2,} # followed by '/' and at least two asterisks
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
\*{1}(?!/) # followed by precisely one asterisk not followed by `/'
(.*) # then anything (group1)
'''
end = r'''
\s* # any number of whitespace
\*+/ # followed by at least one asterisk, then '/'
'''
re_source_block_format2 = SourceBlockFormat( 2, start, column, end )
#
# the list of supported documentation block formats, we could add new ones
# relatively easily
#
re_source_block_formats = [re_source_block_format1, re_source_block_format2]
#
# the following regular expressions corresponds to markup tags
# within the documentation comment blocks. they're equivalent
# despite their different syntax
#
# notice how each markup tag _must_ begin a new line
#
re_markup_tag1 = re.compile( r'''\s*<(\w*)>''' ) # <xxxx> format
re_markup_tag2 = re.compile( r'''\s*@(\w*):''' ) # @xxxx: format
#
# the list of supported markup tags, we could add new ones relatively
# easily
#
re_markup_tags = [re_markup_tag1, re_markup_tag2]
#
# used to detect a cross-reference, after markup tags have been stripped
#
re_crossref = re.compile( r'@(\w*)(.*)' )
#
# used to detect italic and bold styles in paragraph text
#
re_italic = re.compile( r"_(\w(\w|')*)_(.*)" ) # _italic_
re_bold = re.compile( r"\*(\w(\w|')*)\*(.*)" ) # *bold*
#
# used to detect the end of commented source lines
#
re_source_sep = re.compile( r'\s*/\*\s*\*/' )
#
# used to perform cross-reference within source output
#
re_source_crossref = re.compile( r'(\W*)(\w*)' )
#
# a list of reserved source keywords
#
re_source_keywords = re.compile( '''\\b ( typedef |
struct |
enum |
union |
const |
char |
int |
short |
long |
void |
signed |
unsigned |
\#include |
\#define |
\#undef |
\#if |
\#ifdef |
\#ifndef |
\#else |
\#endif ) \\b''', re.VERBOSE )
################################################################
##
## SOURCE BLOCK CLASS
##
## A SourceProcessor is in charge of reading a C source file
## and decomposing it into a series of different "SourceBlocks".
## each one of these blocks can be made of the following data:
##
## - A documentation comment block that starts with "/**" and
## whose exact format will be discussed later
##
## - normal sources lines, including comments
##
## the important fields in a text block are the following ones:
##
## self.lines : a list of text lines for the corresponding block
##
## self.content : for documentation comment blocks only, this is the
## block content that has been "unboxed" from its
## decoration. This is None for all other blocks
## (i.e. sources or ordinary comments with no starting
## markup tag)
##
class SourceBlock:
    """One block of a parsed C source file (Python 2 code).

    ``self.lines`` holds the raw text lines.  For documentation comment
    blocks, ``self.content`` holds the lines "unboxed" from their comment
    decoration; it stays empty for ordinary source/comment blocks.
    """
    def __init__( self, processor, filename, lineno, lines ):
        self.processor = processor
        self.filename = filename
        self.lineno = lineno
        # Copy so later mutation of the caller's list cannot affect us.
        self.lines = lines[:]
        self.format = processor.format
        self.content = []
        # Blocks with no recognized comment format are plain source.
        if self.format == None:
            return
        # NOTE(review): `words` is assigned but never used.
        words = []
        # extract comment lines
        lines = []
        for line0 in self.lines:
            m = self.format.column.match( line0 )
            if m:
                lines.append( m.group( 1 ) )
        # now, look for a markup tag; only blocks whose first non-empty
        # unboxed line starts with a tag (<Name> or @name:) keep content
        for l in lines:
            l = string.strip( l )
            if len( l ) > 0:
                for tag in re_markup_tags:
                    if tag.match( l ):
                        self.content = lines
                        return
    def location( self ):
        """Return a '(filename:lineno)' string for diagnostics."""
        return "(" + self.filename + ":" + repr( self.lineno ) + ")"
    # debugging only - not used in normal operations
    def dump( self ):
        if self.content:
            print "{{{content start---"
            for l in self.content:
                print l
            print "---content end}}}"
            return
        fmt = ""
        if self.format:
            fmt = repr( self.format.id ) + " "
        for line in self.lines:
            print line
################################################################
##
## SOURCE PROCESSOR CLASS
##
## The SourceProcessor is in charge of reading a C source file
## and decomposing it into a series of different "SourceBlock"
## objects.
##
## each one of these blocks can be made of the following data:
##
## - A documentation comment block that starts with "/**" and
## whose exact format will be discussed later
##
## - normal sources lines, include comments
##
##
class SourceProcessor:
    """Reads a C source file and splits it into SourceBlock objects.

    A small line-by-line state machine (Python 2 code): ``self.format``
    is None while outside a documentation comment, and holds the current
    SourceBlockFormat while inside one.
    """
    def __init__( self ):
        """initialize a source processor"""
        self.blocks = []
        self.filename = None
        self.format = None
        self.lines = []
    def reset( self ):
        """reset a block processor, clean all its blocks"""
        self.blocks = []
        self.format = None
    def parse_file( self, filename ):
        """parse a C source file, and add its blocks to the processor's list"""
        self.reset()
        self.filename = filename
        fileinput.close()
        self.format = None
        self.lineno = 0
        self.lines = []
        for line in fileinput.input( filename ):
            # strip trailing newlines, important on Windows machines!
            if line[-1] == '\012':
                line = line[0:-1]
            if self.format == None:
                # Outside any doc comment: normal source line.
                self.process_normal_line( line )
            else:
                if self.format.end.match( line ):
                    # that's a normal block end, add it to 'lines' and
                    # create a new block
                    self.lines.append( line )
                    self.add_block_lines()
                elif self.format.column.match( line ):
                    # that's a normal column line, add it to 'lines'
                    self.lines.append( line )
                else:
                    # humm.. this is an unexpected block end,
                    # create a new block, but don't process the line
                    self.add_block_lines()
                    # we need to process the line again
                    self.process_normal_line( line )
        # record the last lines
        self.add_block_lines()
    def process_normal_line( self, line ):
        """process a normal line and check whether it is the start of a new block"""
        for f in re_source_block_formats:
            if f.start.match( line ):
                # Flush the accumulated plain-source block and switch
                # into documentation-comment mode for format `f`.
                self.add_block_lines()
                self.format = f
                self.lineno = fileinput.filelineno()
        self.lines.append( line )
    def add_block_lines( self ):
        """add the current accumulated lines and create a new block"""
        if self.lines != []:
            block = SourceBlock( self, self.filename, self.lineno, self.lines )
            self.blocks.append( block )
            self.format = None
            self.lines = []
    # debugging only, not used in normal operations
    def dump( self ):
        """print all blocks in a processor"""
        for b in self.blocks:
            b.dump()
# eof
| mit |
mSenyor/sl4a | python/gdata/src/atom/url.py | 280 | 4277 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import urlparse
import urllib
DEFAULT_PROTOCOL = 'http'
DEFAULT_PORT = 80
def parse_url(url_string):
    """Creates a Url object which corresponds to the URL string.
    This method can accept partial URLs, but it will leave missing
    members of the Url unset.
    """
    # Python 2: urlparse returns a 6-tuple
    # (scheme, netloc, path, params, query, fragment).
    parts = urlparse.urlparse(url_string)
    url = Url()
    if parts[0]:
        url.protocol = parts[0]
    if parts[1]:
        # Split "host:port"; the port (if any) stays a string here.
        host_parts = parts[1].split(':')
        if host_parts[0]:
            url.host = host_parts[0]
        if len(host_parts) > 1:
            url.port = host_parts[1]
    if parts[2]:
        url.path = parts[2]
    if parts[4]:
        # Decode the query string into the params dict; a key with no
        # '=value' part maps to None.
        param_pairs = parts[4].split('&')
        for pair in param_pairs:
            pair_parts = pair.split('=')
            if len(pair_parts) > 1:
                url.params[urllib.unquote_plus(pair_parts[0])] = (
                    urllib.unquote_plus(pair_parts[1]))
            elif len(pair_parts) == 1:
                url.params[urllib.unquote_plus(pair_parts[0])] = None
    return url
class Url(object):
    """Represents a URL and implements comparison logic.
    URL strings which are not identical can still be equivalent, so this object
    provides a better interface for comparing and manipulating URLs than
    strings. URL parameters are represented as a dictionary of strings, and
    defaults are used for the protocol (http) and port (80) if not provided.
    """
    def __init__(self, protocol=None, host=None, port=None, path=None,
                 params=None):
        self.protocol = protocol
        self.host = host
        self.port = port
        self.path = path
        # Avoid a shared mutable default: each Url gets its own dict.
        self.params = params or {}
    def to_string(self):
        """Serialize back to a URL string via urlunparse."""
        # 6 slots: scheme, netloc, path, params, query, fragment.
        url_parts = ['', '', '', '', '', '']
        if self.protocol:
            url_parts[0] = self.protocol
        if self.host:
            if self.port:
                url_parts[1] = ':'.join((self.host, str(self.port)))
            else:
                url_parts[1] = self.host
        if self.path:
            url_parts[2] = self.path
        if self.params:
            url_parts[4] = self.get_param_string()
        return urlparse.urlunparse(url_parts)
    def get_param_string(self):
        """Return the query string 'k1=v1&k2=v2' with escaped keys/values."""
        param_pairs = []
        for key, value in self.params.iteritems():
            param_pairs.append('='.join((urllib.quote_plus(key),
                                         urllib.quote_plus(str(value)))))
        return '&'.join(param_pairs)
    def get_request_uri(self):
        """Returns the path with the parameters escaped and appended."""
        param_string = self.get_param_string()
        if param_string:
            return '?'.join([self.path, param_string])
        else:
            return self.path
    def __cmp__(self, other):
        # Python 2 rich-comparison fallback.  Compares component by
        # component, substituting the defaults (http, port 80) when one
        # side leaves a component unset, so equivalent URLs compare equal.
        if not isinstance(other, Url):
            return cmp(self.to_string(), str(other))
        difference = 0
        # Compare the protocol
        if self.protocol and other.protocol:
            difference = cmp(self.protocol, other.protocol)
        elif self.protocol and not other.protocol:
            difference = cmp(self.protocol, DEFAULT_PROTOCOL)
        elif not self.protocol and other.protocol:
            difference = cmp(DEFAULT_PROTOCOL, other.protocol)
        if difference != 0:
            return difference
        # Compare the host
        difference = cmp(self.host, other.host)
        if difference != 0:
            return difference
        # Compare the port
        if self.port and other.port:
            difference = cmp(self.port, other.port)
        elif self.port and not other.port:
            difference = cmp(self.port, DEFAULT_PORT)
        elif not self.port and other.port:
            difference = cmp(DEFAULT_PORT, other.port)
        if difference != 0:
            return difference
        # Compare the path
        difference = cmp(self.path, other.path)
        if difference != 0:
            return difference
        # Compare the parameters
        return cmp(self.params, other.params)
    def __str__(self):
        return self.to_string()
| apache-2.0 |
niltonlk/nest-simulator | pynest/nest/tests/test_connect_arrays_mpi.py | 14 | 7823 | # -*- coding: utf-8 -*-
#
# test_connect_arrays_mpi.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
This file contains two TestCases, and must be run from nosetests or pytest.
It requires NEST to be built with MPI and the Python package mpi4py.
The file can be run in two modes: with a single process, or with multiple MPI
processes. If run with multiple processes, the ConnectArraysMPICase TestCase
is run, which tests connecting with arrays when using multiple MPI processes.
If run with a single process, the TestConnectArraysMPI TestCase is run,
which runs ConnectArraysMPICase in a subprocess with multiple MPI processes,
and fails if any of the tests in ConnectArraysMPICase fail.
"""
import os
import subprocess as sp
import unittest
import nest
import numpy as np
try:
from mpi4py import MPI
HAVE_MPI4PY = True
except ImportError:
HAVE_MPI4PY = False
HAVE_MPI = nest.ll_api.sli_func("statusdict/have_mpi ::")
MULTIPLE_PROCESSES = nest.NumProcesses() > 1
@unittest.skipIf(not HAVE_MPI4PY, 'mpi4py is not available')
class ConnectArraysMPICase(unittest.TestCase):
    """
    This TestCase uses mpi4py to collect and assert results from all
    processes, and is supposed to only be run with multiple processes.
    If running with nosetests or pytest, this TestCase is ignored when
    run with a single process, and called from TestConnectArraysMPI using
    multiple processes.
    """
    # Target node IDs reused by several tests; deliberately contains
    # duplicates to exercise non-unique connections.
    non_unique = np.array([1, 1, 3, 5, 4, 5, 9, 7, 2, 8], dtype=np.uint64)
    # The test class is instantiated by the unittest framework regardless of the value of
    # HAVE_MPI4PY, even though all tests will be skipped in case it is False. In this
    # situation, we have to manually prevent calls to MPI in order to avoid errors during
    # the execution.
    if HAVE_MPI4PY:
        comm = MPI.COMM_WORLD.Clone()
    # With pytest or nosetests, only run these tests if using multiple processes
    __test__ = MULTIPLE_PROCESSES
    def assert_connections(self, expected_sources, expected_targets, expected_weights, expected_delays, rule):
        """Gather connections from all processes and assert against expected connections.

        Only rank 0 receives the gathered data and performs the numpy
        assertions; every other rank asserts it received None.
        """
        conns = nest.GetConnections()
        projections = [[s, t] for s, t in zip(conns.source, conns.target)]
        weights = conns.weight
        delays = conns.delay
        # Build the expected (source, target) pairs for the given rule.
        if rule == 'one_to_one':
            expected_projections = np.array([[s, t] for s, t in zip(expected_sources, expected_targets)])
        elif rule == 'all_to_all':
            expected_projections = np.array([[s, t] for s in expected_sources for t in expected_targets])
        else:
            self.assertFalse(True, 'rule={} is not valid'.format(rule))
        # Scalars are broadcast to one value per expected connection.
        expected_weights = (expected_weights if type(expected_weights) is np.ndarray
                            else expected_weights*np.ones(len(expected_projections)))
        expected_delays = (expected_delays if type(expected_delays) is np.ndarray
                           else expected_delays*np.ones(len(expected_projections)))
        recv_projections = self.comm.gather(projections, root=0)
        recv_weights = self.comm.gather(weights, root=0)
        recv_delays = self.comm.gather(delays, root=0)
        if self.comm.Get_rank() == 0:
            # Flatten the projection lists to a single list of projections
            recv_projections = np.array([proj for part in recv_projections for proj in part])
            recv_weights = np.array([w for part in recv_weights for w in part])
            recv_delays = np.array([proj for part in recv_delays for proj in part])
            # Results must be sorted to make comparison possible
            np.testing.assert_array_equal(np.sort(recv_projections, axis=0), np.sort(expected_projections, axis=0))
            np.testing.assert_array_almost_equal(np.sort(recv_weights, axis=0), np.sort(expected_weights, axis=0))
            np.testing.assert_array_almost_equal(np.sort(recv_delays, axis=0), np.sort(expected_delays, axis=0))
        else:
            # Non-root ranks get None back from gather(root=0).
            self.assertIsNone(recv_projections)
            self.assertIsNone(recv_weights)
            self.assertIsNone(recv_delays)
    def setUp(self):
        # Fresh kernel for every test so connections don't accumulate.
        nest.ResetKernel()
    def test_connect_arrays_unique(self):
        """Connecting NumPy arrays of unique node IDs with MPI"""
        n = 10
        nest.Create('iaf_psc_alpha', n)
        sources = np.arange(1, n+1, dtype=np.uint64)
        targets = np.arange(1, n+1, dtype=np.uint64)
        weight = 1.5
        delay = 1.4
        nest.Connect(sources, targets, syn_spec={'weight': weight, 'delay': delay})
        self.assert_connections(sources, targets, weight, delay, 'all_to_all')
    def test_connect_arrays_nonunique(self):
        """Connecting NumPy arrays with non-unique node IDs with MPI"""
        n = 10
        nest.Create('iaf_psc_alpha', n)
        sources = np.arange(1, n+1, dtype=np.uint64)
        targets = self.non_unique
        weights = np.ones(n)
        delays = np.ones(n)
        nest.Connect(sources, targets, syn_spec={'weight': weights, 'delay': delays},
                     conn_spec='one_to_one')
        self.assert_connections(sources, targets, weights, delays, 'one_to_one')
    def test_connect_arrays_threaded(self):
        """Connecting NumPy arrays, threaded with MPI"""
        nest.SetKernelStatus({'local_num_threads': 2})
        n = 10
        nest.Create('iaf_psc_alpha', n)
        sources = np.arange(1, n+1, dtype=np.uint64)
        targets = self.non_unique
        syn_model = 'static_synapse'
        weights = np.linspace(0.6, 1.5, len(sources))  # Interval endpoints are carefully selected to get nice values,
        delays = np.linspace(0.4, 1.3, len(sources))  # that is, a step of 0.1 between values.
        nest.Connect(sources, targets, conn_spec='one_to_one',
                     syn_spec={'weight': weights,
                               'delay': delays,
                               'synapse_model': syn_model})
        self.assert_connections(sources, targets, weights, delays, 'one_to_one')
@unittest.skipIf(not HAVE_MPI, 'NEST was compiled without MPI')
@unittest.skipIf(not HAVE_MPI4PY, 'mpi4py is not available')
class TestConnectArraysMPI(unittest.TestCase):
    """
    When run with nosetests or pytest, this TestCase runs the ConnectArraysMPICase
    with multiple MPI processes in a subprocess. The test fails if any of the tests in
    ConnectArraysMPICase fail.
    """
    # With nosetests, only run this test if using a single process
    __test__ = not MULTIPLE_PROCESSES
    def testWithMPI(self):
        """Connect NumPy arrays with MPI"""
        directory = os.path.dirname(os.path.realpath(__file__))
        script = os.path.realpath(__file__)
        # NOTE(review): `script` is already absolute, so os.path.join
        # simply returns it unchanged; `directory` is effectively unused.
        test_script = os.path.join(directory, script)
        # Ask NEST to build the "mpirun -np 2 nosetests <script>" command
        # appropriate for this installation, then run it.
        command = nest.ll_api.sli_func("mpirun", 2, "nosetests", test_script)
        command = command.split()
        my_env = os.environ.copy()
        retcode = sp.call(command, env=my_env)
        self.assertEqual(retcode, 0, 'Test failed when run with "mpirun -np 2 nosetests [script]"')
if __name__ == '__main__':
raise RuntimeError('This test must be run with nosetests or pytest')
| gpl-2.0 |
2014c2g9/c2g9 | exts/w2/static/Brython2.0.0-20140209-164925/Lib/_imp.py | 115 | 2013 | """(Extremely) low-level import machinery bits as used by importlib and imp."""
# Placeholder loader type; Brython does not implement real import loaders.
class __loader__(object):pass
def _fix_co_filename(*args,**kw):
    # Not supported in the Brython (browser) environment.
    raise NotImplementedError("%s:not implemented" % ('_imp.py:_fix_co_filename'))
def acquire_lock(*args,**kw):
    """acquire_lock() -> None Acquires the interpreter's import lock for the current thread.
    This lock should be used by import hooks to ensure thread-safety
    when importing modules.
    On platforms without threads, this function does nothing."""
    # Not supported in the Brython (browser) environment.
    raise NotImplementedError("%s:not implemented" % ('_imp.py:acquire_lock'))
def extension_suffixes(*args, **kw):
    """Return the list of file suffixes that identify extension modules.

    This Brython stub always reports the single Windows suffix '.pyd';
    any arguments are accepted and ignored, matching the CPython API.
    """
    suffixes = ['.pyd']
    return suffixes
def get_frozen_object(*args,**kw):
    # Frozen modules are not supported by Brython.
    raise NotImplementedError("%s:not implemented" % ('_imp.py:get_frozen_object'))
def init_builtin(module, *args, **kw):
    """Import and return the built-in module named *module*.

    Extra positional and keyword arguments are accepted and ignored,
    mirroring the permissive signature of the original stub.
    """
    imported = __import__(module)
    return imported
# Remaining _imp stubs: Brython cannot support frozen modules, dynamic
# (compiled) extension loading, or a real import lock, so each raises
# NotImplementedError (or, for lock helpers, documents the no-op contract).
def init_frozen(*args,**kw):
    raise NotImplementedError("%s:not implemented" % ('_imp.py:init_frozen'))
def is_builtin(*args,**kw):
    raise NotImplementedError("%s:not implemented" % ('_imp.py:is_builtin'))
def is_frozen(*args,**kw):
    raise NotImplementedError("%s:not implemented" % ('_imp.py:is_frozen'))
def is_frozen_package(*args,**kw):
    raise NotImplementedError("%s:not implemented" % ('_imp.py:is_frozen_package'))
def load_dynamic(*args,**kw):
    raise NotImplementedError("%s:not implemented" % ('_imp.py:load_dynamic'))
def lock_held(*args,**kw):
    """lock_held() -> boolean Return True if the import lock is currently held, else False.
    On platforms without threads, return False."""
    raise NotImplementedError("%s:not implemented" % ('_imp.py:lock_held'))
def release_lock(*args,**kw):
    """release_lock() -> None Release the interpreter's import lock.
    On platforms without threads, this function does nothing."""
    raise NotImplementedError("%s:not implemented" % ('_imp.py:release_lock'))
| gpl-2.0 |
kirca/odoo | addons/account_sequence/account_sequence.py | 39 | 2435 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_move(osv.osv):
    """Extend account.move with an internal (per-journal) sequence number."""
    _inherit = 'account.move'
    _columns = {
        'internal_sequence_number': fields.char('Internal Number', size=64, readonly=True, help='Internal Sequence Number'),
    }
    def post(self, cr, uid, ids, context=None):
        """Post the moves, then stamp each with its journal's internal sequence.

        NOTE(review): `seq_no` is not reset inside the loop, so a move
        whose journal has no internal sequence can inherit the number
        generated for a previous move in the same batch — confirm whether
        this is intended before changing it.
        """
        obj_sequence = self.pool.get('ir.sequence')
        # Standard posting first; numbering is applied afterwards.
        res = super(account_move, self).post(cr, uid, ids, context=context)
        seq_no = False
        for move in self.browse(cr, uid, ids, context=context):
            if move.journal_id.internal_sequence_id:
                seq_no = obj_sequence.next_by_id(cr, uid, move.journal_id.internal_sequence_id.id, context=context)
            if seq_no:
                self.write(cr, uid, [move.id], {'internal_sequence_number': seq_no})
        return res
class account_journal(osv.osv):
    """Extend account.journal with an optional internal sequence."""
    _inherit = "account.journal"
    _columns = {
        # Used by account_move.post() to number entries of this journal.
        'internal_sequence_id': fields.many2one('ir.sequence', 'Internal Sequence', help="This sequence will be used to maintain the internal number for the journal entries related to this journal."),
    }
class account_move_line(osv.osv):
    """Expose the parent move's internal sequence number on each line."""
    _inherit = "account.move.line"
    _columns = {
        # Related (read-through) field: mirrors move_id.internal_sequence_number.
        'internal_sequence_number': fields.related('move_id','internal_sequence_number', type='char', relation='account.move', help='Internal Sequence Number', string='Internal Number'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
saydulk/horizon | openstack_dashboard/dashboards/admin/volumes/volume_types/qos_specs/forms.py | 63 | 3022 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
class CreateKeyValuePair(forms.SelfHandlingForm):
    """Horizon form that adds one key/value spec to an existing QoS Spec."""
    # this is for creating a spec key-value pair for an existing QOS Spec
    key = forms.CharField(max_length=255, label=_("Key"))
    value = forms.CharField(max_length=255, label=_("Value"))
    def handle(self, request, data):
        """Persist the new key/value pair via the Cinder API.

        Returns True on success; on failure redirects to the volumes
        index with an error message (exceptions.handle never re-raises
        here because a redirect is supplied).
        """
        # qos_spec_id is injected into self.initial by the calling view.
        qos_spec_id = self.initial['qos_spec_id']
        try:
            # first retrieve current value of specs
            specs = api.cinder.qos_spec_get(request, qos_spec_id)
            # now add new key-value pair to list of specs
            specs.specs[data['key']] = data['value']
            api.cinder.qos_spec_set_keys(request,
                                         qos_spec_id,
                                         specs.specs)
            msg = _('Created spec "%s".') % data['key']
            messages.success(request, msg)
            return True
        except Exception:
            redirect = reverse("horizon:admin:volumes:index")
            exceptions.handle(request,
                              _("Unable to create spec."),
                              redirect=redirect)
class EditKeyValuePair(forms.SelfHandlingForm):
    """Horizon form that updates the value of an existing QoS Spec key."""
    value = forms.CharField(max_length=255, label=_("Value"))
    # update the backend with the new qos spec value
    def handle(self, request, data):
        """Write the edited value back through the Cinder API.

        Returns True on success; on failure redirects to the volumes
        index with an error message.
        """
        # Both the key being edited and its QoS spec id are injected into
        # self.initial by the calling view.
        key = self.initial['key']
        qos_spec_id = self.initial['qos_spec_id']
        # build up new 'specs' object with all previous values plus new value
        try:
            # first retrieve current value of specs
            specs = api.cinder.qos_spec_get_keys(request,
                                                 qos_spec_id,
                                                 raw=True)
            specs.specs[key] = data['value']
            api.cinder.qos_spec_set_keys(request,
                                         qos_spec_id,
                                         specs.specs)
            msg = _('Saved spec "%s".') % key
            messages.success(request, msg)
            return True
        except Exception:
            redirect = reverse("horizon:admin:volumes:index")
            exceptions.handle(request,
                              _("Unable to edit spec."),
                              redirect=redirect)
| apache-2.0 |
charlesccychen/beam | sdks/python/apache_beam/runners/worker/statesampler_test.py | 5 | 4459 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for state sampler."""
from __future__ import absolute_import
from __future__ import division
import logging
import time
import unittest
from builtins import range
from apache_beam.runners.worker import statesampler
from apache_beam.utils.counters import CounterFactory
from apache_beam.utils.counters import CounterName
class StateSamplerTest(unittest.TestCase):
  # These tests rely on wall-clock sleeps, so they assert generous
  # (+/-25%) bounds on the sampled counters rather than exact values.

  def test_basic_sampler(self):
    """Checks that the per-state msec counters roughly match the time
    actually spent inside each scoped state."""
    # Set up state sampler.
    counter_factory = CounterFactory()
    sampler = statesampler.StateSampler('basic', counter_factory,
                                        sampling_period_ms=1)
    # Run basic workload transitioning between 3 states.
    sampler.start()
    with sampler.scoped_state('step1', 'statea'):
      time.sleep(0.1)
      self.assertEqual(
          sampler.current_state().name,
          CounterName(
              'statea-msecs', step_name='step1', stage_name='basic'))
      with sampler.scoped_state('step1', 'stateb'):
        # stateb accumulates 0.1s here plus 0.1s after statec exits,
        # matching the expected 200ms below.
        time.sleep(0.2 / 2)
        self.assertEqual(
            sampler.current_state().name,
            CounterName(
                'stateb-msecs', step_name='step1', stage_name='basic'))
        with sampler.scoped_state('step1', 'statec'):
          time.sleep(0.3)
          self.assertEqual(
              sampler.current_state().name,
              CounterName(
                  'statec-msecs', step_name='step1', stage_name='basic'))
        time.sleep(0.2 / 2)
    sampler.stop()
    sampler.commit_counters()
    if not statesampler.FAST_SAMPLER:
      # The slow sampler does not implement sampling, so we won't test it.
      return
    # Test that sampled state timings are close to their expected values.
    expected_counter_values = {
        CounterName('statea-msecs', step_name='step1', stage_name='basic'): 100,
        CounterName('stateb-msecs', step_name='step1', stage_name='basic'): 200,
        CounterName('statec-msecs', step_name='step1', stage_name='basic'): 300,
    }
    for counter in counter_factory.get_counters():
      self.assertIn(counter.name, expected_counter_values)
      expected_value = expected_counter_values[counter.name]
      actual_value = counter.value()
      deviation = float(abs(actual_value - expected_value)) / expected_value
      logging.info('Sampling deviation from expectation: %f', deviation)
      self.assertGreater(actual_value, expected_value * 0.75)
      self.assertLess(actual_value, expected_value * 1.25)

  def test_sampler_transition_overhead(self):
    """Measures per-transition overhead over ~2.1M state transitions and
    asserts it stays under a conservative 10us upper bound."""
    # Set up state sampler.
    counter_factory = CounterFactory()
    sampler = statesampler.StateSampler('overhead-', counter_factory,
                                        sampling_period_ms=10)
    # Run basic workload transitioning between 3 states.
    state_a = sampler.scoped_state('step1', 'statea')
    state_b = sampler.scoped_state('step1', 'stateb')
    state_c = sampler.scoped_state('step1', 'statec')
    start_time = time.time()
    sampler.start()
    for _ in range(100000):
      with state_a:
        with state_b:
          for _ in range(10):
            with state_c:
              pass
    sampler.stop()
    elapsed_time = time.time() - start_time
    state_transition_count = sampler.get_info().transition_count
    overhead_us = 1000000.0 * elapsed_time / state_transition_count
    logging.info('Overhead per transition: %fus', overhead_us)
    # Conservative upper bound on overhead in microseconds (we expect this to
    # take 0.17us when compiled in opt mode or 0.48 us when compiled with in
    # debug mode).
    self.assertLess(overhead_us, 10.0)
if __name__ == '__main__':
  # Run at INFO level so the sampling-deviation log lines are visible.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| apache-2.0 |
eeshangarg/oh-mainline | vendor/packages/twisted/twisted/news/nntp.py | 18 | 33334 | # -*- test-case-name: twisted.news.test.test_nntp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
NNTP protocol support.
Maintainer: Jp Calderone
The following protocol commands are currently understood::
LIST LISTGROUP XOVER XHDR
POST GROUP ARTICLE STAT HEAD
BODY NEXT MODE STREAM MODE READER SLAVE
LAST QUIT HELP IHAVE XPATH
XINDEX XROVER TAKETHIS CHECK
The following protocol commands require implementation::
NEWNEWS
XGTITLE XPAT
XTHREAD AUTHINFO NEWGROUPS
Other desired features:
- A real backend
- More robust client input handling
- A control protocol
"""
import time
import types
try:
import cStringIO as StringIO
except:
import StringIO
from twisted.protocols import basic
from twisted.python import log
def parseRange(text):
    """Parse an NNTP article range specification into a (low, high) pair.

    Accepted forms: 'N' (a single article, returned as (N, N)), 'L-H',
    'L-' (open upper bound, high is None) and '-H' (open lower bound,
    low is None).  Returns (None, None) for anything unparseable, so
    callers can always do ``l, h = parseRange(text)``.
    """
    articles = text.split('-')
    if len(articles) == 1:
        try:
            a = int(articles[0])
        except ValueError:
            return None, None
        return a, a
    if len(articles) == 2:
        try:
            l = int(articles[0]) if articles[0] else None
            h = int(articles[1]) if articles[1] else None
        except ValueError:
            return None, None
        return l, h
    # Malformed input such as '1-2-3' previously fell off the end of the
    # function, returning a bare None that crashed callers unpacking the
    # result; report it the same way as any other parse failure.
    return None, None
def extractCode(line):
    """Split an NNTP response line into a (status-code, rest) tuple.

    Returns None when the line does not start with an integer status
    code followed by a space.
    """
    parts = line.split(' ', 1)
    if len(parts) != 2:
        return None
    code, rest = parts
    try:
        return int(code), rest
    except ValueError:
        return None
class NNTPError(Exception):
    """Raised to signal an NNTP protocol failure.

    The offending detail text is kept on the ``string`` attribute.
    """

    def __init__(self, string):
        self.string = string

    def __str__(self):
        return 'NNTPError: {0}'.format(self.string)
class NNTPClient(basic.LineReceiver):
MAX_COMMAND_LENGTH = 510
def __init__(self):
self.currentGroup = None
self._state = []
self._error = []
self._inputBuffers = []
self._responseCodes = []
self._responseHandlers = []
self._postText = []
self._newState(self._statePassive, None, self._headerInitial)
def gotAllGroups(self, groups):
"Override for notification when fetchGroups() action is completed"
def getAllGroupsFailed(self, error):
"Override for notification when fetchGroups() action fails"
def gotOverview(self, overview):
"Override for notification when fetchOverview() action is completed"
def getOverviewFailed(self, error):
"Override for notification when fetchOverview() action fails"
def gotSubscriptions(self, subscriptions):
"Override for notification when fetchSubscriptions() action is completed"
def getSubscriptionsFailed(self, error):
"Override for notification when fetchSubscriptions() action fails"
def gotGroup(self, group):
"Override for notification when fetchGroup() action is completed"
def getGroupFailed(self, error):
"Override for notification when fetchGroup() action fails"
def gotArticle(self, article):
"Override for notification when fetchArticle() action is completed"
def getArticleFailed(self, error):
"Override for notification when fetchArticle() action fails"
def gotHead(self, head):
"Override for notification when fetchHead() action is completed"
def getHeadFailed(self, error):
"Override for notification when fetchHead() action fails"
def gotBody(self, info):
"Override for notification when fetchBody() action is completed"
def getBodyFailed(self, body):
"Override for notification when fetchBody() action fails"
def postedOk(self):
"Override for notification when postArticle() action is successful"
def postFailed(self, error):
"Override for notification when postArticle() action fails"
def gotXHeader(self, headers):
"Override for notification when getXHeader() action is successful"
def getXHeaderFailed(self, error):
"Override for notification when getXHeader() action fails"
def gotNewNews(self, news):
"Override for notification when getNewNews() action is successful"
def getNewNewsFailed(self, error):
"Override for notification when getNewNews() action fails"
def gotNewGroups(self, groups):
"Override for notification when getNewGroups() action is successful"
def getNewGroupsFailed(self, error):
"Override for notification when getNewGroups() action fails"
def setStreamSuccess(self):
"Override for notification when setStream() action is successful"
def setStreamFailed(self, error):
"Override for notification when setStream() action fails"
def fetchGroups(self):
"""
Request a list of all news groups from the server. gotAllGroups()
is called on success, getGroupsFailed() on failure
"""
self.sendLine('LIST')
self._newState(self._stateList, self.getAllGroupsFailed)
def fetchOverview(self):
"""
Request the overview format from the server. gotOverview() is called
on success, getOverviewFailed() on failure
"""
self.sendLine('LIST OVERVIEW.FMT')
self._newState(self._stateOverview, self.getOverviewFailed)
def fetchSubscriptions(self):
"""
Request a list of the groups it is recommended a new user subscribe to.
gotSubscriptions() is called on success, getSubscriptionsFailed() on
failure
"""
self.sendLine('LIST SUBSCRIPTIONS')
self._newState(self._stateSubscriptions, self.getSubscriptionsFailed)
def fetchGroup(self, group):
"""
Get group information for the specified group from the server. gotGroup()
is called on success, getGroupFailed() on failure.
"""
self.sendLine('GROUP %s' % (group,))
self._newState(None, self.getGroupFailed, self._headerGroup)
def fetchHead(self, index = ''):
"""
Get the header for the specified article (or the currently selected
article if index is '') from the server. gotHead() is called on
success, getHeadFailed() on failure
"""
self.sendLine('HEAD %s' % (index,))
self._newState(self._stateHead, self.getHeadFailed)
def fetchBody(self, index = ''):
"""
Get the body for the specified article (or the currently selected
article if index is '') from the server. gotBody() is called on
success, getBodyFailed() on failure
"""
self.sendLine('BODY %s' % (index,))
self._newState(self._stateBody, self.getBodyFailed)
def fetchArticle(self, index = ''):
"""
Get the complete article with the specified index (or the currently
selected article if index is '') or Message-ID from the server.
gotArticle() is called on success, getArticleFailed() on failure.
"""
self.sendLine('ARTICLE %s' % (index,))
self._newState(self._stateArticle, self.getArticleFailed)
def postArticle(self, text):
"""
Attempt to post an article with the specified text to the server. 'text'
must consist of both head and body data, as specified by RFC 850. If the
article is posted successfully, postedOk() is called, otherwise postFailed()
is called.
"""
self.sendLine('POST')
self._newState(None, self.postFailed, self._headerPost)
self._postText.append(text)
def fetchNewNews(self, groups, date, distributions = ''):
"""
Get the Message-IDs for all new news posted to any of the given
groups since the specified date - in seconds since the epoch, GMT -
optionally restricted to the given distributions. gotNewNews() is
called on success, getNewNewsFailed() on failure.
One invocation of this function may result in multiple invocations
of gotNewNews()/getNewNewsFailed().
"""
date, timeStr = time.strftime('%y%m%d %H%M%S', time.gmtime(date)).split()
line = 'NEWNEWS %%s %s %s %s' % (date, timeStr, distributions)
groupPart = ''
while len(groups) and len(line) + len(groupPart) + len(groups[-1]) + 1 < NNTPClient.MAX_COMMAND_LENGTH:
group = groups.pop()
groupPart = groupPart + ',' + group
self.sendLine(line % (groupPart,))
self._newState(self._stateNewNews, self.getNewNewsFailed)
if len(groups):
self.fetchNewNews(groups, date, distributions)
def fetchNewGroups(self, date, distributions):
"""
Get the names of all new groups created/added to the server since
the specified date - in seconds since the ecpoh, GMT - optionally
restricted to the given distributions. gotNewGroups() is called
on success, getNewGroupsFailed() on failure.
"""
date, timeStr = time.strftime('%y%m%d %H%M%S', time.gmtime(date)).split()
self.sendLine('NEWGROUPS %s %s %s' % (date, timeStr, distributions))
self._newState(self._stateNewGroups, self.getNewGroupsFailed)
def fetchXHeader(self, header, low = None, high = None, id = None):
"""
Request a specific header from the server for an article or range
of articles. If 'id' is not None, a header for only the article
with that Message-ID will be requested. If both low and high are
None, a header for the currently selected article will be selected;
If both low and high are zero-length strings, headers for all articles
in the currently selected group will be requested; Otherwise, high
and low will be used as bounds - if one is None the first or last
article index will be substituted, as appropriate.
"""
if id is not None:
r = header + ' <%s>' % (id,)
elif low is high is None:
r = header
elif high is None:
r = header + ' %d-' % (low,)
elif low is None:
r = header + ' -%d' % (high,)
else:
r = header + ' %d-%d' % (low, high)
self.sendLine('XHDR ' + r)
self._newState(self._stateXHDR, self.getXHeaderFailed)
def setStream(self):
"""
Set the mode to STREAM, suspending the normal "lock-step" mode of
communications. setStreamSuccess() is called on success,
setStreamFailed() on failure.
"""
self.sendLine('MODE STREAM')
self._newState(None, self.setStreamFailed, self._headerMode)
def quit(self):
self.sendLine('QUIT')
self.transport.loseConnection()
def _newState(self, method, error, responseHandler = None):
self._inputBuffers.append([])
self._responseCodes.append(None)
self._state.append(method)
self._error.append(error)
self._responseHandlers.append(responseHandler)
def _endState(self):
buf = self._inputBuffers[0]
del self._responseCodes[0]
del self._inputBuffers[0]
del self._state[0]
del self._error[0]
del self._responseHandlers[0]
return buf
def _newLine(self, line, check = 1):
if check and line and line[0] == '.':
line = line[1:]
self._inputBuffers[0].append(line)
def _setResponseCode(self, code):
self._responseCodes[0] = code
def _getResponseCode(self):
return self._responseCodes[0]
def lineReceived(self, line):
if not len(self._state):
self._statePassive(line)
elif self._getResponseCode() is None:
code = extractCode(line)
if code is None or not (200 <= code[0] < 400): # An error!
self._error[0](line)
self._endState()
else:
self._setResponseCode(code)
if self._responseHandlers[0]:
self._responseHandlers[0](code)
else:
self._state[0](line)
def _statePassive(self, line):
log.msg('Server said: %s' % line)
def _passiveError(self, error):
log.err('Passive Error: %s' % (error,))
def _headerInitial(self, (code, message)):
if code == 200:
self.canPost = 1
else:
self.canPost = 0
self._endState()
def _stateList(self, line):
if line != '.':
data = filter(None, line.strip().split())
self._newLine((data[0], int(data[1]), int(data[2]), data[3]), 0)
else:
self.gotAllGroups(self._endState())
def _stateOverview(self, line):
if line != '.':
self._newLine(filter(None, line.strip().split()), 0)
else:
self.gotOverview(self._endState())
def _stateSubscriptions(self, line):
if line != '.':
self._newLine(line.strip(), 0)
else:
self.gotSubscriptions(self._endState())
def _headerGroup(self, (code, line)):
self.gotGroup(tuple(line.split()))
self._endState()
def _stateArticle(self, line):
if line != '.':
if line.startswith('.'):
line = line[1:]
self._newLine(line, 0)
else:
self.gotArticle('\n'.join(self._endState())+'\n')
def _stateHead(self, line):
if line != '.':
self._newLine(line, 0)
else:
self.gotHead('\n'.join(self._endState()))
def _stateBody(self, line):
if line != '.':
if line.startswith('.'):
line = line[1:]
self._newLine(line, 0)
else:
self.gotBody('\n'.join(self._endState())+'\n')
def _headerPost(self, (code, message)):
if code == 340:
self.transport.write(self._postText[0].replace('\n', '\r\n').replace('\r\n.', '\r\n..'))
if self._postText[0][-1:] != '\n':
self.sendLine('')
self.sendLine('.')
del self._postText[0]
self._newState(None, self.postFailed, self._headerPosted)
else:
self.postFailed('%d %s' % (code, message))
self._endState()
def _headerPosted(self, (code, message)):
if code == 240:
self.postedOk()
else:
self.postFailed('%d %s' % (code, message))
self._endState()
def _stateXHDR(self, line):
if line != '.':
self._newLine(line.split(), 0)
else:
self._gotXHeader(self._endState())
def _stateNewNews(self, line):
if line != '.':
self._newLine(line, 0)
else:
self.gotNewNews(self._endState())
def _stateNewGroups(self, line):
if line != '.':
self._newLine(line, 0)
else:
self.gotNewGroups(self._endState())
def _headerMode(self, (code, message)):
if code == 203:
self.setStreamSuccess()
else:
self.setStreamFailed((code, message))
self._endState()
class NNTPServer(basic.LineReceiver):
COMMANDS = [
'LIST', 'GROUP', 'ARTICLE', 'STAT', 'MODE', 'LISTGROUP', 'XOVER',
'XHDR', 'HEAD', 'BODY', 'NEXT', 'LAST', 'POST', 'QUIT', 'IHAVE',
'HELP', 'SLAVE', 'XPATH', 'XINDEX', 'XROVER', 'TAKETHIS', 'CHECK'
]
def __init__(self):
self.servingSlave = 0
def connectionMade(self):
self.inputHandler = None
self.currentGroup = None
self.currentIndex = None
self.sendLine('200 server ready - posting allowed')
def lineReceived(self, line):
if self.inputHandler is not None:
self.inputHandler(line)
else:
parts = line.strip().split()
if len(parts):
cmd, parts = parts[0].upper(), parts[1:]
if cmd in NNTPServer.COMMANDS:
func = getattr(self, 'do_%s' % cmd)
try:
func(*parts)
except TypeError:
self.sendLine('501 command syntax error')
log.msg("501 command syntax error")
log.msg("command was", line)
log.deferr()
except:
self.sendLine('503 program fault - command not performed')
log.msg("503 program fault")
log.msg("command was", line)
log.deferr()
else:
self.sendLine('500 command not recognized')
def do_LIST(self, subcmd = '', *dummy):
subcmd = subcmd.strip().lower()
if subcmd == 'newsgroups':
# XXX - this could use a real implementation, eh?
self.sendLine('215 Descriptions in form "group description"')
self.sendLine('.')
elif subcmd == 'overview.fmt':
defer = self.factory.backend.overviewRequest()
defer.addCallbacks(self._gotOverview, self._errOverview)
log.msg('overview')
elif subcmd == 'subscriptions':
defer = self.factory.backend.subscriptionRequest()
defer.addCallbacks(self._gotSubscription, self._errSubscription)
log.msg('subscriptions')
elif subcmd == '':
defer = self.factory.backend.listRequest()
defer.addCallbacks(self._gotList, self._errList)
else:
self.sendLine('500 command not recognized')
def _gotList(self, list):
self.sendLine('215 newsgroups in form "group high low flags"')
for i in list:
self.sendLine('%s %d %d %s' % tuple(i))
self.sendLine('.')
def _errList(self, failure):
print 'LIST failed: ', failure
self.sendLine('503 program fault - command not performed')
def _gotSubscription(self, parts):
self.sendLine('215 information follows')
for i in parts:
self.sendLine(i)
self.sendLine('.')
def _errSubscription(self, failure):
print 'SUBSCRIPTIONS failed: ', failure
self.sendLine('503 program fault - comand not performed')
def _gotOverview(self, parts):
self.sendLine('215 Order of fields in overview database.')
for i in parts:
self.sendLine(i + ':')
self.sendLine('.')
def _errOverview(self, failure):
print 'LIST OVERVIEW.FMT failed: ', failure
self.sendLine('503 program fault - command not performed')
def do_LISTGROUP(self, group = None):
group = group or self.currentGroup
if group is None:
self.sendLine('412 Not currently in newsgroup')
else:
defer = self.factory.backend.listGroupRequest(group)
defer.addCallbacks(self._gotListGroup, self._errListGroup)
def _gotListGroup(self, (group, articles)):
self.currentGroup = group
if len(articles):
self.currentIndex = int(articles[0])
else:
self.currentIndex = None
self.sendLine('211 list of article numbers follow')
for i in articles:
self.sendLine(str(i))
self.sendLine('.')
def _errListGroup(self, failure):
print 'LISTGROUP failed: ', failure
self.sendLine('502 no permission')
def do_XOVER(self, range):
if self.currentGroup is None:
self.sendLine('412 No news group currently selected')
else:
l, h = parseRange(range)
defer = self.factory.backend.xoverRequest(self.currentGroup, l, h)
defer.addCallbacks(self._gotXOver, self._errXOver)
def _gotXOver(self, parts):
self.sendLine('224 Overview information follows')
for i in parts:
self.sendLine('\t'.join(map(str, i)))
self.sendLine('.')
def _errXOver(self, failure):
print 'XOVER failed: ', failure
self.sendLine('420 No article(s) selected')
def xhdrWork(self, header, range):
if self.currentGroup is None:
self.sendLine('412 No news group currently selected')
else:
if range is None:
if self.currentIndex is None:
self.sendLine('420 No current article selected')
return
else:
l = h = self.currentIndex
else:
# FIXME: articles may be a message-id
l, h = parseRange(range)
if l is h is None:
self.sendLine('430 no such article')
else:
return self.factory.backend.xhdrRequest(self.currentGroup, l, h, header)
def do_XHDR(self, header, range = None):
d = self.xhdrWork(header, range)
if d:
d.addCallbacks(self._gotXHDR, self._errXHDR)
def _gotXHDR(self, parts):
self.sendLine('221 Header follows')
for i in parts:
self.sendLine('%d %s' % i)
self.sendLine('.')
def _errXHDR(self, failure):
print 'XHDR failed: ', failure
self.sendLine('502 no permission')
def do_XROVER(self, header, range = None):
d = self.xhdrWork(header, range)
if d:
d.addCallbacks(self._gotXROVER, self._errXROVER)
def _gotXROVER(self, parts):
self.sendLine('224 Overview information follows')
for i in parts:
self.sendLine('%d %s' % i)
self.sendLine('.')
def _errXROVER(self, failure):
print 'XROVER failed: ',
self._errXHDR(failure)
def do_POST(self):
self.inputHandler = self._doingPost
self.message = ''
self.sendLine('340 send article to be posted. End with <CR-LF>.<CR-LF>')
def _doingPost(self, line):
if line == '.':
self.inputHandler = None
group, article = self.currentGroup, self.message
self.message = ''
defer = self.factory.backend.postRequest(article)
defer.addCallbacks(self._gotPost, self._errPost)
else:
self.message = self.message + line + '\r\n'
def _gotPost(self, parts):
self.sendLine('240 article posted ok')
def _errPost(self, failure):
print 'POST failed: ', failure
self.sendLine('441 posting failed')
def do_CHECK(self, id):
d = self.factory.backend.articleExistsRequest(id)
d.addCallbacks(self._gotCheck, self._errCheck)
def _gotCheck(self, result):
if result:
self.sendLine("438 already have it, please don't send it to me")
else:
self.sendLine('238 no such article found, please send it to me')
def _errCheck(self, failure):
print 'CHECK failed: ', failure
self.sendLine('431 try sending it again later')
def do_TAKETHIS(self, id):
self.inputHandler = self._doingTakeThis
self.message = ''
def _doingTakeThis(self, line):
if line == '.':
self.inputHandler = None
article = self.message
self.message = ''
d = self.factory.backend.postRequest(article)
d.addCallbacks(self._didTakeThis, self._errTakeThis)
else:
self.message = self.message + line + '\r\n'
def _didTakeThis(self, result):
self.sendLine('239 article transferred ok')
def _errTakeThis(self, failure):
print 'TAKETHIS failed: ', failure
self.sendLine('439 article transfer failed')
def do_GROUP(self, group):
defer = self.factory.backend.groupRequest(group)
defer.addCallbacks(self._gotGroup, self._errGroup)
def _gotGroup(self, (name, num, high, low, flags)):
self.currentGroup = name
self.currentIndex = low
self.sendLine('211 %d %d %d %s group selected' % (num, low, high, name))
def _errGroup(self, failure):
print 'GROUP failed: ', failure
self.sendLine('411 no such group')
def articleWork(self, article, cmd, func):
if self.currentGroup is None:
self.sendLine('412 no newsgroup has been selected')
else:
if not article:
if self.currentIndex is None:
self.sendLine('420 no current article has been selected')
else:
article = self.currentIndex
else:
if article[0] == '<':
return func(self.currentGroup, index = None, id = article)
else:
try:
article = int(article)
return func(self.currentGroup, article)
except ValueError, e:
self.sendLine('501 command syntax error')
def do_ARTICLE(self, article = None):
defer = self.articleWork(article, 'ARTICLE', self.factory.backend.articleRequest)
if defer:
defer.addCallbacks(self._gotArticle, self._errArticle)
def _gotArticle(self, (index, id, article)):
if isinstance(article, types.StringType):
import warnings
warnings.warn(
"Returning the article as a string from `articleRequest' "
"is deprecated. Return a file-like object instead."
)
article = StringIO.StringIO(article)
self.currentIndex = index
self.sendLine('220 %d %s article' % (index, id))
s = basic.FileSender()
d = s.beginFileTransfer(article, self.transport)
d.addCallback(self.finishedFileTransfer)
##
## Helper for FileSender
##
def finishedFileTransfer(self, lastsent):
if lastsent != '\n':
line = '\r\n.'
else:
line = '.'
self.sendLine(line)
##
def _errArticle(self, failure):
print 'ARTICLE failed: ', failure
self.sendLine('423 bad article number')
def do_STAT(self, article = None):
defer = self.articleWork(article, 'STAT', self.factory.backend.articleRequest)
if defer:
defer.addCallbacks(self._gotStat, self._errStat)
def _gotStat(self, (index, id, article)):
self.currentIndex = index
self.sendLine('223 %d %s article retreived - request text separately' % (index, id))
def _errStat(self, failure):
print 'STAT failed: ', failure
self.sendLine('423 bad article number')
def do_HEAD(self, article = None):
defer = self.articleWork(article, 'HEAD', self.factory.backend.headRequest)
if defer:
defer.addCallbacks(self._gotHead, self._errHead)
def _gotHead(self, (index, id, head)):
self.currentIndex = index
self.sendLine('221 %d %s article retrieved' % (index, id))
self.transport.write(head + '\r\n')
self.sendLine('.')
def _errHead(self, failure):
print 'HEAD failed: ', failure
self.sendLine('423 no such article number in this group')
def do_BODY(self, article):
defer = self.articleWork(article, 'BODY', self.factory.backend.bodyRequest)
if defer:
defer.addCallbacks(self._gotBody, self._errBody)
def _gotBody(self, (index, id, body)):
if isinstance(body, types.StringType):
import warnings
warnings.warn(
"Returning the article as a string from `articleRequest' "
"is deprecated. Return a file-like object instead."
)
body = StringIO.StringIO(body)
self.currentIndex = index
self.sendLine('221 %d %s article retrieved' % (index, id))
self.lastsent = ''
s = basic.FileSender()
d = s.beginFileTransfer(body, self.transport)
d.addCallback(self.finishedFileTransfer)
def _errBody(self, failure):
print 'BODY failed: ', failure
self.sendLine('423 no such article number in this group')
# NEXT and LAST are just STATs that increment currentIndex first.
# Accordingly, use the STAT callbacks.
def do_NEXT(self):
i = self.currentIndex + 1
defer = self.factory.backend.articleRequest(self.currentGroup, i)
defer.addCallbacks(self._gotStat, self._errStat)
def do_LAST(self):
i = self.currentIndex - 1
defer = self.factory.backend.articleRequest(self.currentGroup, i)
defer.addCallbacks(self._gotStat, self._errStat)
def do_MODE(self, cmd):
cmd = cmd.strip().upper()
if cmd == 'READER':
self.servingSlave = 0
self.sendLine('200 Hello, you can post')
elif cmd == 'STREAM':
self.sendLine('500 Command not understood')
else:
# This is not a mistake
self.sendLine('500 Command not understood')
def do_QUIT(self):
self.sendLine('205 goodbye')
self.transport.loseConnection()
def do_HELP(self):
self.sendLine('100 help text follows')
self.sendLine('Read the RFC.')
self.sendLine('.')
def do_SLAVE(self):
self.sendLine('202 slave status noted')
self.servingeSlave = 1
def do_XPATH(self, article):
# XPATH is a silly thing to have. No client has the right to ask
# for this piece of information from me, and so that is what I'll
# tell them.
self.sendLine('502 access restriction or permission denied')
def do_XINDEX(self, article):
# XINDEX is another silly command. The RFC suggests it be relegated
# to the history books, and who am I to disagree?
self.sendLine('502 access restriction or permission denied')
def do_XROVER(self, range = None):
self.do_XHDR(self, 'References', range)
def do_IHAVE(self, id):
self.factory.backend.articleExistsRequest(id).addCallback(self._foundArticle)
def _foundArticle(self, result):
if result:
self.sendLine('437 article rejected - do not try again')
else:
self.sendLine('335 send article to be transferred. End with <CR-LF>.<CR-LF>')
self.inputHandler = self._handleIHAVE
self.message = ''
def _handleIHAVE(self, line):
if line == '.':
self.inputHandler = None
self.factory.backend.postRequest(
self.message
).addCallbacks(self._gotIHAVE, self._errIHAVE)
self.message = ''
else:
self.message = self.message + line + '\r\n'
def _gotIHAVE(self, result):
self.sendLine('235 article transferred ok')
def _errIHAVE(self, failure):
print 'IHAVE failed: ', failure
self.sendLine('436 transfer failed - try again later')
class UsenetClientProtocol(NNTPClient):
    """
    A client that connects to an NNTP server and asks for articles new
    since a certain time.
    """
    def __init__(self, groups, date, storage):
        """
        Fetch all new articles from the given groups since the
        given date and dump them into the given storage. groups
        is a list of group names. date is an integer or floating
        point representing seconds since the epoch (GMT). storage is
        any object that implements the NewsStorage interface.
        """
        NNTPClient.__init__(self)
        self.groups, self.date, self.storage = groups, date, storage
    def connectionMade(self):
        # Start the update as soon as we are connected: switch the server
        # to streaming mode, then ask for everything new since self.date.
        NNTPClient.connectionMade(self)
        log.msg("Initiating update with remote host: " + str(self.transport.getPeer()))
        self.setStream()
        self.fetchNewNews(self.groups, self.date, '')
    def articleExists(self, exists, article):
        # Callback for storage.articleExistsRequest().
        # NOTE(review): fetches the article when 'exists' is truthy;
        # presumably the storage fires with a falsy value for articles it
        # already holds - confirm against the NewsStorage implementation.
        if exists:
            self.fetchArticle(article)
        else:
            self.count = self.count - 1
            self.disregard = self.disregard + 1
    def gotNewNews(self, news):
        # 'count' tracks articles still outstanding and 'disregard' those
        # skipped; gotArticle() uses them to detect completion.
        self.disregard = 0
        self.count = len(news)
        log.msg("Transfering " + str(self.count) + " articles from remote host: " + str(self.transport.getPeer()))
        for i in news:
            self.storage.articleExistsRequest(i).addCallback(self.articleExists, i)
    def getNewNewsFailed(self, reason):
        # 'reason' is the raw error line from the server (see
        # NNTPClient.lineReceived), hence plain string concatenation.
        log.msg("Updated failed (" + reason + ") with remote host: " + str(self.transport.getPeer()))
        self.quit()
    def gotArticle(self, article):
        # Store each fetched article; when the last outstanding article
        # arrives, notify the factory and close the connection.
        self.storage.postRequest(article)
        self.count = self.count - 1
        if not self.count:
            log.msg("Completed update with remote host: " + str(self.transport.getPeer()))
            if self.disregard:
                log.msg("Disregarded %d articles." % (self.disregard,))
            self.factory.updateChecks(self.transport.getPeer())
            self.quit()
| agpl-3.0 |
espadrine/opera | chromium/src/third_party/trace-viewer/third_party/pywebsocket/src/mod_pywebsocket/dispatch.py | 35 | 14459 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Dispatch WebSocket request.
"""
import logging
import os
import re
from mod_pywebsocket import common
from mod_pywebsocket import handshake
from mod_pywebsocket import msgutil
from mod_pywebsocket import mux
from mod_pywebsocket import stream
from mod_pywebsocket import util
_SOURCE_PATH_PATTERN = re.compile(r'(?i)_wsh\.py$')
_SOURCE_SUFFIX = '_wsh.py'
_DO_EXTRA_HANDSHAKE_HANDLER_NAME = 'web_socket_do_extra_handshake'
_TRANSFER_DATA_HANDLER_NAME = 'web_socket_transfer_data'
_PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME = (
'web_socket_passive_closing_handshake')
class DispatchException(Exception):
    """Raised when a WebSocket request cannot be dispatched to a handler.

    The ``status`` attribute carries the HTTP status code to report back
    to the client (404 Not Found by default).
    """

    def __init__(self, name, status=common.HTTP_STATUS_NOT_FOUND):
        Exception.__init__(self, name)
        self.status = status
def _default_passive_closing_handshake_handler(request):
    """Fallback used when a handler file defines no passive closing
    handshake hook: acknowledge with a normal-closure code and no reason.
    """
    return (common.STATUS_NORMAL_CLOSURE, '')
def _normalize_path(path):
"""Normalize path.
Args:
path: the path to normalize.
Path is converted to the absolute path.
The input path can use either '\\' or '/' as the separator.
The normalized path always uses '/' regardless of the platform.
"""
path = path.replace('\\', os.path.sep)
path = os.path.realpath(path)
path = path.replace('\\', '/')
return path
def _create_path_to_resource_converter(base_dir):
    """Returns a function that converts the path of a WebSocket handler source
    file to a resource string by removing the path to the base directory from
    its head, removing _SOURCE_SUFFIX from its tail, and replacing path
    separators in it with '/'.
    Args:
        base_dir: the path to the base directory.
    """
    normalized_base = _normalize_path(base_dir)
    prefix_length = len(normalized_base)
    trim_length = len(_SOURCE_SUFFIX)
    def converter(path):
        if not path.endswith(_SOURCE_SUFFIX):
            return None
        # _normalize_path must not be used because resolving symlink breaks
        # following path check.
        slashed = path.replace('\\', '/')
        if not slashed.startswith(normalized_base):
            return None
        return slashed[prefix_length:-trim_length]
    return converter
def _enumerate_handler_file_paths(directory):
    """Returns a generator that enumerates WebSocket Handler source file names
    in the given directory.
    """
    for dir_path, _unused_subdirs, file_names in os.walk(directory):
        for file_name in file_names:
            candidate = os.path.join(dir_path, file_name)
            if _SOURCE_PATH_PATTERN.search(candidate):
                yield candidate
class _HandlerSuite(object):
"""A handler suite holder class."""
def __init__(self, do_extra_handshake, transfer_data,
passive_closing_handshake):
self.do_extra_handshake = do_extra_handshake
self.transfer_data = transfer_data
self.passive_closing_handshake = passive_closing_handshake
def _source_handler_file(handler_definition):
    """Source a handler definition string.
    Args:
        handler_definition: a string containing Python statements that define
                            handler functions.
    Returns:
        A _HandlerSuite holding the three handler callables.
    Raises:
        DispatchException: if the source fails to execute or a required
            handler is missing or not callable.
    """
    # Execute the handler source in a fresh namespace (Python 2 exec
    # statement). Any failure is wrapped in DispatchException so the caller
    # can record it as a source warning instead of crashing the scan.
    global_dic = {}
    try:
        exec handler_definition in global_dic
    except Exception:
        raise DispatchException('Error in sourcing handler:' +
                                util.get_stack_trace())
    # The passive closing handshake handler is optional: fall back to the
    # module default when the handler file does not define a valid one.
    passive_closing_handshake_handler = None
    try:
        passive_closing_handshake_handler = _extract_handler(
            global_dic, _PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME)
    except Exception:
        passive_closing_handshake_handler = (
            _default_passive_closing_handshake_handler)
    # The handshake and data-transfer handlers are mandatory; _extract_handler
    # raises DispatchException when they are absent.
    return _HandlerSuite(
        _extract_handler(global_dic, _DO_EXTRA_HANDSHAKE_HANDLER_NAME),
        _extract_handler(global_dic, _TRANSFER_DATA_HANDLER_NAME),
        passive_closing_handshake_handler)
def _extract_handler(dic, name):
"""Extracts a callable with the specified name from the given dictionary
dic.
"""
if name not in dic:
raise DispatchException('%s is not defined.' % name)
handler = dic[name]
if not callable(handler):
raise DispatchException('%s is not callable.' % name)
return handler
class Dispatcher(object):
    """Dispatches WebSocket requests.
    This class maintains a map from resource name to handlers.
    The map is built once at construction time by scanning *_wsh.py files
    under scan_dir; requests are then routed by request.ws_resource.
    """
    def __init__(
        self, root_dir, scan_dir=None,
        allow_handlers_outside_root_dir=True):
        """Construct an instance.
        Args:
            root_dir: The directory where handler definition files are
                      placed.
            scan_dir: The directory where handler definition files are
                      searched. scan_dir must be a directory under root_dir,
                      including root_dir itself.  If scan_dir is None,
                      root_dir is used as scan_dir. scan_dir can be useful
                      in saving scan time when root_dir contains many
                      subdirectories.
            allow_handlers_outside_root_dir: Scans handler files even if their
                      canonical path is not under root_dir.
        """
        self._logger = util.get_class_logger(self)
        self._handler_suite_map = {}
        self._source_warnings = []
        if scan_dir is None:
            scan_dir = root_dir
        # NOTE(review): startswith on realpath does not guarantee a path
        # component boundary (e.g. /a/bc passes for root /a/b) — confirm
        # callers only pass directories.
        if not os.path.realpath(scan_dir).startswith(
                os.path.realpath(root_dir)):
            raise DispatchException('scan_dir:%s must be a directory under '
                                    'root_dir:%s.' % (scan_dir, root_dir))
        self._source_handler_files_in_dir(
            root_dir, scan_dir, allow_handlers_outside_root_dir)
    def add_resource_path_alias(self,
                                alias_resource_path, existing_resource_path):
        """Add resource path alias.
        Once added, request to alias_resource_path would be handled by
        handler registered for existing_resource_path.
        Args:
            alias_resource_path: alias resource path
            existing_resource_path: existing resource path
        Raises:
            DispatchException: when no handler is registered for
                existing_resource_path.
        """
        try:
            handler_suite = self._handler_suite_map[existing_resource_path]
            self._handler_suite_map[alias_resource_path] = handler_suite
        except KeyError:
            raise DispatchException('No handler for: %r' %
                                    existing_resource_path)
    def source_warnings(self):
        """Return warnings in sourcing handlers."""
        return self._source_warnings
    def do_extra_handshake(self, request):
        """Do extra checking in WebSocket handshake.
        Select a handler based on request.uri and call its
        web_socket_do_extra_handshake function.
        Args:
            request: mod_python request.
        Raises:
            DispatchException: when handler was not found
            AbortedByUserException: when user handler abort connection
            HandshakeException: when opening handshake failed
        """
        handler_suite = self.get_handler_suite(request.ws_resource)
        if handler_suite is None:
            raise DispatchException('No handler for: %r' % request.ws_resource)
        do_extra_handshake_ = handler_suite.do_extra_handshake
        try:
            do_extra_handshake_(request)
        except handshake.AbortedByUserException, e:
            # Deliberate aborts propagate untouched.
            raise
        except Exception, e:
            # Any other handler failure is reported as a 403 handshake error
            # with the handler name prepended for diagnosis.
            util.prepend_message_to_exception(
                '%s raised exception for %s: ' % (
                    _DO_EXTRA_HANDSHAKE_HANDLER_NAME,
                    request.ws_resource),
                e)
            raise handshake.HandshakeException(e, common.HTTP_STATUS_FORBIDDEN)
    def transfer_data(self, request):
        """Let a handler transfer_data with a WebSocket client.
        Select a handler based on request.ws_resource and call its
        web_socket_transfer_data function.
        Args:
            request: mod_python request.
        Raises:
            DispatchException: when handler was not found
            AbortedByUserException: when user handler abort connection
        """
        # TODO(tyoshino): Terminate underlying TCP connection if possible.
        try:
            if mux.use_mux(request):
                mux.start(request, self)
            else:
                handler_suite = self.get_handler_suite(request.ws_resource)
                if handler_suite is None:
                    raise DispatchException('No handler for: %r' %
                                            request.ws_resource)
                transfer_data_ = handler_suite.transfer_data
                transfer_data_(request)
            if not request.server_terminated:
                request.ws_stream.close_connection()
        # Catch non-critical exceptions the handler didn't handle.
        # The ordering of these clauses matters: more specific exception
        # types are tested before their base classes.
        except handshake.AbortedByUserException, e:
            self._logger.debug('%s', e)
            raise
        except msgutil.BadOperationException, e:
            self._logger.debug('%s', e)
            request.ws_stream.close_connection(common.STATUS_ABNORMAL_CLOSURE)
        except msgutil.InvalidFrameException, e:
            # InvalidFrameException must be caught before
            # ConnectionTerminatedException that catches InvalidFrameException.
            self._logger.debug('%s', e)
            request.ws_stream.close_connection(common.STATUS_PROTOCOL_ERROR)
        except msgutil.UnsupportedFrameException, e:
            self._logger.debug('%s', e)
            request.ws_stream.close_connection(common.STATUS_UNSUPPORTED_DATA)
        except stream.InvalidUTF8Exception, e:
            self._logger.debug('%s', e)
            request.ws_stream.close_connection(
                common.STATUS_INVALID_FRAME_PAYLOAD_DATA)
        except msgutil.ConnectionTerminatedException, e:
            # Peer went away; nothing left to close.
            self._logger.debug('%s', e)
        except Exception, e:
            util.prepend_message_to_exception(
                '%s raised exception for %s: ' % (
                    _TRANSFER_DATA_HANDLER_NAME, request.ws_resource),
                e)
            raise
    def passive_closing_handshake(self, request):
        """Prepare code and reason for responding client initiated closing
        handshake.
        Falls back to the module default when no handler is registered for
        the resource.
        """
        handler_suite = self.get_handler_suite(request.ws_resource)
        if handler_suite is None:
            return _default_passive_closing_handshake_handler(request)
        return handler_suite.passive_closing_handshake(request)
    def get_handler_suite(self, resource):
        """Retrieves two handlers (one for extra handshake processing, and one
        for data transfer) for the given request as a HandlerSuite object.
        The query string is ignored for lookup; a fragment on a resource
        that has a handler is rejected per RFC 6455 section 3.
        """
        fragment = None
        if '#' in resource:
            resource, fragment = resource.split('#', 1)
        if '?' in resource:
            resource = resource.split('?', 1)[0]
        handler_suite = self._handler_suite_map.get(resource)
        if handler_suite and fragment:
            raise DispatchException('Fragment identifiers MUST NOT be used on '
                                    'WebSocket URIs',
                                    common.HTTP_STATUS_BAD_REQUEST)
        return handler_suite
    def _source_handler_files_in_dir(
        self, root_dir, scan_dir, allow_handlers_outside_root_dir):
        """Source all the handler source files in the scan_dir directory.
        The resource path is determined relative to root_dir.
        Sourcing failures are recorded in self._source_warnings rather than
        raised, so one broken handler does not abort the whole scan.
        """
        # We build a map from resource to handler code assuming that there's
        # only one path from root_dir to scan_dir and it can be obtained by
        # comparing realpath of them.
        # Here we cannot use abspath. See
        # https://bugs.webkit.org/show_bug.cgi?id=31603
        convert = _create_path_to_resource_converter(root_dir)
        scan_realpath = os.path.realpath(scan_dir)
        root_realpath = os.path.realpath(root_dir)
        for path in _enumerate_handler_file_paths(scan_realpath):
            if (not allow_handlers_outside_root_dir and
                (not os.path.realpath(path).startswith(root_realpath))):
                self._logger.debug(
                    'Canonical path of %s is not under root directory' %
                    path)
                continue
            try:
                handler_suite = _source_handler_file(open(path).read())
            except DispatchException, e:
                self._source_warnings.append('%s: %s' % (path, e))
                continue
            resource = convert(path)
            if resource is None:
                self._logger.debug(
                    'Path to resource conversion on %s failed' % path)
            else:
                self._handler_suite_map[convert(path)] = handler_suite
# vi:sts=4 sw=4 et
| bsd-3-clause |
FRC-Team-3140/north-american-happiness | lib/python2.7/site-packages/pymysql/tests/thirdparty/test_MySQLdb/test_MySQLdb_capabilities.py | 3 | 3553 | #!/usr/bin/env python
from . import capabilities
try:
import unittest2 as unittest
except ImportError:
import unittest
import pymysql
from pymysql.tests import base
import warnings
# Promote every warning to an error so the test suite fails loudly on them.
warnings.filterwarnings('error')
class test_MySQLdb(capabilities.DatabaseTest):
    """Run the generic DB-API capability suite against pymysql.
    Requires a live MySQL server configured via the first entry of
    base.PyMySQLTestCase.databases (plus ~/.my.cnf defaults).
    """
    db_module = pymysql
    connect_args = ()
    # Connection parameters come from the shared test configuration.
    connect_kwargs = base.PyMySQLTestCase.databases[0].copy()
    connect_kwargs.update(dict(read_default_file='~/.my.cnf',
                               use_unicode=True,
                               charset='utf8', sql_mode="ANSI,STRICT_TRANS_TABLES,TRADITIONAL"))
    create_table_extra = "ENGINE=INNODB CHARACTER SET UTF8"
    leak_test = False
    def quote_identifier(self, ident):
        # MySQL quotes identifiers with backticks.
        return "`%s`" % ident
    def test_TIME(self):
        # Round-trip TIME column values as timedeltas.
        from datetime import timedelta
        def generator(row,col):
            return timedelta(0, row*8000)
        self.check_data_integrity(
                 ('col1 TIME',),
                 generator)
    def test_TINYINT(self):
        # Number data
        def generator(row,col):
            # Fold the square into the signed 8-bit range [-128, 127].
            v = (row*row) % 256
            if v > 127:
                v = v-256
            return v
        self.check_data_integrity(
                 ('col1 TINYINT',),
                 generator)
    def test_stored_procedures(self):
        # Create a procedure, call it via callproc, and verify the result set.
        db = self.connection
        c = self.cursor
        try:
            self.create_table(('pos INT', 'tree CHAR(20)'))
            c.executemany("INSERT INTO %s (pos,tree) VALUES (%%s,%%s)" % self.table,
                          list(enumerate('ash birch cedar larch pine'.split())))
            db.commit()
            c.execute("""
            CREATE PROCEDURE test_sp(IN t VARCHAR(255))
            BEGIN
                SELECT pos FROM %s WHERE tree = t;
            END
            """ % self.table)
            db.commit()
            c.callproc('test_sp', ('larch',))
            rows = c.fetchall()
            self.assertEqual(len(rows), 1)
            self.assertEqual(rows[0][0], 3)
            c.nextset()
        finally:
            # Always drop the procedure and table, even on failure.
            c.execute("DROP PROCEDURE IF EXISTS test_sp")
            c.execute('drop table %s' % (self.table))
    def test_small_CHAR(self):
        # Character data
        def generator(row,col):
            i = ((row+1)*(col+1)+62)%256
            if i == 62: return ''
            if i == 63: return None
            return chr(i)
        self.check_data_integrity(
                 ('col1 char(1)','col2 char(1)'),
                 generator)
    def test_bug_2671682(self):
        # DESCRIBE on a missing table must raise ProgrammingError with the
        # server's NO_SUCH_TABLE error code.
        from pymysql.constants import ER
        try:
            self.cursor.execute("describe some_non_existent_table");
        except self.connection.ProgrammingError as msg:
            self.assertEqual(msg.args[0], ER.NO_SUCH_TABLE)
    def test_insert_values(self):
        # The insert_values regex should capture the VALUES tuple.
        from pymysql.cursors import insert_values
        query = """INSERT FOO (a, b, c) VALUES (a, b, c)"""
        matched = insert_values.search(query)
        self.assertTrue(matched)
        values = matched.group(1)
        self.assertTrue(values == "(a, b, c)")
    def test_ping(self):
        self.connection.ping()
    def test_literal_int(self):
        self.assertTrue("2" == self.connection.literal(2))
    def test_literal_float(self):
        self.assertTrue("3.1415" == self.connection.literal(3.1415))
    def test_literal_string(self):
        self.assertTrue("'foo'" == self.connection.literal("foo"))
if __name__ == '__main__':
    # Optionally enable GC leak debugging before running the suite.
    if test_MySQLdb.leak_test:
        import gc
        gc.enable()
        gc.set_debug(gc.DEBUG_LEAK)
    unittest.main()
| mit |
batra-mlp-lab/DIGITS | digits/dataset/tasks/analyze_db.py | 1 | 4644 | # Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
import sys
import os.path
import re
import digits
from digits.utils import subclass, override
from digits.task import Task
# NOTE: Increment PICKLE_VERSION every time the pickled layout of this object changes.
PICKLE_VERSION = 1
@subclass
class AnalyzeDbTask(Task):
    """
    Reads information from a database
    Runs tools/analyze_db.py as a subprocess and scrapes its log output
    for the entry count and (optionally) the common image shape.
    """
    def __init__(self, database, purpose, **kwargs):
        """
        Arguments:
        database -- path to the database to analyze
        purpose -- what is this database going to be used for
        Keyword arguments:
        force_same_shape -- if True, enforce that every entry in the database has the same shape
        """
        self.force_same_shape = kwargs.pop('force_same_shape', False)
        super(AnalyzeDbTask, self).__init__(**kwargs)
        self.pickver_task_analyzedb = PICKLE_VERSION
        self.database = database
        self.purpose = purpose
        self.backend = 'lmdb'
        # Results (populated by process_output as the tool reports them)
        self.image_count = None
        self.image_width = None
        self.image_height = None
        self.image_channels = None
        # One log file per purpose so parallel analyses don't clobber each other.
        self.analyze_db_log_file = 'analyze_db_%s.log' % '-'.join(p.lower() for p in self.purpose.split())
    def __getstate__(self):
        # The open log file handle cannot be pickled; drop it.
        state = super(AnalyzeDbTask, self).__getstate__()
        if 'analyze_db_log' in state:
            del state['analyze_db_log']
        return state
    def __setstate__(self, state):
        # Older pickles predate the 'backend' attribute; default it to lmdb.
        super(AnalyzeDbTask, self).__setstate__(state)
        if not hasattr(self, 'backend') or self.backend is None:
            self.backend = 'lmdb'
    @override
    def name(self):
        return 'Analyze DB (%s)' % (self.purpose)
    @override
    def html_id(self):
        return 'task-analyze-db-%s' % '-'.join(p.lower() for p in self.purpose.split())
    @override
    def offer_resources(self, resources):
        # Claim one slot from the analyze_db task pool, if any is free.
        key = 'analyze_db_task_pool'
        if key not in resources:
            return None
        for resource in resources[key]:
            if resource.remaining() >= 1:
                return {key: [(resource.identifier, 1)]}
        return None
    @override
    def task_arguments(self, resources, env):
        # Build the command line for tools/analyze_db.py, located relative
        # to the installed digits package.
        args = [sys.executable, os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(digits.__file__))),
            'tools', 'analyze_db.py'),
                self.database,
                ]
        if self.force_same_shape:
            args.append('--force-same-shape')
        else:
            args.append('--only-count')
        return args
    @override
    def before_run(self):
        super(AnalyzeDbTask, self).before_run()
        # Append mode so reruns keep earlier output for debugging.
        self.analyze_db_log = open(self.path(self.analyze_db_log_file), 'a')
    @override
    def process_output(self, line):
        # Mirror every tool line into the log file, then parse it.
        self.analyze_db_log.write('%s\n' % line)
        self.analyze_db_log.flush()
        timestamp, level, message = self.preprocess_output_digits(line)
        if not message:
            return False
        # progress
        match = re.match(r'Progress: (\d+)\/(\d+)', message)
        if match:
            self.progress = float(match.group(1))/float(match.group(2))
            self.emit_progress_update()
            return True
        # total count
        match = re.match(r'Total entries: (\d+)', message)
        if match:
            self.image_count = int(match.group(1))
            return True
        # image dimensions
        # NOTE(review): assumes the tool reports shape as WxHxC in that
        # order -- confirm against tools/analyze_db.py output format.
        match = re.match(r'(\d+) entries found with shape ((\d+)x(\d+)x(\d+))', message)
        if match:
            count = int(match.group(1))
            dims = match.group(2)
            self.image_width = int(match.group(3))
            self.image_height = int(match.group(4))
            self.image_channels = int(match.group(5))
            self.logger.debug('Images are %s' % dims)
            return True
        if level == 'warning':
            self.logger.warning('%s: %s' % (self.name(), message))
            return True
        if level in ['error', 'critical']:
            self.logger.error('%s: %s' % (self.name(), message))
            self.exception = message
            return True
        return True
    @override
    def after_run(self):
        super(AnalyzeDbTask, self).after_run()
        self.analyze_db_log.close()
    def image_type(self):
        """
        Returns an easy-to-read version of self.image_channels
        """
        if self.image_channels is None:
            return None
        elif self.image_channels == 1:
            return 'GRAYSCALE'
        elif self.image_channels == 3:
            return 'COLOR'
        else:
            return '%s-channel' % self.image_channels
| bsd-3-clause |
ygenc/onlineLDA | onlineldavb_new/build/scipy/scipy/optimize/minpack.py | 2 | 19848 | import warnings
import _minpack
from numpy import atleast_1d, dot, take, triu, shape, eye, \
transpose, zeros, product, greater, array, \
all, where, isscalar, asarray, inf, abs
# Re-export the C extension's exception type under the historical name.
error = _minpack.error
__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
def _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape=None):
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
if (output_shape is not None) and (shape(res) != output_shape):
if (output_shape[0] != 1):
if len(output_shape) > 1:
if output_shape[1] == 1:
return shape(res)
msg = "%s: there is a mismatch between the input and output " \
"shape of the '%s' argument" % (checker, argname)
func_name = getattr(thefunc, 'func_name', None)
if func_name:
msg += " '%s'." % func_name
else:
msg += "."
raise TypeError(msg)
return shape(res)
def fsolve(func, x0, args=(), fprime=None, full_output=0,
           col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
           epsfcn=0.0, factor=100, diag=None):
    """
    Find the roots of a function.
    Return the roots of the (non-linear) equations defined by
    ``func(x) = 0`` given a starting estimate.
    Parameters
    ----------
    func : callable f(x, *args)
        A function that takes at least one (possibly vector) argument.
    x0 : ndarray
        The starting estimate for the roots of ``func(x) = 0``.
    args : tuple
        Any extra arguments to `func`.
    fprime : callable(x)
        A function to compute the Jacobian of `func` with derivatives
        across the rows. By default, the Jacobian will be estimated.
    full_output : bool
        If True, return optional outputs.
    col_deriv : bool
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for
        an unsuccessful call).
    infodict : dict
        A dictionary of optional outputs with the keys::
          * 'nfev': number of function calls
          * 'njev': number of Jacobian calls
          * 'fvec': function evaluated at the output
          * 'fjac': the orthogonal matrix, q, produced by the QR
                    factorization of the final approximate Jacobian
                    matrix, stored column wise
          * 'r': upper triangular matrix produced by QR factorization of same
                 matrix
          * 'qtf': the vector (transpose(q) * fvec)
    ier : int
        An integer flag.  Set to 1 if a solution was found, otherwise refer
        to `mesg` for more information.
    mesg : str
        If no solution is found, `mesg` details the cause of failure.
    Other Parameters
    ----------------
    xtol : float
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int
        The maximum number of calls to the function. If zero, then
        ``100*(N+1)`` is the maximum where N is the number of elements
        in `x0`.
    band : tuple
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``fprime=None``).
    epsfcn : float
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``fprime=None``). If
        `epsfcn` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``).  Should be in the interval
        ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as a scale factors for the
        variables.
    Notes
    -----
    ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
    """
    x0 = array(x0, ndmin=1)
    n = len(x0)
    if type(args) != type(()): args = (args,)
    # Probe func once so shape errors surface with a clear message before
    # entering the FORTRAN code.
    _check_func('fsolve', 'func', func, x0, args, n, (n,))
    Dfun = fprime
    if Dfun is None:
        # No analytic Jacobian: use hybrd with forward differences.
        # ml/mu = -10 tells hybrd the Jacobian is not banded.
        if band is None:
            ml, mu = -10,-10
        else:
            ml, mu = band[:2]
        if (maxfev == 0):
            maxfev = 200*(n + 1)
        retval = _minpack._hybrd(func, x0, args, full_output, xtol,
                 maxfev, ml, mu, epsfcn, factor, diag)
    else:
        # Analytic Jacobian supplied: validate its shape and use hybrj.
        _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n,n))
        if (maxfev == 0):
            maxfev = 100*(n + 1)
        retval = _minpack._hybrj(func, Dfun, x0, args, full_output,
                 col_deriv, xtol, maxfev, factor,diag)
    # Map MINPACK's integer status to a message and an exception class
    # (None means success / warn-only).
    errors = {0:["Improper input parameters were entered.",TypeError],
              1:["The solution converged.", None],
              2:["The number of calls to function has "
                  "reached maxfev = %d." % maxfev, ValueError],
              3:["xtol=%f is too small, no further improvement "
                  "in the approximate\n  solution "
                  "is possible." % xtol, ValueError],
              4:["The iteration is not making good progress, as measured "
                  "by the \n  improvement from the last five "
                  "Jacobian evaluations.", ValueError],
              5:["The iteration is not making good progress, "
                  "as measured by the \n  improvement from the last "
                  "ten iterations.", ValueError],
              'unknown': ["An error occurred.", TypeError]}
    info = retval[-1]    # The FORTRAN return value
    if (info != 1 and not full_output):
        # Soft failures warn; hard failures raise the mapped exception.
        if info in [2,3,4,5]:
            msg = errors[info][0]
            warnings.warn(msg, RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])
    if full_output:
        try:
            return retval + (errors[info][0],)  # Return all + the message
        except KeyError:
            return retval + (errors['unknown'][0],)
    else:
        return retval[0]
def leastsq(func, x0, args=(), Dfun=None, full_output=0,
            col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
            gtol=0.0, maxfev=0, epsfcn=0.0, factor=100, diag=None):
    """
    Minimize the sum of squares of a set of equations.
    ::
        x = arg min(sum(func(y)**2,axis=0))
                 y
    Parameters
    ----------
    func : callable
        should take at least one (possibly length N vector) argument and
        returns M floating point numbers.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple
        Any extra arguments to func are placed in this tuple.
    Dfun : callable
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool
        non-zero to return all optional outputs.
    col_deriv : bool
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float
        Relative error desired in the sum of squares.
    xtol : float
        Relative error desired in the approximate solution.
    gtol : float
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int
        The maximum number of calls to the function. If zero, then 100*(N+1) is
        the maximum where N is the number of elements in x0.
    epsfcn : float
        A suitable step length for the forward-difference approximation of the
        Jacobian (for Dfun=None). If epsfcn is less than the machine precision,
        it is assumed that the relative errors in the functions are of the
        order of the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as a scale factors for the variables.
    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        Uses the fjac and ipvt optional outputs to construct an
        estimate of the jacobian around the solution.  ``None`` if a
        singular matrix encountered (indicates very flat curvature in
        some direction).  This matrix must be multiplied by the
        residual standard deviation to get the covariance of the
        parameter estimates -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the key s::
            - 'nfev' : the number of function calls
            - 'fvec' : the function evaluated at the output
            - 'fjac' : A permutation of the R matrix of a QR
                     factorization of the final approximate
                     Jacobian matrix, stored column wise.
                     Together with ipvt, the covariance of the
                     estimate can be approximated.
            - 'ipvt' : an integer array of length N which defines
                     a permutation matrix, p, such that
                     fjac*p = q*r, where r is upper triangular
                     with diagonal elements of nonincreasing
                     magnitude. Column j of p is column ipvt(j)
                     of the identity matrix.
            - 'qtf'  : the vector (transpose(q) * fvec).
    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag.  If it is equal to 1, 2, 3 or 4, the solution was
        found.  Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.
    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
    cov_x is a Jacobian approximation to the Hessian of the least squares
    objective function.
    This approximation assumes that the objective function is based on the
    difference between some observed target data (ydata) and a (non-linear)
    function of the parameters `f(xdata, params)` ::
           func(params) = ydata - f(xdata, params)
    so that the objective function is ::
           min   sum((ydata - f(xdata, params))**2, axis=0)
         params
    """
    x0 = array(x0, ndmin=1)
    n = len(x0)
    if type(args) != type(()):
        args = (args,)
    # Probe func once; M (residual count) must be at least N (parameters).
    m = _check_func('leastsq', 'func', func, x0, args, n)[0]
    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n,m))
    if Dfun is None:
        # No analytic Jacobian: lmdif with forward differences.
        if (maxfev == 0):
            maxfev = 200*(n + 1)
        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
                 gtol, maxfev, epsfcn, factor, diag)
    else:
        # Analytic Jacobian supplied: validate its orientation, then lmder.
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n,m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m,n))
        if (maxfev == 0):
            maxfev = 100*(n + 1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,
                 ftol, xtol, gtol, maxfev, factor, diag)
    # Map MINPACK's integer status to a message and an exception class
    # (None means success / warn-only).
    errors = {0:["Improper input parameters.", TypeError],
              1:["Both actual and predicted relative reductions "
                 "in the sum of squares\n  are at most %f" % ftol, None],
              2:["The relative error between two consecutive "
                 "iterates is at most %f" % xtol, None],
              3:["Both actual and predicted relative reductions in "
                 "the sum of squares\n  are at most %f and the "
                 "relative error between two consecutive "
                 "iterates is at \n  most %f" % (ftol,xtol), None],
              4:["The cosine of the angle between func(x) and any "
                 "column of the\n  Jacobian is at most %f in "
                 "absolute value" % gtol, None],
              5:["Number of calls to function has reached "
                 "maxfev = %d." % maxfev, ValueError],
              6:["ftol=%f is too small, no further reduction "
                 "in the sum of squares\n  is possible.""" % ftol, ValueError],
              7:["xtol=%f is too small, no further improvement in "
                 "the approximate\n  solution is possible." % xtol, ValueError],
              8:["gtol=%f is too small, func(x) is orthogonal to the "
                 "columns of\n  the Jacobian to machine "
                 "precision." % gtol, ValueError],
              'unknown':["Unknown error.", TypeError]}
    info = retval[-1]    # The FORTRAN return value
    if (info not in [1,2,3,4] and not full_output):
        if info in [5,6,7,8]:
            warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])
    mesg = errors[info][0]
    if full_output:
        # On success, reconstruct cov_x from the QR factors returned by
        # MINPACK; leave it None if R'R turns out singular.
        cov_x = None
        if info in [1,2,3,4]:
            from numpy.dual import inv
            from numpy.linalg import LinAlgError
            perm = take(eye(n),retval[1]['ipvt']-1,0)
            r = triu(transpose(retval[1]['fjac'])[:n,:])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R),R))
            except LinAlgError:
                pass
        return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (retval[0], info)
def _general_function(params, xdata, ydata, function):
return function(xdata, *params) - ydata
def _weighted_general_function(params, xdata, ydata, function, weights):
return weights * (function(xdata, *params) - ydata)
def curve_fit(f, xdata, ydata, p0=None, sigma=None, **kw):
    """
    Use non-linear least squares to fit a function, f, to data.
    Assumes ``ydata = f(xdata, *params) + eps``
    Parameters
    ----------
    f : callable
        The model function, f(x, ...).  It must take the independent
        variable as the first argument and the parameters to fit as
        separate remaining arguments.
    xdata : An N-length sequence or an (k,N)-shaped array
        for functions with k predictors.
        The independent variable where the data is measured.
    ydata : N-length sequence
        The dependent data --- nominally f(xdata, ...)
    p0 : None, scalar, or M-length sequence
        Initial guess for the parameters.  If None, then the initial
        values will all be 1 (if the number of parameters for the function
        can be determined using introspection, otherwise a ValueError
        is raised).
    sigma : None or N-length sequence
        If not None, it represents the standard-deviation of ydata.
        This vector, if given, will be used as weights in the
        least-squares problem.
    Returns
    -------
    popt : array
        Optimal values for the parameters so that the sum of the squared error
        of ``f(xdata, *popt) - ydata`` is minimized
    pcov : 2d array
        The estimated covariance of popt.  The diagonals provide the variance
        of the parameter estimate.
    See Also
    --------
    leastsq
    Notes
    -----
    The algorithm uses the Levenburg-Marquardt algorithm through `leastsq`.
    Additional keyword arguments are passed directly to that algorithm.
    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize import curve_fit
    >>> def func(x, a, b, c):
    ...     return a*np.exp(-b*x) + c
    >>> x = np.linspace(0,4,50)
    >>> y = func(x, 2.5, 1.3, 0.5)
    >>> yn = y + 0.2*np.random.normal(size=len(x))
    >>> popt, pcov = curve_fit(func, x, yn)
    """
    if p0 is None:
        # determine number of parameters by inspecting the function
        import inspect
        args, varargs, varkw, defaults = inspect.getargspec(f)
        if len(args) < 2:
            msg = "Unable to determine number of fit parameters."
            raise ValueError(msg)
        # For bound methods, 'self' and the independent variable are not
        # fit parameters; for plain functions only the first argument is.
        if 'self' in args:
            p0 = [1.0] * (len(args)-2)
        else:
            p0 = [1.0] * (len(args)-1)
    if isscalar(p0):
        p0 = array([p0])
    args = (xdata, ydata, f)
    if sigma is None:
        func = _general_function
    else:
        # Weight each residual by 1/sigma.
        func = _weighted_general_function
        args += (1.0/asarray(sigma),)
    # Remove full_output from kw, otherwise we're passing it in twice.
    return_full = kw.pop('full_output', False)
    res = leastsq(func, p0, args=args, full_output=1, **kw)
    (popt, pcov, infodict, errmsg, ier) = res
    if ier not in [1,2,3,4]:
        msg = "Optimal parameters not found: " + errmsg
        raise RuntimeError(msg)
    if (len(ydata) > len(p0)) and pcov is not None:
        # Scale the covariance by the residual variance (reduced chi-square).
        s_sq = (func(popt, *args)**2).sum()/(len(ydata)-len(p0))
        pcov = pcov * s_sq
    else:
        # Underdetermined problem or singular curvature: no finite estimate.
        pcov = inf
    if return_full:
        return popt, pcov, infodict, errmsg, ier
    else:
        return popt, pcov
def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
    """Perform a simple check on the gradient for correctness.
    Evaluates fcn and its claimed Jacobian Dfcn at x0 and runs MINPACK's
    chkder in its two modes.  Returns (good, err) where err holds chkder's
    per-component confidence values and good is nonzero when all exceed 0.5.
    """
    x = atleast_1d(x0)
    n = len(x)
    x = x.reshape((n,))
    fvec = atleast_1d(fcn(x,*args))
    m = len(fvec)
    fvec = fvec.reshape((m,))
    ldfjac = m
    fjac = atleast_1d(Dfcn(x,*args))
    fjac = fjac.reshape((m,n))
    # chkder expects derivatives down the columns; transpose a row-wise
    # Jacobian first.
    if col_deriv == 0:
        fjac = transpose(fjac)
    xp = zeros((n,), float)
    err = zeros((m,), float)
    fvecp = None
    # Mode 1 fills xp with a perturbed point; fvecp is unused here.
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)
    fvecp = atleast_1d(fcn(xp,*args))
    fvecp = fvecp.reshape((m,))
    # Mode 2 compares (fvecp - fvec) against the supplied Jacobian.
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)
    good = (product(greater(err, 0.5), axis=0))
    return (good, err)
# Steffensen's Method using Aitken's Del^2 convergence acceleration.
def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500):
    """Find the point where func(x) == x
    Given a function of one or more variables and a starting point, find a
    fixed-point of the function: i.e. where func(x)=x.
    Uses Steffensen's Method using Aitken's Del^2 convergence acceleration.
    See Burden, Faires, "Numerical Analysis", 5th edition, pg. 80
    Examples
    --------
    >>> from numpy import sqrt, array
    >>> from scipy.optimize import fixed_point
    >>> def func(x, c1, c2):
    ...     return sqrt(c1/(x+c2))
    >>> c1 = array([10,12.])
    >>> c2 = array([3, 5.])
    >>> fixed_point(func, [1.2, 1.3], args=(c1,c2))
    array([ 1.4920333 ,  1.37228132])
    """
    if isscalar(x0):
        # Scalar case: plain Python arithmetic, bailing out with the raw
        # second iterate when the Aitken denominator vanishes.
        p0 = x0
        for _iteration in range(maxiter):
            p1 = func(p0, *args)
            p2 = func(p1, *args)
            d = p2 - 2.0 * p1 + p0
            if d == 0.0:
                return p2
            p = p0 - (p1 - p0) * (p1 - p0) / d
            relerr = p if p0 == 0 else (p - p0) / p0
            if abs(relerr) < xtol:
                return p
            p0 = p
    else:
        # Vector case: accelerate every component together, falling back to
        # the plain iterate wherever the denominator is zero.
        p0 = asarray(x0)
        for _iteration in range(maxiter):
            p1 = func(p0, *args)
            p2 = func(p1, *args)
            d = p2 - 2.0 * p1 + p0
            p = where(d == 0, p2, p0 - (p1 - p0) * (p1 - p0) / d)
            relerr = where(p0 == 0, p, (p - p0) / p0)
            if all(abs(relerr) < xtol):
                return p
            p0 = p
    raise RuntimeError("Failed to converge after %d iterations, value is %s"
                       % (maxiter, p))
| gpl-3.0 |
jvkops/django | tests/sites_framework/tests.py | 108 | 2923 | from django.apps import apps
from django.conf import settings
from django.contrib.sites.managers import CurrentSiteManager
from django.contrib.sites.models import Site
from django.core import checks
from django.db import models
from django.test import TestCase
from .models import (
AbstractArticle, CustomArticle, ExclusiveArticle, SyndicatedArticle,
)
class SitesFrameworkTestCase(TestCase):
    """Exercise ``CurrentSiteManager`` filtering and its system checks."""
    def setUp(self):
        # Make sure the current site exists, plus a second site so the
        # manager has something to filter out.
        Site.objects.get_or_create(id=settings.SITE_ID, domain="example.com", name="example.com")
        Site.objects.create(id=settings.SITE_ID + 1, domain="example2.com", name="example2.com")
        # Snapshot the app registry: the check tests below define throwaway
        # model classes, which mutate the app's model cache.
        self._old_models = apps.app_configs['sites_framework'].models.copy()
    def tearDown(self):
        # Restore the registry snapshot, then clear cached lookups so the
        # throwaway models do not leak into later tests.
        apps.app_configs['sites_framework'].models = self._old_models
        apps.all_models['sites_framework'] = self._old_models
        apps.clear_cache()
    def test_site_fk(self):
        # on_site restricts a ForeignKey(Site) model to the current SITE_ID.
        article = ExclusiveArticle.objects.create(title="Breaking News!", site_id=settings.SITE_ID)
        self.assertEqual(ExclusiveArticle.on_site.all().get(), article)
    def test_sites_m2m(self):
        # on_site with a ManyToMany sites field: only the article attached to
        # the current site is returned, even though both exist.
        article = SyndicatedArticle.objects.create(title="Fresh News!")
        article.sites.add(Site.objects.get(id=settings.SITE_ID))
        article.sites.add(Site.objects.get(id=settings.SITE_ID + 1))
        article2 = SyndicatedArticle.objects.create(title="More News!")
        article2.sites.add(Site.objects.get(id=settings.SITE_ID + 1))
        self.assertEqual(SyndicatedArticle.on_site.all().get(), article)
    def test_custom_named_field(self):
        # CurrentSiteManager configured with a non-default field name.
        article = CustomArticle.objects.create(title="Tantalizing News!", places_this_article_should_appear_id=settings.SITE_ID)
        self.assertEqual(CustomArticle.on_site.all().get(), article)
    def test_invalid_name(self):
        # Naming a nonexistent field must surface check error sites.E001.
        class InvalidArticle(AbstractArticle):
            site = models.ForeignKey(Site, models.CASCADE)
            objects = models.Manager()
            on_site = CurrentSiteManager("places_this_article_should_appear")
        errors = InvalidArticle.check()
        expected = [
            checks.Error(
                ("CurrentSiteManager could not find a field named "
                 "'places_this_article_should_appear'."),
                hint=None,
                obj=InvalidArticle.on_site,
                id='sites.E001',
            )
        ]
        self.assertEqual(errors, expected)
    def test_invalid_field_type(self):
        # Pointing at a non-relational field must surface sites.E002.
        class ConfusedArticle(AbstractArticle):
            site = models.IntegerField()
        errors = ConfusedArticle.check()
        expected = [
            checks.Error(
                "CurrentSiteManager cannot use 'ConfusedArticle.site' as it is not a ForeignKey or ManyToManyField.",
                hint=None,
                obj=ConfusedArticle.on_site,
                id='sites.E002',
            )
        ]
        self.assertEqual(errors, expected)
| bsd-3-clause |
rorasa/KeeTerm | Crypto/Signature/PKCS1_PSS.py | 123 | 12228 | # -*- coding: utf-8 -*-
#
# Signature/PKCS1_PSS.py : PKCS#1 PPS
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""RSA digital signature protocol with appendix according to PKCS#1 PSS.
See RFC3447__ or the `original RSA Labs specification`__.
This scheme is more properly called ``RSASSA-PSS``.
For example, a sender may authenticate a message using SHA-1 and PSS like
this:
>>> from Crypto.Signature import PKCS1_PSS
>>> from Crypto.Hash import SHA
>>> from Crypto.PublicKey import RSA
>>> from Crypto import Random
>>>
>>> message = 'To be signed'
>>> key = RSA.importKey(open('privkey.der').read())
>>> h = SHA.new()
>>> h.update(message)
>>> signer = PKCS1_PSS.new(key)
>>> signature = signer.sign(h)
At the receiver side, verification can be done like using the public part of
the RSA key:
>>> key = RSA.importKey(open('pubkey.der').read())
>>> h = SHA.new()
>>> h.update(message)
>>> verifier = PKCS1_PSS.new(key)
>>> if verifier.verify(h, signature):
>>> print "The signature is authentic."
>>> else:
>>> print "The signature is not authentic."
:undocumented: __revision__, __package__
.. __: http://www.ietf.org/rfc/rfc3447.txt
.. __: http://www.rsa.com/rsalabs/node.asp?id=2125
"""
# Allow nested scopes in Python 2.1
# See http://oreilly.com/pub/a/python/2001/04/19/pythonnews.html
from __future__ import nested_scopes
__revision__ = "$Id$"
__all__ = [ 'new', 'PSS_SigScheme' ]
from Crypto.Util.py3compat import *
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
import Crypto.Util.number
from Crypto.Util.number import ceil_shift, ceil_div, long_to_bytes
from Crypto.Util.strxor import strxor
class PSS_SigScheme:
    """This signature scheme can perform PKCS#1 PSS RSA signature or verification."""
    def __init__(self, key, mgfunc, saltLen):
        """Initialize this PKCS#1 PSS signature scheme object.
        :Parameters:
         key : an RSA key object
          If a private half is given, both signature and verification are possible.
          If a public half is given, only verification is possible.
         mgfunc : callable
          A mask generation function that accepts two parameters: a string to
          use as seed, and the length of the mask to generate, in bytes.
         saltLen : int
          Length of the salt, in bytes.
        """
        self._key = key
        self._saltLen = saltLen
        self._mgfunc = mgfunc
    def can_sign(self):
        """Return True if this cipher object can be used for signing messages."""
        return self._key.has_private()
    def sign(self, mhash):
        """Produce the PKCS#1 PSS signature of a message.
        This function is named ``RSASSA-PSS-SIGN``, and is specified in
        section 8.1.1 of RFC3447.
        :Parameters:
         mhash : hash object
          The hash that was carried out over the message. This is an object
          belonging to the `Crypto.Hash` module.
        :Return: The PSS signature encoded as a string.
        :Raise ValueError:
            If the RSA key length is not sufficiently long to deal with the given
            hash algorithm.
        :Raise TypeError:
            If the RSA key has no private half.
        :attention: Modify the salt length and the mask generation function only
                    if you know what you are doing.
                    The receiver must use the same parameters too.
        """
        # TODO: Verify the key is RSA
        randfunc = self._key._randfunc
        # Set defaults for salt length and mask generation function
        # (identity comparison: None is a singleton, PEP 8 E711).
        if self._saltLen is None:
            sLen = mhash.digest_size
        else:
            sLen = self._saltLen
        if self._mgfunc:
            mgf = self._mgfunc
        else:
            # Default MGF1 seeded with the same hash algorithm as the digest.
            mgf = lambda x,y: MGF1(x,y,mhash)
        modBits = Crypto.Util.number.size(self._key.n)
        # See 8.1.1 in RFC3447
        k = ceil_div(modBits,8) # Convert from bits to bytes
        # Step 1
        em = EMSA_PSS_ENCODE(mhash, modBits-1, randfunc, mgf, sLen)
        # Step 2a (OS2IP) and 2b (RSASP1)
        # Note: decrypt() is used here as the raw RSA private-key primitive.
        m = self._key.decrypt(em)
        # Step 2c (I2OSP): left-pad with zero bytes up to the modulus size.
        S = bchr(0x00)*(k-len(m)) + m
        return S
    def verify(self, mhash, S):
        """Verify that a certain PKCS#1 PSS signature is authentic.
        This function checks if the party holding the private half of the given
        RSA key has really signed the message.
        This function is called ``RSASSA-PSS-VERIFY``, and is specified in section
        8.1.2 of RFC3447.
        :Parameters:
         mhash : hash object
          The hash that was carried out over the message. This is an object
          belonging to the `Crypto.Hash` module.
         S : string
          The signature that needs to be validated.
        :Return: True if verification is correct. False otherwise.
        """
        # TODO: Verify the key is RSA
        # Set defaults for salt length and mask generation function
        if self._saltLen is None:
            sLen = mhash.digest_size
        else:
            sLen = self._saltLen
        if self._mgfunc:
            mgf = self._mgfunc
        else:
            mgf = lambda x,y: MGF1(x,y,mhash)
        modBits = Crypto.Util.number.size(self._key.n)
        # See 8.1.2 in RFC3447
        k = ceil_div(modBits,8) # Convert from bits to bytes
        # Step 1: the signature must be exactly one modulus wide.
        if len(S) != k:
            return False
        # Step 2a (O2SIP), 2b (RSAVP1), and partially 2c (I2OSP)
        # Note that signature must be smaller than the module
        # but RSA.py won't complain about it.
        # TODO: Fix RSA object; don't do it here.
        # encrypt() is used here as the raw RSA public-key primitive.
        em = self._key.encrypt(S, 0)[0]
        # Step 2c
        emLen = ceil_div(modBits-1,8)
        em = bchr(0x00)*(emLen-len(em)) + em
        # Step 3
        try:
            result = EMSA_PSS_VERIFY(mhash, em, modBits-1, mgf, sLen)
        except ValueError:
            return False
        # Step 4
        return result
def MGF1(mgfSeed, maskLen, hash):
    """Mask Generation Function, described in B.2.1 of RFC3447.
    Concatenates hash(mgfSeed || counter) blocks until at least maskLen
    bytes are available, then truncates to exactly maskLen.
    """
    T = b("")
    for counter in xrange(ceil_div(maskLen, hash.digest_size)):
        # Encode the counter as a 4-byte big-endian string (I2OSP).
        c = long_to_bytes(counter, 4)
        T = T + hash.new(mgfSeed + c).digest()
    assert(len(T)>=maskLen)
    return T[:maskLen]
def EMSA_PSS_ENCODE(mhash, emBits, randFunc, mgf, sLen):
    """
    Implement the ``EMSA-PSS-ENCODE`` function, as defined
    in PKCS#1 v2.1 (RFC3447, 9.1.1).
    The original ``EMSA-PSS-ENCODE`` actually accepts the message ``M`` as input,
    and hash it internally. Here, we expect that the message has already
    been hashed instead.
    :Parameters:
     mhash : hash object
      The hash object that holds the digest of the message being signed.
     emBits : int
      Maximum length of the final encoding, in bits.
     randFunc : callable
      An RNG function that accepts as only parameter an int, and returns
      a string of random bytes, to be used as salt.
     mgf : callable
      A mask generation function that accepts two parameters: a string to
      use as seed, and the length of the mask to generate, in bytes.
     sLen : int
      Length of the salt, in bytes.
    :Return: An ``emLen`` byte long string that encodes the hash
      (with ``emLen = \ceil(emBits/8)``).
    :Raise ValueError:
        When digest or salt length are too big.
    """
    emLen = ceil_div(emBits,8)
    # Bitmask of digits that fill up
    # (one bit set for each of the 8*emLen - emBits unused high bits of the
    # leading byte; those bits are cleared in step 11 below.)
    lmask = 0
    for i in xrange(8*emLen-emBits):
        lmask = lmask>>1 | 0x80
    # Step 1 and 2 have been already done
    # Step 3
    if emLen < mhash.digest_size+sLen+2:
        raise ValueError("Digest or salt length are too long for given key size.")
    # Step 4: a fresh random salt for every signature (empty if sLen == 0).
    salt = b("")
    if randFunc and sLen>0:
        salt = randFunc(sLen)
    # Step 5 and 6: H = Hash(8 zero bytes || mHash || salt)
    h = mhash.new(bchr(0x00)*8 + mhash.digest() + salt)
    # Step 7 and 8: DB = PS || 0x01 || salt
    db = bchr(0x00)*(emLen-sLen-mhash.digest_size-2) + bchr(0x01) + salt
    # Step 9
    dbMask = mgf(h.digest(), emLen-mhash.digest_size-1)
    # Step 10
    maskedDB = strxor(db,dbMask)
    # Step 11: clear the leftmost 8*emLen - emBits bits of the first byte.
    maskedDB = bchr(bord(maskedDB[0]) & ~lmask) + maskedDB[1:]
    # Step 12: EM = maskedDB || H || 0xBC
    em = maskedDB + h.digest() + bchr(0xBC)
    return em
def EMSA_PSS_VERIFY(mhash, em, emBits, mgf, sLen):
    """
    Implement the ``EMSA-PSS-VERIFY`` function, as defined
    in PKCS#1 v2.1 (RFC3447, 9.1.2).
    ``EMSA-PSS-VERIFY`` actually accepts the message ``M`` as input,
    and hash it internally. Here, we expect that the message has already
    been hashed instead.
    :Parameters:
     mhash : hash object
      The hash object that holds the digest of the message to be verified.
     em : string
      The signature to verify, therefore proving that the sender really signed
      the message that was received.
     emBits : int
      Length of the final encoding (em), in bits.
     mgf : callable
      A mask generation function that accepts two parameters: a string to
      use as seed, and the length of the mask to generate, in bytes.
     sLen : int
      Length of the salt, in bytes.
    :Return: True if the encoding is consistent, False if it is inconsistent.
    :Raise ValueError:
        When digest or salt length are too big.
    """
    emLen = ceil_div(emBits,8)
    # Bitmask of digits that fill up
    # (the 8*emLen - emBits unused high bits of the leading byte; they must
    # all be zero in a well-formed encoding, checked at step 6.)
    lmask = 0
    for i in xrange(8*emLen-emBits):
        lmask = lmask>>1 | 0x80
    # Step 1 and 2 have been already done
    # Step 3
    if emLen < mhash.digest_size+sLen+2:
        return False
    # Step 4: the encoding must end with the 0xBC trailer byte.
    if ord(em[-1:])!=0xBC:
        return False
    # Step 5: split EM into maskedDB || H || 0xBC
    maskedDB = em[:emLen-mhash.digest_size-1]
    h = em[emLen-mhash.digest_size-1:-1]
    # Step 6
    if lmask & bord(em[0]):
        return False
    # Step 7
    dbMask = mgf(h, emLen-mhash.digest_size-1)
    # Step 8
    db = strxor(maskedDB, dbMask)
    # Step 9
    db = bchr(bord(db[0]) & ~lmask) + db[1:]
    # Step 10: DB must be PS (zero bytes) followed by a 0x01 separator.
    if not db.startswith(bchr(0x00)*(emLen-mhash.digest_size-sLen-2) + bchr(0x01)):
        return False
    # Step 11: the salt is the trailing sLen bytes of DB.
    salt = b("")
    if sLen: salt = db[-sLen:]
    # Step 12 and 13: recompute H' = Hash(8 zero bytes || mHash || salt)
    hp = mhash.new(bchr(0x00)*8 + mhash.digest() + salt).digest()
    # Step 14
    if h!=hp:
        return False
    return True
def new(key, mgfunc=None, saltLen=None):
    """Construct a `PSS_SigScheme` object for PKCS#1 PSS signing or
    verification.
    :Parameters:
     key : RSA key object
      The key to use to sign or verify the message. This is a
      `Crypto.PublicKey.RSA` object. Signing is only possible if *key* is
      a private RSA key.
     mgfunc : callable
      A mask generation function that accepts two parameters: a string to
      use as seed, and the length of the mask to generate, in bytes.
      If not specified, the standard MGF1 is used.
     saltLen : int
      Length of the salt, in bytes. If not specified, it matches the output
      size of the hash function.
    """
    scheme = PSS_SigScheme(key, mgfunc, saltLen)
    return scheme
| mit |
mrquim/repository.mrquim | repo/script.module.exodus/lib/resources/lib/sources/de/horrorkino.py | 5 | 3416 | # -*- coding: utf-8 -*-
"""
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
    """Exodus scraper for horrorkino.do.am (German horror streaming site)."""
    def __init__(self):
        self.priority = 1
        self.language = ['de']
        self.genre_filter = ['horror']
        self.domains = ['horrorkino.do.am']
        self.base_link = 'http://horrorkino.do.am/'
        self.search_link = 'video/shv'
    def movie(self, imdb, title, localtitle, aliases, year):
        """Return the site-relative URL for a movie, trying the localized
        title first and falling back to the original title."""
        try:
            url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
            if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
            return url
        # NOTE(review): bare except silently swallows every error; this
        # appears to be the scraper framework's convention — confirm.
        except:
            return
    def sources(self, url, hostDict, hostprDict):
        """Scrape the movie page for embedded hoster iframes and return one
        source dict per recognized host. (Local `sources` shadows the
        method name — kept as-is.)"""
        sources = []
        try:
            if not url:
                return sources
            r = client.request(urlparse.urljoin(self.base_link, url))
            # The player markup is stored escaped in a JS `vicode` variable.
            r = re.findall('''vicode\s*=\s*["'](.*?)["'];''', r)[0].decode('string_escape')
            r = dom_parser.parse_dom(r, 'iframe', req='src')
            r = [i.attrs['src'] for i in r]
            for i in r:
                valid, host = source_utils.is_host_valid(i, hostDict)
                if not valid: continue
                sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': i, 'direct': False, 'debridonly': False, 'checkquality': True})
            return sources
        except:
            return sources
    def resolve(self, url):
        # URLs are already final hoster links; nothing to resolve.
        return url
    def __search(self, titles, year):
        """POST the site search and return the relative URL of the first
        result whose cleaned title and year match."""
        try:
            t = [cleantitle.get(i) for i in set(titles) if i]
            # Accept the exact year, one year either side, or no year ('0').
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
            r = client.request(urlparse.urljoin(self.base_link, self.search_link), post={'query': cleantitle.query(titles[0])})
            r = dom_parser.parse_dom(r, 'li', attrs={'class': 'entTd'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 've-screen'}, req='title')
            r = [(dom_parser.parse_dom(i, 'a', req='href'), i.attrs['title'].split(' - ')[0]) for i in r]
            r = [(i[0][0].attrs['href'], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]
            return source_utils.strip_domain(r)
        except:
            return
| gpl-2.0 |
chetan/cherokee | qa/107-Priority1.py | 8 | 1113 | from base import *
# Markers planted inside the generated CGI script: the shell comment must
# never reach the client (the CGI is executed, not served as a file),
# while the echoed text must appear in the response body.
COMMENT = "This is comment inside the CGI"
TEXT = "It should be printed by the CGI"
# Two overlapping directory rules: /prio1 is served by the plain `file`
# handler, while the deeper /prio1/sub must be handled as CGI. The test
# checks that the more specific rule wins.
CONF = """
vserver!1!rule!1070!match = directory
vserver!1!rule!1070!match!directory = /prio1
vserver!1!rule!1070!handler = file
vserver!1!rule!1071!match = directory
vserver!1!rule!1071!match!directory = /prio1/sub
vserver!1!rule!1071!handler = cgi
"""
class Test (TestBase):
def __init__ (self):
TestBase.__init__ (self, __file__)
self.name = "Priorities: Dir and then Dir"
self.request = "GET /prio1/sub/exec.cgi HTTP/1.0\r\n"
self.expected_error = 200
self.expected_content = TEXT
self.forbidden_content = COMMENT
self.conf = CONF
def Prepare (self, www):
d = self.Mkdir (www, "prio1/sub")
f = self.WriteFile (d, "exec.cgi", 0555,
"""#!/bin/sh
echo "Content-type: text/html"
echo ""
# %s
echo "%s"
""" % (COMMENT, TEXT))
| gpl-2.0 |
umitproject/tease-o-matic | django/core/mail/__init__.py | 229 | 5072 | """
Tools for sending email.
"""
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
# Imported for backwards compatibility, and for the sake
# of a cleaner namespace. These symbols used to be in
# django/core/mail.py before the introduction of email
# backends and the subsequent reorganization (See #10355)
from django.core.mail.utils import CachedDnsName, DNS_NAME
from django.core.mail.message import \
EmailMessage, EmailMultiAlternatives, \
SafeMIMEText, SafeMIMEMultipart, \
DEFAULT_ATTACHMENT_MIME_TYPE, make_msgid, \
BadHeaderError, forbid_multi_line_headers
from django.core.mail.backends.smtp import EmailBackend as _SMTPConnection
def get_connection(backend=None, fail_silently=False, **kwds):
    """Load an e-mail backend and return an instance of it.
    If backend is None (default) settings.EMAIL_BACKEND is used.
    Both fail_silently and other keyword arguments are used in the
    constructor of the backend.
    """
    path = backend or settings.EMAIL_BACKEND
    try:
        # Split 'package.module.ClassName' into module path and class name.
        mod_name, klass_name = path.rsplit('.', 1)
        mod = import_module(mod_name)
    except ImportError, e:
        raise ImproperlyConfigured(('Error importing email backend module %s: "%s"'
                                    % (mod_name, e)))
    try:
        klass = getattr(mod, klass_name)
    except AttributeError:
        raise ImproperlyConfigured(('Module "%s" does not define a '
                                    '"%s" class' % (mod_name, klass_name)))
    return klass(fail_silently=fail_silently, **kwds)
def send_mail(subject, message, from_email, recipient_list,
              fail_silently=False, auth_user=None, auth_password=None,
              connection=None):
    """
    Convenience wrapper: send a single message to recipient_list. Every
    member of the list sees the other recipients in the 'To' field.
    If auth_user is None, the EMAIL_HOST_USER setting is used.
    If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.
    Note: The API for this method is frozen. New code wanting to extend the
    functionality should use the EmailMessage class directly.
    """
    connection = connection or get_connection(
        username=auth_user,
        password=auth_password,
        fail_silently=fail_silently,
    )
    mail = EmailMessage(subject, message, from_email, recipient_list,
                        connection=connection)
    return mail.send()
def send_mass_mail(datatuple, fail_silently=False, auth_user=None,
                   auth_password=None, connection=None):
    """
    Send one e-mail per (subject, message, from_email, recipient_list)
    tuple in datatuple, sharing a single connection. Returns the number
    of e-mails sent.
    If from_email is None, the DEFAULT_FROM_EMAIL setting is used.
    If auth_user and auth_password are set, they're used to log in.
    If auth_user is None, the EMAIL_HOST_USER setting is used.
    If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.
    Note: The API for this method is frozen. New code wanting to extend the
    functionality should use the EmailMessage class directly.
    """
    connection = connection or get_connection(
        username=auth_user,
        password=auth_password,
        fail_silently=fail_silently,
    )
    messages = [
        EmailMessage(subject, message, sender, recipient)
        for subject, message, sender, recipient in datatuple
    ]
    return connection.send_messages(messages)
def mail_admins(subject, message, fail_silently=False, connection=None,
                html_message=None):
    """Sends a message to the admins, as defined by the ADMINS setting."""
    if not settings.ADMINS:
        return
    recipients = [address for _, address in settings.ADMINS]
    msg = EmailMultiAlternatives(
        u'%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
        message, settings.SERVER_EMAIL, recipients,
        connection=connection)
    if html_message:
        msg.attach_alternative(html_message, 'text/html')
    msg.send(fail_silently=fail_silently)
def mail_managers(subject, message, fail_silently=False, connection=None,
                  html_message=None):
    """Sends a message to the managers, as defined by the MANAGERS setting."""
    if not settings.MANAGERS:
        return
    recipients = [address for _, address in settings.MANAGERS]
    msg = EmailMultiAlternatives(
        u'%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
        message, settings.SERVER_EMAIL, recipients,
        connection=connection)
    if html_message:
        msg.attach_alternative(html_message, 'text/html')
    msg.send(fail_silently=fail_silently)
class SMTPConnection(_SMTPConnection):
    """Deprecated alias for the SMTP backend; kept for backwards
    compatibility, warns on instantiation."""
    def __init__(self, *args, **kwds):
        import warnings
        warnings.warn(
            'mail.SMTPConnection is deprecated; use mail.get_connection() instead.',
            DeprecationWarning
        )
        super(SMTPConnection, self).__init__(*args, **kwds)
| bsd-3-clause |
JSeam2/IsoGraph | old/gen_iso_graph.py | 1 | 4050 | # -*- coding: utf-8 -*-
"""
Generate adjacency matrices of isomorphic graphs
task
1) Check the sizes of isomorphic graphs you want to generate
2) Store them in different numpy files for various graph sizes
Build a table for example:
______________________________________
| Graph 1 | Graph 2 | Is Isomorphic? |
|--------------------------------------
| ... | ... | 0 - No; 1 - Yes|
|______________________________________
. . .
. . .
. . .
"""
import os
import numpy as np
import networkx as nx
from networkx.algorithms import isomorphism
import sqlite3
def gen_rnd_graph(n, mode="dense"):
    """Generate a random pair of graphs on n nodes and test isomorphism.

    Two random graphs with n nodes and n edges are drawn independently,
    compared with networkx's GraphMatcher, and returned as numpy
    adjacency matrices.

    :param:
        n(int)   : number of nodes in each graph
        mode(str): 'dense' for nx.dense_gnm_random_graph,
                   'sparse' for nx.gnm_random_graph
    :returns:
        tuple (graph1(numpy), graph2(numpy), is_isomorphic(int)) where
        is_isomorphic is 1 if the graphs are isomorphic, 0 otherwise;
        or the string 'Invalid Mode' for an unknown mode.
    """
    if mode == 'dense':
        first = nx.dense_gnm_random_graph(n, n)
        second = nx.dense_gnm_random_graph(n, n)
    elif mode == 'sparse':
        # This might not really be sparse
        first = nx.gnm_random_graph(n, n)
        second = nx.gnm_random_graph(n, n)
    else:
        return 'Invalid Mode'
    # GraphMatcher decides whether a structure-preserving bijection exists.
    matcher = isomorphism.GraphMatcher(first, second)
    iso_flag = 1 if matcher.is_isomorphic() else 0
    return (nx.to_numpy_matrix(first), nx.to_numpy_matrix(second), iso_flag)
def save_graph(nodes, num_graph, db_path = "./graph.db" ):
    """
    Open (or create) the sqlite3 database at db_path and fill it with
    random graph pairs produced by gen_rnd_graph.
    One table is created per node count, from 3 up to (but not including)
    `nodes`; each row stores a pair of adjacency matrices serialized with
    numpy's tostring() plus an is_isomorphic flag.
    To rebuild a matrix later, use np.fromstring on the stored bytes.
    :param: nodes := build tables for node counts 3 .. nodes-1 (int)
    :param: num_graph := number of graph pairs per table (int)
    :param: db_path := path of sqlite3 db, default is same directory as gen_iso_graph.py
    """
    # connect to db path; sqlite3 creates the file if it doesn't exist
    conn = sqlite3.connect(db_path)
    with conn:
        # 1st loop to make various tables with various nodes x
        # 2nd loop to make insert gen_rnd_graph entries with nodes x
        for x in range(3,nodes):
            cur = conn.cursor()
            # Create Table. The name is interpolated from an int we generate
            # ourselves so it is safe here, but this string formatting of a
            # SQL command is generally bad practice.
            cur.execute("CREATE TABLE IF NOT EXISTS Node_{} (Id INT, Graph1 BLOB, Graph2 BLOB, is_isomorphic INT)".format(str(x)))
            for num in range(num_graph):
                g1, g2 , is_isomorphic = gen_rnd_graph(x)
                # Convert np tostring
                # To retrieve back using np.fromstring(bytearray)
                cur.execute("INSERT INTO Node_{} VALUES(?,?,?,?)".format(str(x))
                    ,(num, g1.tostring(), g2.tostring(), is_isomorphic))
            conn.commit()
if __name__ == "__main__":
    # Bulk generation (disabled): populate graph.db with 20000 pairs per size.
    #save_graph(10, 20000, "./graph.db")
    # Quick smoke test: print one random sparse 3-node pair and its flag.
    A,B,C = (gen_rnd_graph(3,mode='sparse'))
    print(A)
    print()
    print(B)
    print()
    print(C)
| mit |
adamfisk/littleshoot-client | server/common/appengine/patch/common/appenginepatch/ragendja/forms.py | 1 | 11392 | from copy import deepcopy
import re
from django.utils.datastructures import SortedDict, MultiValueDict
from django.utils.html import conditional_escape
from django.utils.encoding import StrAndUnicode, smart_unicode, force_unicode
from django.utils.safestring import mark_safe
from django.forms.widgets import flatatt
from google.appengine.ext import db
from ragendja.dbutils import transaction
class FormWithSets(object):
    """Wrap a ModelForm class so that its FormSetField fields are backed by
    real inline formsets. Calling the wrapper builds the form plus one
    formset per field and returns a combined FormWithSetsInstance."""
    def __init__(self, form, formsets=()):
        self.form = form
        # Impersonate the wrapped form class so introspection keeps working.
        setattr(self, '__module__', form.__module__)
        setattr(self, '__name__', form.__name__ + 'WithSets')
        setattr(self, '__doc__', form.__doc__)
        self._meta = form._meta
        # Auto-create a formset for every FormSetField not explicitly listed.
        fields = [(name, field) for name, field in form.base_fields.iteritems() if isinstance(field, FormSetField)]
        formset_dict = dict(formsets)
        newformsets = []
        for name, field in fields:
            if formset_dict.has_key(name):
                continue
            newformsets.append((name, {'formset':field.make_formset(form._meta.model)}))
        self.formsets = formsets + tuple(newformsets)
    def __call__(self, *args, **kwargs):
        # Build the form and each formset with a per-formset prefix derived
        # from the caller's prefix (if any) plus the field name.
        prefix = kwargs['prefix'] + '-' if 'prefix' in kwargs else ''
        form = self.form(*args, **kwargs)
        formsets = []
        for name, formset in self.formsets:
            kwargs['prefix'] = prefix + name
            instance = formset['formset'](*args, **kwargs)
            if form.base_fields.has_key(name):
                field = form.base_fields[name]
            else:
                # Synthesize a field for formsets passed in explicitly.
                field = FormSetField(formset['formset'].model, **formset)
            formsets.append(BoundFormSet(field, instance, name, formset))
        # Create a one-off subclass named after the wrapped form.
        return type(self.__name__ + 'Instance', (FormWithSetsInstance, ), {})(self, form, formsets)
def pretty_name(name):
    """Turn an identifier such as 'first_name' into 'First name'."""
    capitalized = '%s%s' % (name[0].upper(), name[1:])
    return capitalized.replace('_', ' ')
# Regexes used to pull a rendered form apart again: for each output flavor
# (table / ul / p) one pattern splits the HTML into (before, rows, after)
# and another captures (whole row, label part, field part) per row.
table_sections_re = re.compile(r'^(.*?)(<tr>.*</tr>)(.*?)$', re.DOTALL)
table_row_re = re.compile(r'(<tr>(<th><label.*?</label></th>)(<td>.*?</td>)</tr>)', re.DOTALL)
ul_sections_re = re.compile(r'^(.*?)(<li>.*</li>)(.*?)$', re.DOTALL)
ul_row_re = re.compile(r'(<li>(<label.*?</label>)(.*?)</li>)', re.DOTALL)
p_sections_re = re.compile(r'^(.*?)(<p>.*</p>)(.*?)$', re.DOTALL)
p_row_re = re.compile(r'(<p>(<label.*?</label>)(.*?)</p>)', re.DOTALL)
# Captures (prefix, field id, label text, suffix) from a rendered <label>.
label_re = re.compile(r'^(.*)<label for="id_(.*?)">(.*)</label>(.*)$')
class BoundFormSet(StrAndUnicode):
    """A formset instance bound to its FormSetField, analogous to Django's
    BoundField: knows how to render the formset as a transposed table."""
    def __init__(self, field, formset, name, args):
        self.field = field
        self.formset = formset
        self.name = name
        self.args = args
        if self.field.label is None:
            self.label = pretty_name(name)
        else:
            self.label = self.field.label
        self.auto_id = self.formset.auto_id % self.formset.prefix
        if args.has_key('attrs'):
            # Copy so later mutation doesn't leak into the shared config dict.
            self.attrs = args['attrs'].copy()
        else:
            self.attrs = {}
    def __unicode__(self):
        """Renders this field as an HTML widget."""
        return self.as_widget()
    def as_widget(self, attrs=None):
        """
        Renders the field by rendering the passed widget, adding any HTML
        attributes passed as attrs. If no widget is specified, then the
        field's default widget will be used.
        """
        attrs = attrs or {}
        auto_id = self.auto_id
        if auto_id and 'id' not in attrs and not self.args.has_key('id'):
            attrs['id'] = auto_id
        try:
            data = self.formset.as_table()
            name = self.name
            return self.render(name, data, attrs=attrs)
        # NOTE(review): returns the traceback text as the rendered widget —
        # looks like a debugging aid left in place; consider re-raising.
        except Exception, e:
            import traceback
            return traceback.format_exc()
    def render(self, name, value, attrs=None):
        """Re-parse the formset's default row-per-field table rendering and
        emit a transposed table: one header row, one row per member form.
        Row boundaries are detected by watching for the first field id
        repeating (with the form index bumped from 0 to 1)."""
        table_sections = table_sections_re.search(value).groups()
        output = []
        heads = []
        current_row = []
        first_row = True
        first_head_id = None
        # NOTE(review): `prefix` is computed but never used below.
        prefix = 'id_%s-%%s-' % self.formset.prefix
        for row, head, item in table_row_re.findall(table_sections[1]):
            if first_row:
                head_groups = label_re.search(head).groups()
                if first_head_id == head_groups[1]:
                    # Seen the first field again: header collection is done.
                    first_row = False
                    output.append(current_row)
                    current_row = []
                else:
                    heads.append('%s%s%s' % (head_groups[0], head_groups[2], head_groups[3]))
                    if first_head_id is None:
                        first_head_id = head_groups[1].replace('-0-','-1-')
            current_row.append(item)
            if not first_row and len(current_row) >= len(heads):
                output.append(current_row)
                current_row = []
        if len(current_row) != 0:
            raise Exception('Unbalanced render')
        # Rotate so the last column (typically DELETE) comes first.
        def last_first(tuple):
            return tuple[-1:] + tuple[:-1]
        return mark_safe(u'%s<table%s><tr>%s</tr><tr>%s</tr></table>%s'%(
            table_sections[0],
            flatatt(attrs),
            u''.join(last_first(heads)),
            u'</tr><tr>'.join((u''.join(last_first(x)) for x in output)),
            table_sections[2]))
class CachedQuerySet(object):
    """Evaluate a queryset factory immediately and hand the one resulting
    iterator back on every call, so the query is not re-run later (e.g.
    inside a datastore transaction)."""
    def __init__(self, get_queryset):
        # get_queryset() runs right here; the generator merely walks its
        # result and is shared by all subsequent calls.
        self.queryset_results = (entry for entry in get_queryset())
    def __call__(self):
        return self.queryset_results
class FormWithSetsInstance(object):
    """Aggregate of one bound form plus its bound formsets; mirrors the
    Django Form API (is_valid/save/as_table/...) across all of them."""
    def __init__(self, master, form, formsets):
        self.master = master
        self.form = form
        self.formsets = formsets
        self.instance = form.instance
    def __unicode__(self):
        return self.as_table()
    def is_valid(self):
        # Validate everything (no short-circuit) so all errors are collected.
        result = self.form.is_valid()
        for bf in self.formsets:
            result = bf.formset.is_valid() and result
        return result
    def save(self, *args, **kwargs):
        """Save the form and its formsets; parent-related formsets are saved
        together with the form inside a single datastore transaction."""
        def save_forms(forms, obj=None):
            # Saves each form in order; once the master form has produced
            # the new instance, it is injected into dependent rows.
            for form in forms:
                if not instance and form != self.form:
                    for row in form.forms:
                        row.cleaned_data[form.rel_name] = obj
                form_obj = form.save(*args, **kwargs)
                if form == self.form:
                    obj = form_obj
            return obj
        instance = self.form.instance
        grouped = [self.form]
        ungrouped = []
        # cache the result of get_queryset so that it doesn't run inside the transaction
        for bf in self.formsets:
            if bf.formset.rel_name == 'parent':
                grouped.append(bf.formset)
            else:
                ungrouped.append(bf.formset)
            bf.formset_get_queryset = bf.formset.get_queryset
            bf.formset.get_queryset = CachedQuerySet(bf.formset_get_queryset)
        obj = db.run_in_transaction(save_forms, grouped)
        save_forms(ungrouped, obj)
        # Undo the get_queryset monkey-patch applied above.
        for bf in self.formsets:
            bf.formset.get_queryset = bf.formset_get_queryset
            del bf.formset_get_queryset
        return obj
    def _html_output(self, form_as, normal_row, help_text_html, sections_re, row_re):
        """Render the plain form, then splice each formset's rendering into
        the row that the form emitted for the matching FormSetField."""
        formsets = SortedDict()
        for bf in self.formsets:
            if bf.label:
                label = conditional_escape(force_unicode(bf.label))
                # Only add the suffix if the label does not end in
                # punctuation.
                if self.form.label_suffix:
                    if label[-1] not in ':?.!':
                        label += self.form.label_suffix
                label = label or ''
            else:
                label = ''
            if bf.field.help_text:
                help_text = help_text_html % force_unicode(bf.field.help_text)
            else:
                help_text = u''
            formsets[bf.name] = normal_row % {'label': force_unicode(label), 'field': unicode(bf), 'help_text': help_text}
        try:
            output = []
            data = form_as()
            section_search = sections_re.search(data)
            if not section_search:
                output.append(data)
            else:
                section_groups = section_search.groups()
                for row, head, item in row_re.findall(section_groups[1]):
                    head_search = label_re.search(head)
                    if head_search:
                        id = head_search.groups()[1]
                        if formsets.has_key(id):
                            # Replace the form's placeholder row in place.
                            row = formsets[id]
                            del formsets[id]
                    output.append(row)
                # Append formset rows that had no matching form row.
                for name, row in formsets.items():
                    if name in self.form.fields.keyOrder:
                        output.append(row)
            return mark_safe(u'\n'.join(output))
        # NOTE(review): returns the traceback as the rendered output —
        # debugging aid; consider re-raising instead.
        except Exception,e:
            import traceback
            return traceback.format_exc()
    def as_table(self):
        "Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
        return self._html_output(self.form.as_table, u'<tr><th>%(label)s</th><td>%(help_text)s%(field)s</td></tr>', u'<br />%s', table_sections_re, table_row_re)
    def as_ul(self):
        "Returns this form rendered as HTML <li>s -- excluding the <ul></ul>."
        return self._html_output(self.form.as_ul, u'<li>%(label)s %(help_text)s%(field)s</li>', u' %s', ul_sections_re, ul_row_re)
    def as_p(self):
        "Returns this form rendered as HTML <p>s."
        return self._html_output(self.form.as_p, u'<p>%(label)s %(help_text)s</p>%(field)s', u' %s', p_sections_re, p_row_re)
    def full_clean(self):
        self.form.full_clean()
        for bf in self.formsets:
            bf.formset.full_clean()
    def has_changed(self):
        result = self.form.has_changed()
        for bf in self.formsets:
            result = bf.formset.has_changed() or result
        return result
    def is_multipart(self):
        result = self.form.is_multipart()
        for bf in self.formsets:
            result = bf.formset.is_multipart() or result
        return result
from django.forms.fields import Field
from django.forms.widgets import Widget
from django.forms.models import inlineformset_factory
class FormSetWidget(Widget):
    """Placeholder widget for a FormSetField.

    The widget cannot render a formset on its own; the FormWithSets decorator
    is expected to substitute the real formset markup afterwards.  When no
    value is supplied, a reminder message naming the formset's model is
    rendered instead.
    """
    def __init__(self, field, attrs=None):
        super(FormSetWidget, self).__init__(attrs)
        # Back-reference to the owning FormSetField so render() can report
        # which model's formset is missing its decorator.
        self.field = field

    def render(self, name, value, attrs=None):
        """Render the widget as escaped, marked-safe text (not formset HTML)."""
        if value is None:
            value = 'FormWithSets decorator required to render %s FormSet' % self.field.model.__name__
        value = force_unicode(value)
        # BUG FIX: removed an unused "final_attrs = self.build_attrs(...)"
        # call whose result was never used (dead code).
        return mark_safe(conditional_escape(value))
class FormSetField(Field):
    """Form field carrying the configuration for an inline formset.

    The field never validates user input directly (required=False); it stores
    the child model plus factory arguments so make_formset() can build the
    concrete formset class for a given parent model.
    """
    def __init__(self, model, widget=FormSetWidget, label=None, initial=None,
                 help_text=None, error_messages=None, show_hidden_initial=False,
                 formset_factory=inlineformset_factory, *args, **kwargs):
        # The widget class is instantiated with a back-reference to this field.
        bound_widget = widget(self)
        super(FormSetField, self).__init__(
            required=False, widget=bound_widget, label=label, initial=initial,
            help_text=help_text, error_messages=error_messages,
            show_hidden_initial=show_hidden_initial)
        self.model = model
        self.formset_factory = formset_factory
        # Extra positional/keyword arguments are forwarded verbatim to the
        # formset factory when make_formset() is called.
        self.args = args
        self.kwargs = kwargs

    def make_formset(self, parent_model):
        """Build and return the inline formset class linking parent_model to self.model."""
        return self.formset_factory(parent_model, self.model, *self.args, **self.kwargs)
| gpl-2.0 |
dario-ramos/assimp_x3d | assimp-3.1.1/test/regression/settings.py | 15 | 5561 | #!/usr/bin/env python3
# -*- Coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# Open Asset Import Library (ASSIMP)
# ---------------------------------------------------------------------------
#
# Copyright (c) 2006-2010, ASSIMP Development Team
#
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# * Neither the name of the ASSIMP team, nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior
# written permission of the ASSIMP Development Team.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ---------------------------------------------------------------------------
"""Shared settings for the regression suite (bold builder and
test scripts rely on this)
"""
import os
# -------------------------------------------------------------------------------
# List of file extensions to be excluded from the regression suite
# File extensions are case insensitive
# -------------------------------------------------------------------------------
exclude_extensions = [
".lws", ".assbin", ".assxml", ".txt", ".md",
".jpeg", ".jpg", ".png", ".gif", ".tga", ".bmp",
".skeleton", ".skeleton.xml"
]
# -------------------------------------------------------------------------------
# Post processing configurations to be included in the test. The
# strings are parameters for assimp_cmd, see assimp_cmd's doxydoc
# for more details.
# The defaults are (validate-data-structure is always enabled, for
# self-explanatory reasons :-):
#
# '-cfull' :apply all post processing except 'og' and 'ptv' (optimize-scenegraph)
# '-og -om' :run optimize-scenegraph in combination with optimize-meshes.
# '-vds -jiv' :join-identical-vertices alone. This is a hotspot where
# floating-point inaccuracies can cause severe damage.
# '-ptv': transform all meshes to world-space
# As you can see, not all possible combinations of pp steps are covered -
# but at least each step is executed at least once on each model.
# -------------------------------------------------------------------------------
pp_configs_to_test = [
"-cfull",
"-og -om -vds",
"-vds -jiv",
"-ptv -gsn -cts -db",
# this is especially important: if no failures are present with this
# preset, the regression is most likely caused by the post
# processing pipeline.
""
]
# -------------------------------------------------------------------------------
# Name of the regression database file to be used
# gen_db.py writes to this directory, run.py checks against this directory.
# If a zip file with the same name exists, its contents are favoured to a
# normal directory, so in order to test against unzipped files the ZIP needs
# to be deleted.
# -------------------------------------------------------------------------------
database_name = "db"
# -------------------------------------------------------------------------------
# List of directories to be processed. Paths are processed recursively.
# -------------------------------------------------------------------------------
model_directories = [
os.path.join("..","models"),
os.path.join("..","models-nonbsd")
]
# -------------------------------------------------------------------------------
# Remove the original database files after the ZIP has been built?
# -------------------------------------------------------------------------------
remove_old = True
# -------------------------------------------------------------------------------
# Bytes to skip at the beginning of a dump. This skips the file header, which
# is currently the same 500 bytes header for both assbin, assxml and minidumps.
# -------------------------------------------------------------------------------
dump_header_skip = 500
# -------------------------------------------------------------------------------
# Directory to write all results and logs to. The dumps pertaining to failed
# tests are written to a subfolder of this directory ('tmp').
# -------------------------------------------------------------------------------
results = os.path.join("..","results")
# Ensure the results directory exists.  exist_ok=True replaces the previous
# "if not os.path.exists(...)" guard, which was racy: another process could
# create the directory between the check and the makedirs call.
os.makedirs(results, exist_ok=True)
# vim: ai ts=4 sts=4 et sw=4
| gpl-2.0 |
tdtrask/ansible | lib/ansible/modules/cloud/digital_ocean/digital_ocean_domain.py | 11 | 6364 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: digital_ocean_domain
short_description: Create/delete a DNS record in DigitalOcean
description:
- Create/delete a DNS record in DigitalOcean.
version_added: "1.6"
author: "Michael Gregson (@mgregson)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
oauth_token:
description:
- DigitalOcean api token.
version_added: "1.9.5"
aliases: ['API_TOKEN']
id:
description:
- Numeric, the droplet id you want to operate on.
aliases: ['droplet_id']
name:
description:
- String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key, or the name of a domain.
ip:
description:
- The IP address to point a domain at.
notes:
- Environment variables DO_OAUTH_TOKEN can be used for the oauth_token.
- As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used, this removes C(client_id) and C(api_key) options in favor of C(oauth_token).
- If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired.
requirements:
- "python >= 2.6"
'''
EXAMPLES = '''
# Create a domain record
- digital_ocean_domain:
state: present
name: my.digitalocean.domain
ip: 127.0.0.1
# Create a droplet and a corresponding domain record
- digital_ocean:
state: present
name: test_droplet
size_id: 1gb
region_id: sgp1
image_id: ubuntu-14-04-x64
register: test_droplet
- digital_ocean_domain:
state: present
name: "{{ test_droplet.droplet.name }}.my.domain"
ip: "{{ test_droplet.droplet.ip_address }}"
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.digital_ocean import DigitalOceanHelper
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import env_fallback
class DoManager(DigitalOceanHelper, object):
    """Thin wrapper around the DigitalOcean v2 domains API.

    Pulls 'name', 'ip' and 'id' from the module parameters once at
    construction time; each method maps to a single API endpoint.
    """
    def __init__(self, module):
        super(DoManager, self).__init__(module)
        self.domain_name = module.params.get('name', None)
        self.domain_ip = module.params.get('ip', None)
        self.domain_id = module.params.get('id', None)

    @staticmethod
    def jsonify(response):
        # Split a helper response into (HTTP status code, decoded JSON body).
        return response.status_code, response.json

    def all_domains(self):
        """Return the raw response of GET /domains/."""
        resp = self.get('domains/')
        return resp

    def find(self):
        """Return True if a domain named self.domain_name exists.

        NOTE(review): despite the id check below, the lookup is by name only;
        self.domain_id is never compared -- confirm this is intended.
        """
        if self.domain_name is None and self.domain_id is None:
            return False
        domains = self.all_domains()
        status, json = self.jsonify(domains)
        for domain in json['domains']:
            if domain['name'] == self.domain_name:
                return True
        return False

    def add(self):
        """Create the domain; return its JSON on HTTP 201, otherwise the error body."""
        params = {'name': self.domain_name, 'ip_address': self.domain_ip}
        resp = self.post('domains/', data=params)
        status = resp.status_code
        json = resp.json
        if status == 201:
            return json['domain']
        else:
            return json

    def all_domain_records(self):
        """Return the decoded JSON listing of all DNS records of the domain."""
        resp = self.get('domains/%s/records/' % self.domain_name)
        return resp.json

    def domain_record(self):
        """Return the decoded JSON for the domain itself."""
        resp = self.get('domains/%s' % self.domain_name)
        status, json = self.jsonify(resp)
        return json

    def destroy_domain(self):
        """Delete the domain; return True on HTTP 204, otherwise the error body."""
        resp = self.delete('domains/%s' % self.domain_name)
        status, json = self.jsonify(resp)
        if status == 204:
            return True
        else:
            return json

    def edit_domain_record(self):
        """Rename record self.domain_id to self.domain_name; return the updated record JSON."""
        params = {'name': self.domain_name}
        resp = self.put('domains/%s/records/%s' % (self.domain_name, self.domain_id), data=params)
        # BUG FIX: self.put() returns a response object like every other
        # helper call here, not a dict; the old code indexed the response
        # itself (resp['domain_record']) and raised TypeError.  Decode the
        # JSON body first, consistent with the sibling methods.
        status, json = self.jsonify(resp)
        return json['domain_record']
def core(module):
    """Ensure the DigitalOcean domain is in the requested state.

    Always exits through module.exit_json()/fail_json(); never returns
    normally to the caller.
    """
    do_manager = DoManager(module)
    state = module.params.get('state')

    domain = do_manager.find()
    if state == 'present':
        if not domain:
            domain = do_manager.add()
            if 'message' in domain:
                module.fail_json(changed=False, msg=domain['message'])
            else:
                module.exit_json(changed=True, domain=domain)
        else:
            # Domain exists: make sure its apex ("@") A record points at the
            # requested IP.
            records = do_manager.all_domain_records()
            at_record = None
            for record in records['domain_records']:
                if record['name'] == "@" and record['type'] == 'A':
                    at_record = record
            # BUG FIX: at_record used to be dereferenced unconditionally,
            # raising TypeError when the domain had no apex A record.
            if at_record is None:
                module.fail_json(changed=False,
                                 msg="No A record for '@' found in domain %s" % module.params.get('name'))
            if at_record['data'] != module.params.get('ip'):
                do_manager.edit_domain_record()
                module.exit_json(changed=True, domain=do_manager.find())
            else:
                module.exit_json(changed=False, domain=do_manager.domain_record())
    elif state == 'absent':
        if not domain:
            module.fail_json(changed=False, msg="Domain not found")
        else:
            delete_event = do_manager.destroy_domain()
            # BUG FIX: destroy_domain() returns True on success and a truthy
            # error dict on failure, so the old "if not delete_event" branch
            # could never fire and failures were reported as success.
            if delete_event is not True:
                module.fail_json(changed=False, msg=delete_event['message'])
            module.exit_json(changed=True, event=None)
            # NOTE: an unreachable second destroy_domain()/exit_json() pair
            # that previously followed this branch has been removed.
def main():
    """Module entry point: build the argument spec, run core(), report errors."""
    argument_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        oauth_token=dict(
            aliases=['API_TOKEN'],
            no_log=True,
            fallback=(env_fallback, ['DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN'])
        ),
        name=dict(type='str'),
        id=dict(aliases=['droplet_id'], type='int'),
        ip=dict(type='str'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=(
            ['id', 'name'],
        ),
    )

    try:
        core(module)
    except Exception as e:
        # Surface any unexpected failure with a readable message plus traceback.
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())


if __name__ == '__main__':
    main()
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.