| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
rosudrag/eve-wspace | evewspace/Alerts/migrations/0001_initial.py | 20 | 7215 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SubscriptionGroup'
db.create_table('Alerts_subscriptiongroup', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)),
('desc', self.gf('django.db.models.fields.CharField')(max_length=200)),
('special', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('Alerts', ['SubscriptionGroup'])
# Adding model 'Subscription'
db.create_table('Alerts_subscription', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['Alerts.SubscriptionGroup'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='alert_groups', to=orm['auth.User'])),
))
db.send_create_signal('Alerts', ['Subscription'])
# Adding model 'SubscriptionGroupPermission'
db.create_table('Alerts_subscriptiongrouppermission', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user_group', self.gf('django.db.models.fields.related.ForeignKey')(related_name='alert_groups', to=orm['auth.Group'])),
('sub_group', self.gf('django.db.models.fields.related.ForeignKey')(related_name='group_permissions', to=orm['Alerts.SubscriptionGroup'])),
('can_broadcast', self.gf('django.db.models.fields.BooleanField')(default=False)),
('can_join', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('Alerts', ['SubscriptionGroupPermission'])
def backwards(self, orm):
# Deleting model 'SubscriptionGroup'
db.delete_table('Alerts_subscriptiongroup')
# Deleting model 'Subscription'
db.delete_table('Alerts_subscription')
# Deleting model 'SubscriptionGroupPermission'
db.delete_table('Alerts_subscriptiongrouppermission')
models = {
'Alerts.subscription': {
'Meta': {'object_name': 'Subscription'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Alerts.SubscriptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alert_groups'", 'to': "orm['auth.User']"})
},
'Alerts.subscriptiongroup': {
'Meta': {'object_name': 'SubscriptionGroup'},
'desc': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'through': "orm['Alerts.Subscription']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'special': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'Alerts.subscriptiongrouppermission': {
'Meta': {'object_name': 'SubscriptionGroupPermission'},
'can_broadcast': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_join': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sub_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'group_permissions'", 'to': "orm['Alerts.SubscriptionGroup']"}),
'user_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alert_groups'", 'to': "orm['auth.Group']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['Alerts'] | gpl-3.0 |
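For orientation, the frozen ORM dictionary above corresponds roughly to the following Django models. This is a sketch inferred from the migration, not the actual evewspace `Alerts/models.py`; field options are read off the `models` dict (Django 1.x-era `ForeignKey` syntax, as used by South):

```python
# Sketch of the models implied by the migration above (inferred, not the
# original source). `on_delete` is omitted as in Django 1.x.
from django.contrib.auth.models import Group, User
from django.db import models

class SubscriptionGroup(models.Model):
    name = models.CharField(max_length=64, unique=True)
    desc = models.CharField(max_length=200)
    special = models.BooleanField(default=False)
    members = models.ManyToManyField(User, through='Subscription')

class Subscription(models.Model):
    group = models.ForeignKey(SubscriptionGroup)
    user = models.ForeignKey(User, related_name='alert_groups')

class SubscriptionGroupPermission(models.Model):
    user_group = models.ForeignKey(Group, related_name='alert_groups')
    sub_group = models.ForeignKey(SubscriptionGroup, related_name='group_permissions')
    can_broadcast = models.BooleanField(default=False)
    can_join = models.BooleanField(default=False)
```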
Blake-Latchford/Bugs2 | rules/hexgrid.py | 1 | 1215 | from rules.hexcell import HexCell
class HexGrid:
"""A grid of HexCells.
Registered cells are preserved. Because the grid represents the entire
infinite hex plane, any cell can be retrieved; however, only registered
cells are guaranteed to be the same object on future calls.
"""
def __init__(self):
self._registered_cells = {}
def get_cell(self, q, r):
"""Get the cell at the specified coordinates. If no cell is registered
at that location, create a temporary new cell."""
coords = (q, r)
if coords in self._registered_cells:
return self._registered_cells[coords]
return HexCell(q, r)
def register_cell(self, hex_cell):
"""Register a hex cell to be retained in the grid."""
assert (hex_cell.q, hex_cell.r) not in self._registered_cells
self._registered_cells[(hex_cell.q, hex_cell.r)] = hex_cell
def unregister_cell(self, hex_cell):
assert self._registered_cells[(hex_cell.q, hex_cell.r)] is hex_cell
self._registered_cells.pop((hex_cell.q, hex_cell.r), None)
def reset(self):
self._registered_cells = {}
def __repr__(self):
return repr(self._registered_cells)
| gpl-3.0 |
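A quick usage sketch of the registration semantics above, assuming it runs in the context of this module (where `HexCell(q, r)` exposes `.q` and `.r`, as `register_cell` requires):

```python
# Minimal sketch: registered cells are stable, unregistered ones are not.
grid = HexGrid()
cell = grid.get_cell(0, 0)            # unregistered: a fresh HexCell each call
grid.register_cell(cell)
assert grid.get_cell(0, 0) is cell    # registered: the same object comes back
grid.unregister_cell(cell)
assert grid.get_cell(0, 0) is not cell
```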
Optimus-L90/caf-kernel | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by comm/pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
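The script leans on perf's `autodict` (an autovivifying dict) plus the `TypeError` fallback to initialize counters. Outside the perf environment, the same nested counting can be sketched with `collections.defaultdict`; the names below are illustrative:

```python
from collections import defaultdict

# Nested counters keyed as syscalls[comm][pid][syscall_id] -> count,
# mirroring the autodict usage in the script above.
syscalls = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))

def record(comm, pid, syscall_id):
    syscalls[comm][pid][syscall_id] += 1

record("bash", 1234, 0)  # syscall 0 is read(2) on x86_64
record("bash", 1234, 0)
assert syscalls["bash"][1234][0] == 2
```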
hesseltuinhof/mxnet | example/ssd/tools/caffe_converter/convert_model.py | 16 | 5232 | from __future__ import print_function
import sys
import os.path as osp
sys.path.insert(0, osp.join(osp.dirname(__file__), '..'))
import find_mxnet
import mxnet as mx
import numpy as np
import argparse
import re
from convert_symbol import proto2symbol
caffe_flag = True
try:
import caffe
except ImportError:
import caffe_parse.parse_from_protobuf as parse
caffe_flag = False
def get_caffe_iter(layer_names, layers):
for layer_idx, layer in enumerate(layers):
layer_name = re.sub('[-/]', '_', layer_names[layer_idx])
layer_type = layer.type
layer_blobs = layer.blobs
yield (layer_name, layer_type, layer_blobs)
def get_iter(layers):
for layer in layers:
layer_name = re.sub('[-/]', '_', layer.name)
layer_type = layer.type
layer_blobs = layer.blobs
yield (layer_name, layer_type, layer_blobs)
def main():
parser = argparse.ArgumentParser(description='Caffe prototxt to mxnet model parameter converter.\
Note that only basic functions are implemented. You are welcomed to contribute to this file.')
parser.add_argument('caffe_prototxt', help='The prototxt file in Caffe format')
parser.add_argument('caffe_model', help='The binary model parameter file in Caffe format')
parser.add_argument('save_model_name', help='The name of the output model prefix')
args = parser.parse_args()
prob, input_dim = proto2symbol(args.caffe_prototxt)
layers = None
layer_names = None
if caffe_flag:
caffe.set_mode_cpu()
net_caffe = caffe.Net(args.caffe_prototxt, args.caffe_model, caffe.TEST)
layer_names = net_caffe._layer_names
layers = net_caffe.layers
else:
layers = parse.parse_caffemodel(args.caffe_model)
arg_shapes, output_shapes, aux_shapes = prob.infer_shape(data=tuple(input_dim))
arg_names = prob.list_arguments()
arg_shape_dic = dict(zip(arg_names, arg_shapes))
arg_params = {}
layer_iter = None
if caffe_flag:
layer_iter = get_caffe_iter(layer_names, layers)
else:
layer_iter = get_iter(layers)
first_conv = True
for layer_name, layer_type, layer_blobs in layer_iter:
if layer_type == 'Convolution' or layer_type == 'InnerProduct' or layer_type == 4 or layer_type == 14 \
or layer_type == 'PReLU' or layer_type == 'Normalize':
if layer_type == 'PReLU':
assert (len(layer_blobs) == 1)
wmat = layer_blobs[0].data
weight_name = layer_name + '_gamma'
arg_params[weight_name] = mx.nd.zeros(wmat.shape)
arg_params[weight_name][:] = wmat
continue
if layer_type == 'Normalize':
assert (len(layer_blobs) == 1)
weight_name = layer_name + '_scale'
wmat = layer_blobs[0].data
arg_params[weight_name] = mx.nd.zeros((1, len(wmat), 1, 1))
arg_params[weight_name][:] = np.array(list(wmat)).reshape((1, len(wmat), 1, 1))
continue
assert (len(layer_blobs) == 2)
wmat_dim = []
if getattr(layer_blobs[0].shape, 'dim', None) is not None:
if len(layer_blobs[0].shape.dim) > 0:
wmat_dim = layer_blobs[0].shape.dim
else:
wmat_dim = [layer_blobs[0].num, layer_blobs[0].channels, layer_blobs[0].height,
layer_blobs[0].width]
else:
wmat_dim = list(layer_blobs[0].shape)
wmat = np.array(layer_blobs[0].data).reshape(wmat_dim)
bias = np.array(layer_blobs[1].data)
channels = wmat_dim[1]
if channels == 3 or channels == 4: # RGB or RGBA
if first_conv:
print('Swapping BGR of caffe into RGB in mxnet')
wmat[:, [0, 2], :, :] = wmat[:, [2, 0], :, :]
assert (wmat.flags['C_CONTIGUOUS'] is True)
assert (bias.flags['C_CONTIGUOUS'] is True)
print('converting layer {0}, wmat shape = {1}, bias shape = {2}'.format(layer_name, wmat.shape, bias.shape))
wmat = wmat.reshape((wmat.shape[0], -1))
bias = bias.reshape((bias.shape[0], 1))
weight_name = layer_name + "_weight"
bias_name = layer_name + "_bias"
if weight_name not in arg_shape_dic:
print(weight_name + ' not found in arg_shape_dic.')
continue
wmat = wmat.reshape(arg_shape_dic[weight_name])
arg_params[weight_name] = mx.nd.zeros(wmat.shape)
arg_params[weight_name][:] = wmat
bias = bias.reshape(arg_shape_dic[bias_name])
arg_params[bias_name] = mx.nd.zeros(bias.shape)
arg_params[bias_name][:] = bias
if first_conv and (layer_type == 'Convolution' or layer_type == 4):
first_conv = False
model = mx.mod.Module(symbol=prob, label_names=None)
model.bind(data_shapes=[('data', tuple(input_dim))])
model.init_params(arg_params=arg_params, aux_params={})
model.save_checkpoint(args.save_model_name, 1)
if __name__ == '__main__':
main()
| apache-2.0 |
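Once converted, the checkpoint can be loaded back with the standard MXNet checkpoint API; a hedged sketch (prefix, epoch and input shape below are placeholders matching the `save_checkpoint(args.save_model_name, 1)` call above):

```python
import mxnet as mx

# Load epoch 1 of the converted model and bind it for inference.
sym, arg_params, aux_params = mx.model.load_checkpoint('converted_model', 1)
mod = mx.mod.Module(symbol=sym, label_names=None)
mod.bind(data_shapes=[('data', (1, 3, 224, 224))], for_training=False)
mod.set_params(arg_params, aux_params, allow_missing=True)
```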
eclipse-ease-addons/engines | jython/org.jython/Lib/Queue.py | 188 | 8561 | """A multi-producer, multi-consumer queue."""
from time import time as _time
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
from collections import deque
import heapq
__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue']
class Empty(Exception):
"Exception raised by Queue.get(block=0)/get_nowait()."
pass
class Full(Exception):
"Exception raised by Queue.put(block=0)/put_nowait()."
pass
class Queue:
"""Create a queue object with a given maximum size.
If maxsize is <= 0, the queue size is infinite.
"""
def __init__(self, maxsize=0):
self.maxsize = maxsize
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the three conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = _threading.Lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = _threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = _threading.Condition(self.mutex)
# Notify all_tasks_done whenever the number of unfinished tasks
# drops to zero; thread waiting to join() is notified to resume
self.all_tasks_done = _threading.Condition(self.mutex)
self.unfinished_tasks = 0
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
self.all_tasks_done.acquire()
try:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notify_all()
self.unfinished_tasks = unfinished
finally:
self.all_tasks_done.release()
def join(self):
"""Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
self.all_tasks_done.acquire()
try:
while self.unfinished_tasks:
self.all_tasks_done.wait()
finally:
self.all_tasks_done.release()
def qsize(self):
"""Return the approximate size of the queue (not reliable!)."""
self.mutex.acquire()
n = self._qsize()
self.mutex.release()
return n
def empty(self):
"""Return True if the queue is empty, False otherwise (not reliable!)."""
self.mutex.acquire()
n = not self._qsize()
self.mutex.release()
return n
def full(self):
"""Return True if the queue is full, False otherwise (not reliable!)."""
self.mutex.acquire()
n = 0 < self.maxsize == self._qsize()
self.mutex.release()
return n
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a positive number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
is ignored in that case).
"""
self.not_full.acquire()
try:
if self.maxsize > 0:
if not block:
if self._qsize() == self.maxsize:
raise Full
elif timeout is None:
while self._qsize() == self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a positive number")
else:
endtime = _time() + timeout
while self._qsize() == self.maxsize:
remaining = endtime - _time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.unfinished_tasks += 1
self.not_empty.notify()
finally:
self.not_full.release()
def put_nowait(self, item):
"""Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the Full exception.
"""
return self.put(item, False)
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a positive number, it blocks at most 'timeout' seconds and raises
the Empty exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
in that case).
"""
self.not_empty.acquire()
try:
if not block:
if not self._qsize():
raise Empty
elif timeout is None:
while not self._qsize():
self.not_empty.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a positive number")
else:
endtime = _time() + timeout
while not self._qsize():
remaining = endtime - _time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
finally:
self.not_empty.release()
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the Empty exception.
"""
return self.get(False)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.queue = deque()
def _qsize(self, len=len):
return len(self.queue)
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
class PriorityQueue(Queue):
'''Variant of Queue that retrieves open entries in priority order (lowest first).
Entries are typically tuples of the form: (priority number, data).
'''
def _init(self, maxsize):
self.queue = []
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item, heappush=heapq.heappush):
heappush(self.queue, item)
def _get(self, heappop=heapq.heappop):
return heappop(self.queue)
class LifoQueue(Queue):
'''Variant of Queue that retrieves most recently added entries first.'''
def _init(self, maxsize):
self.queue = []
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item):
self.queue.append(item)
def _get(self):
return self.queue.pop()
| epl-1.0 |
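A minimal producer/consumer sketch against the module above (on Python 3 the module is named `queue`; the `join()`/`task_done()` protocol is exactly as the docstrings describe):

```python
import threading
from Queue import Queue  # Python 2 name; `from queue import Queue` on Python 3

q = Queue(maxsize=10)

def worker():
    while True:
        item = q.get()      # blocks until an item is available
        try:
            pass            # ... process item ...
        finally:
            q.task_done()   # exactly one task_done() per get()

t = threading.Thread(target=worker)
t.daemon = True
t.start()

for item in range(100):
    q.put(item)             # blocks while the queue is full

q.join()                    # returns once every put item is processed
```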
ollej/shoutbridge | src/bridges/XmppPyBridge.py | 1 | 7001 | # -*- coding: utf-8 -*-
"""
The MIT License
Copyright (c) 2010 Olle Johansson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import os
import xmpp
import time
import re
from bridges.XmppBridge import *
from utils.utilities import *
class XmppPyBridge(XmppBridge):
login = ""
passwd = ""
room = ""
host = ""
port = 5222
discoName = "Shoutbridge"
shoutbox = None
roster = []
def __init__(self, sbox, cfg):
"""
Instantiate an XMPP bridge using XMPP login details and a shoutbox object.
"""
self.shoutbox = sbox
self.login = cfg.xmpp_login
self.passwd = cfg.xmpp_pass
self.host = cfg.xmpp_host
if cfg.xmpp_port:
self.port = cfg.xmpp_port
self.room = cfg.xmpp_room
# Make an XMPP connection
self.make_connection()
# Register handlers
self.register_handlers()
def __del__(self):
if self.cl:
self.cl.disconnect()
def make_connection(self):
"""
Make an XMPP connection and authorize the user.
"""
self.jid = xmpp.protocol.JID(self.login)
debug = xmpp.debug.Debug() #['always', 'nodebuilder']
self.cl = xmpp.Client(self.jid.getDomain(), debug=debug)
self.con = self.cl.connect()
#self.cl = xmpp.client.Component(self.jid.getDomain(), self.port, debug=debug)
#self.con = self.cl.connect((self.jid.getDomain(), self.port))
if not self.con:
raise BridgeConnectionError
print 'Connected with', self.con
self.auth = self.cl.auth(self.jid.getNode(), self.passwd, resource=self.jid.getResource())
if not self.auth:
raise BridgeAuthenticationError
print 'Authenticated using', self.auth
def register_handlers(self):
"""
Register message handlers
"""
self.cl.RegisterHandler('iq', self.handle_iq)
self.cl.RegisterHandler('presence', self.handle_presence)
self.cl.RegisterHandler('message', self.handle_message)
self.disco = xmpp.browser.Browser()
self.disco.PlugIn(self.cl)
self.disco.setDiscoHandler(self.xmpp_base_disco,node='', jid=self.login)
# Disco Handlers
def xmpp_base_disco(self, con, event, type):
fromjid = event.getFrom().__str__()
to = event.getTo()
node = event.getQuerynode()
#Type is either 'info' or 'items'
if to == self.login:
if node == None:
if type == 'info':
return {
'ids': [
{'category': 'gateway', 'type': 'smtp', 'name': self.discoName}],
'features': [NS_VERSION, NS_COMMANDS]}
if type == 'items':
return []
else:
self.cl.send(Error(event, ERR_ITEM_NOT_FOUND))
raise NodeProcessed
else:
self.cl.send(Error(event, MALFORMED_JID))
raise NodeProcessed
def handle_iq(self, conn, iq_node):
"""
Handler for processing some "get" query from custom namespace
"""
print "Iq stanza received:", iq_node.getType(), iq_node.getFrom().getResource()
reply = iq_node.buildReply('result')
# ... put some content into reply node
conn.send(reply)
raise NodeProcessed # This stanza is fully processed
def handle_presence(self, conn, pres):
nick = pres.getFrom().getResource()
type = pres.getType()
print "Presence stanza received:", nick, type
if type == 'unavailable':
if nick in self.roster:
self.roster.remove(nick)
print "Removing from roster:", nick
else:
if nick not in self.roster:
self.roster.append(nick)
print "Adding to roster:", nick
def handle_message(self, conn, mess):
"""
Handle an XMPP message.
"""
type = mess.getType()
fromjid = mess.getFrom().getStripped()
nick = mess.getFrom().getResource()
print "Message stanza received:", fromjid, '/', nick, type
if type in ['message', 'chat', None]:
# and fromjid == self.remotejid:
text = mess.getBody()
try:
user = self.shoutbox.getUserByLogin(fromjid)
except ShoutboxUserNotFoundError:
# Default to anonymous user with JID as username
user = User(1, nick, '', '')
self.shoutbox.sendShout(user, text)
def clean_message(self, text):
"""
Clean text of unwanted content.
"""
text = strip_tags(text)
return text
def send_message(self, tojid, text):
"""
Send an text as XMPP message to tojid
"""
try:
id = self.cl.send(xmpp.protocol.Message(tojid, text))
print 'Sent message with id', id
except UnicodeDecodeError:
print "Unicode Decode Error: " + text
def process_shoutbox_messages(self):
msgs = self.shoutbox.readShouts()
for m in msgs:
text = self.clean_message(m.text)
text = "%s <%s> %s" % (m.time, m.name, text)
self.send_message(self.room, text)
def listen(self):
"""
Start listening on XMPP and Shoutbox, relaying messages.
"""
try:
while 1:
print "Loop..."
# Process incoming XMPP messages.
self.cl.Process(5)
# Process shoutbox messages.
self.process_shoutbox_messages()
# Sleep before next loop iteration.
#time.sleep(1)
# Reconnect to XMPP if necessary.
if not self.cl.isConnected():
self.cl.reconnectAndReauth()
except KeyboardInterrupt:
print "Exiting..."
| mit |
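Wiring the bridge up requires a shoutbox object and a config object; only the attribute names (`xmpp_login`, `xmpp_pass`, `xmpp_host`, `xmpp_port`, `xmpp_room`) are taken from `__init__` above, the rest of this sketch is hypothetical:

```python
class Config(object):
    xmpp_login = 'bridge@example.com/shoutbridge'
    xmpp_pass = 'secret'
    xmpp_host = 'example.com'
    xmpp_port = 5222
    xmpp_room = 'room@conference.example.com'

# `shoutbox` is assumed to be an instance of the project's Shoutbox class,
# providing getUserByLogin(), sendShout() and readShouts() as used above.
bridge = XmppPyBridge(shoutbox, Config())
bridge.listen()
```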
nlu90/heron | heron/tools/tracker/src/python/handlers/containerfilehandler.py | 5 | 6770 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' containerfilehandler.py '''
import json
import traceback
import tornado.gen
import tornado.httpclient
from heron.common.src.python.utils.log import Log
from heron.tools.tracker.src.python.handlers import BaseHandler
from heron.tools.tracker.src.python import constants
from heron.tools.tracker.src.python import utils
# pylint: disable=attribute-defined-outside-init
class ContainerFileDataHandler(BaseHandler):
"""
URL - /topologies/containerfiledata?cluster=<cluster>&topology=<topology> \
&environ=<environment>&container=<container>
Parameters:
- cluster - Name of cluster.
- environ - Running environment.
- role - (optional) Role used to submit the topology.
- topology - Name of topology (Note: Case sensitive. Can only
include [a-zA-Z0-9-_]+)
- container - Container number
- path - Relative path to the file
- offset - From which to read the file
- length - How much data to read
Get the data from the file for the given topology, container
and path. The data being read is based on offset and length.
"""
def initialize(self, tracker):
""" initialize """
self.tracker = tracker
@tornado.gen.coroutine
def get(self):
""" get method """
try:
cluster = self.get_argument_cluster()
role = self.get_argument_role()
environ = self.get_argument_environ()
topology_name = self.get_argument_topology()
container = self.get_argument(constants.PARAM_CONTAINER)
path = self.get_argument(constants.PARAM_PATH)
offset = self.get_argument_offset()
length = self.get_argument_length()
topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)
stmgr_id = "stmgr-" + container
stmgr = topology_info["physical_plan"]["stmgrs"][stmgr_id]
host = stmgr["host"]
shell_port = stmgr["shell_port"]
file_data_url = "http://%s:%d/filedata/%s?offset=%s&length=%s" % \
(host, shell_port, path, offset, length)
http_client = tornado.httpclient.AsyncHTTPClient()
response = yield http_client.fetch(file_data_url)
self.write_success_response(json.loads(response.body))
self.finish()
except Exception as e:
Log.debug(traceback.format_exc())
self.write_error_response(e)
# pylint: disable=attribute-defined-outside-init
class ContainerFileDownloadHandler(BaseHandler):
"""
URL - /topologies/containerfiledownload?cluster=<cluster>&topology=<topology> \
&environ=<environment>&container=<container>
Parameters:
- cluster - Name of cluster.
- environ - Running environment.
- role - (optional) Role used to submit the topology.
- topology - Name of topology (Note: Case sensitive. Can only
include [a-zA-Z0-9-_]+)
- container - Container number
- path - Relative path to the file
Download the file for the given topology, container and path.
"""
def initialize(self, tracker):
""" initialize """
self.tracker = tracker
@tornado.gen.coroutine
def get(self):
try:
cluster = self.get_argument_cluster()
role = self.get_argument_role()
environ = self.get_argument_environ()
topology_name = self.get_argument_topology()
container = self.get_argument(constants.PARAM_CONTAINER)
path = self.get_argument(constants.PARAM_PATH)
topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)
stmgr_id = "stmgr-" + container
stmgr = topology_info["physical_plan"]["stmgrs"][stmgr_id]
host = stmgr["host"]
shell_port = stmgr["shell_port"]
file_download_url = "http://%s:%d/download/%s" % (host, shell_port, path)
Log.debug("download file url: %s", file_download_url)
path = self.get_argument("path")
filename = path.split("/")[-1]
self.set_header("Content-Disposition", "attachment; filename=%s" % filename)
def streaming_callback(chunk):
self.write(chunk)
self.flush()
http_client = tornado.httpclient.AsyncHTTPClient()
yield http_client.fetch(file_download_url, streaming_callback=streaming_callback)
self.finish()
except Exception as e:
Log.debug(traceback.format_exc())
self.write_error_response(e)
class ContainerFileStatsHandler(BaseHandler):
"""
URL - /topologies/containerfilestats?cluster=<cluster>&topology=<topology> \
&environ=<environment>&container=<container>
Parameters:
- cluster - Name of cluster.
- environ - Running environment.
- role - (optional) Role used to submit the topology.
- topology - Name of topology (Note: Case sensitive. Can only
include [a-zA-Z0-9-_]+)
- container - Container number
- path - Relative path to the directory
Get the dir stats for the given topology, container and path.
"""
def initialize(self, tracker):
""" initialize """
self.tracker = tracker
@tornado.gen.coroutine
def get(self):
""" get method """
try:
cluster = self.get_argument_cluster()
role = self.get_argument_role()
environ = self.get_argument_environ()
topology_name = self.get_argument_topology()
container = self.get_argument(constants.PARAM_CONTAINER)
path = self.get_argument(constants.PARAM_PATH, default=".")
topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)
stmgr_id = "stmgr-" + str(container)
stmgr = topology_info["physical_plan"]["stmgrs"][stmgr_id]
host = stmgr["host"]
shell_port = stmgr["shell_port"]
filestats_url = utils.make_shell_filestats_url(host, shell_port, path)
http_client = tornado.httpclient.AsyncHTTPClient()
response = yield http_client.fetch(filestats_url)
self.write_success_response(json.loads(response.body))
self.finish()
except Exception as e:
Log.debug(traceback.format_exc())
self.write_error_response(e)
| apache-2.0 |
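The handlers above can be exercised with any HTTP client; a sketch using `requests`, with host, port and parameter values as placeholders:

```python
import requests

# Read 4 KiB from the start of a container log via ContainerFileDataHandler.
resp = requests.get(
    'http://tracker.example.com:8888/topologies/containerfiledata',
    params={
        'cluster': 'local', 'environ': 'default', 'topology': 'my-topology',
        'container': '1', 'path': 'log-files/container_1.log',
        'offset': '0', 'length': '4096',
    })
print(resp.json())
```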
Diti24/python-ivi | ivi/agilent/agilent437B.py | 1 | 14103 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
from .. import pwrmeter
import time
Units = set(['dBm', 'Watts'])
class agilent437B(ivi.Driver, pwrmeter.Base, pwrmeter.ManualRange,
pwrmeter.DutyCycleCorrection, pwrmeter.AveragingCount,
pwrmeter.ZeroCorrection, pwrmeter.Calibration,
pwrmeter.ReferenceOscillator):
"Agilent 437B RF power meter"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '437B')
super(agilent437B, self).__init__(*args, **kwargs)
self._channel_count = 1
self._identity_description = "Agilent 437B RF power meter driver"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = "Agilent Technologies"
self._identity_instrument_model = ""
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 3
self._identity_specification_minor_version = 0
self._identity_supported_instrument_models = ['437B']
self._init_channels()
def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
"Opens an I/O session to the instrument."
super(agilent437B, self)._initialize(resource, id_query, reset, **keywargs)
# interface clear
if not self._driver_operation_simulate:
self._clear()
# check ID
if id_query and not self._driver_operation_simulate:
id = self.identity.instrument_model
id_check = self._instrument_id
id_short = id[:len(id_check)]
if id_short != id_check:
raise Exception("Instrument ID mismatch, expecting %s, got %s", id_check, id_short)
# reset
if reset:
self.utility_reset()
def _load_id_string(self):
if self._driver_operation_simulate:
self._identity_instrument_manufacturer = "Not available while simulating"
self._identity_instrument_model = "Not available while simulating"
self._identity_instrument_firmware_revision = "Not available while simulating"
else:
lst = self._ask("*IDN?").split(",")
self._identity_instrument_manufacturer = lst[0]
self._identity_instrument_model = lst[1]
self._identity_instrument_firmware_revision = lst[3]
self._set_cache_valid(True, 'identity_instrument_manufacturer')
self._set_cache_valid(True, 'identity_instrument_model')
self._set_cache_valid(True, 'identity_instrument_firmware_revision')
def _get_identity_instrument_manufacturer(self):
if self._get_cache_valid():
return self._identity_instrument_manufacturer
self._load_id_string()
return self._identity_instrument_manufacturer
def _get_identity_instrument_model(self):
if self._get_cache_valid():
return self._identity_instrument_model
self._load_id_string()
return self._identity_instrument_model
def _get_identity_instrument_firmware_revision(self):
if self._get_cache_valid():
return self._identity_instrument_firmware_revision
self._load_id_string()
return self._identity_instrument_firmware_revision
def _utility_disable(self):
pass
def _utility_error_query(self):
error_code = 0
error_message = "No error"
#if not self._driver_operation_simulate:
# error_code, error_message = self._ask(":system:error?").split(',')
# error_code = int(error_code)
# error_message = error_message.strip(' "')
return (error_code, error_message)
def _utility_lock_object(self):
pass
def _utility_reset(self):
if not self._driver_operation_simulate:
self._write("*RST")
self._clear()
self.driver_operation.invalidate_all_attributes()
def _utility_reset_with_defaults(self):
self._utility_reset()
def _utility_self_test(self):
code = 0
message = "Self test passed"
if not self._driver_operation_simulate:
code = int(self._ask("*TST?"))
if code != 0:
message = "Self test failed"
return (code, message)
def _utility_unlock_object(self):
pass
def _init_channels(self):
try:
super(agilent437B, self)._init_channels()
except AttributeError:
pass
self._channel_name = list()
self._channel_averaging_count_auto = list()
self._channel_correction_frequency = list()
self._channel_offset = list()
self._channel_range_auto = list()
self._channel_units = list()
for i in range(self._channel_count):
self._channel_name.append("channel%d" % (i+1))
self._channel_averaging_count_auto.append(True)
self._channel_correction_frequency.append(50e6)
self._channel_offset.append(0.0)
self._channel_range_auto.append(True)
self._channel_units.append('dBm')
self.channels._set_list(self._channel_name)
def _get_channel_averaging_count_auto(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_averaging_count_auto[index]
def _set_channel_averaging_count_auto(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = bool(value)
if not value:
raise ivi.ValueNotSupportedException()
self._channel_averaging_count_auto[index] = value
def _get_channel_correction_frequency(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_correction_frequency[index]
def _set_channel_correction_frequency(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write("FR%eEN" % (value))
self._channel_correction_frequency[index] = value
self._set_cache_valid(index=index)
def _get_channel_offset(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_offset[index]
def _set_channel_offset(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write("OS%eEN" % (value))
self._channel_offset[index] = value
self._set_cache_valid(index=index)
def _get_channel_range_auto(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_range_auto[index]
def _set_channel_range_auto(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = bool(value)
self._channel_range_auto[index] = value
def _get_channel_units(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_units[index]
def _set_channel_units(self, index, value):
index = ivi.get_index(self._channel_name, index)
if value not in Units:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
if value == 'dBm':
self._write("LG")
elif value == 'Watts':
self._write("LN")
self._channel_units[index] = value
self._set_cache_valid(index=index)
def _get_measurement_measurement_state(self):
return self._measurement_measurement_state
def _measurement_abort(self):
self._clear()
def _measurement_configure(self, operator, operand1, operand2):
pass
def _measurement_fetch(self):
if self._driver_operation_simulate:
return
val = self._read()
return float(val)
def _measurement_initiate(self):
if self._driver_operation_simulate:
return
self._write("TR1")
def _measurement_read(self, maximum_time):
self._measurement_initiate()
return self._measurement_fetch()
def _get_channel_range_lower(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_range_lower[index]
def _set_channel_range_lower(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
self._channel_range_lower[index] = value
def _get_channel_range_upper(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_range_upper[index]
def _set_channel_range_upper(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
self._channel_range_upper[index] = value
def _get_channel_duty_cycle_enabled(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_duty_cycle_enabled[index]
def _set_channel_duty_cycle_enabled(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = bool(value)
if not self._driver_operation_simulate:
self._write("DC%d" % int(value))
self._channel_duty_cycle_enabled[index] = value
self._set_cache_valid(index=index)
def _get_channel_duty_cycle_value(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_duty_cycle_value[index]
def _set_channel_duty_cycle_value(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write("DY%eEN" % (value))
self._channel_duty_cycle_value[index] = value
self._set_cache_valid(index=index)
def _get_channel_averaging_count(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_averaging_count[index]
def _set_channel_averaging_count(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = int(value)
if not self._driver_operation_simulate:
self._write("FM%eEN" % (value))
self._channel_averaging_count[index] = value
self._set_cache_valid(index=index)
def _get_channel_zero_state(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_zero_state[index]
def _channel_zero(self, index):
index = ivi.get_index(self._channel_name, index)
if self._driver_operation_simulate:
return
self._write("CS")
self._write("ZE")
it = 0
while True:
val = self._read_stb()
if val & 2:
break
if val & 8 or it > 20:
return
time.sleep(0.5)
self._channel_zero_state[index] = 'complete'
def _get_channel_calibration_state(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_calibration_state[index]
def _channel_calibrate(self, index):
index = ivi.get_index(self._channel_name, index)
if self._driver_operation_simulate:
return
self._write("CS")
self._write("CLEN")
it = 0
while True:
val = self._read_stb()
if val & 2:
break
if val & 8 or it > 20:
return
time.sleep(0.5)
self._channel_calibration_state[index] = 'complete'
def _get_reference_oscillator_enabled(self):
return self._reference_oscillator_enabled
def _set_reference_oscillator_enabled(self, value):
value = bool(value)
if not self._driver_operation_simulate:
self._write("OC%d" % int(value))
self._reference_oscillator_enabled = value
self._set_cache_valid()
def _get_reference_oscillator_frequency(self):
return self._reference_oscillator_frequency
def _set_reference_oscillator_frequency(self, value):
value = float(value)
value = 50e6 # fixed at 50 MHz
self._reference_oscillator_frequency = value
def _get_reference_oscillator_level(self):
return self._reference_oscillator_level
def _set_reference_oscillator_level(self, value):
value = float(value)
value = 0.0 # fixed at 1.00 mW (0 dBm)
self._reference_oscillator_level = value
| mit |
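A hedged usage sketch for the driver above. The VISA resource string is a placeholder, and the attribute paths are inferred from the `_get_`/`_set_` methods the driver defines (python-ivi maps them onto the IVI PwrMeter attribute tree):

```python
import ivi

mtr = ivi.agilent.agilent437B('GPIB0::13::INSTR')  # placeholder address
mtr.channels[0].correction_frequency = 1e9         # cal-factor frequency
mtr.channels[0].units = 'dBm'
print(mtr.measurement.read(1000))                  # trigger, wait, fetch
mtr.close()
```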
adnanh/zulip | api/integrations/svn/zulip_svn_config.py | 124 | 2363 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Change these values to configure authentication for the plugin
ZULIP_USER = "svn-bot@example.com"
ZULIP_API_KEY = "0123456789abcdef0123456789abcdef"
# commit_notice_destination() lets you customize where commit notices
# are sent to with the full power of a Python function.
#
# It takes the following arguments:
# * path = the path to the svn repository on the server
# * commit = the commit id
#
# Returns a dictionary encoding the stream and subject to send the
# notification to (or None to send no notification).
#
# The default code below will send every commit except for the "evil-master-plan"
# and "my-super-secret-repository" repos to
# * stream "commits"
# * topic "branch_name"
def commit_notice_destination(path, commit):
repo = path.split('/')[-1]
if repo not in ["evil-master-plan", "my-super-secret-repository"]:
return dict(stream = "commits",
subject = u"%s" % (repo,))
# Return None for cases where you don't want a notice sent
return None
## If properly installed, the Zulip API should be in your import
## path, but if not, set a custom path below
ZULIP_API_PATH = None
# Set this to your Zulip server's API URI
ZULIP_SITE = "https://api.zulip.com"
| apache-2.0 |
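A quick illustrative check of the routing rule above: excluded repositories yield `None`, everything else goes to stream `commits` with the repository name as the topic:

```python
assert commit_notice_destination('/srv/svn/evil-master-plan', 'r101') is None
assert commit_notice_destination('/srv/svn/website', 'r102') == \
    {'stream': 'commits', 'subject': u'website'}
```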
anilmuthineni/tensorflow | tensorflow/python/ops/gradients_impl.py | 3 | 35591 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import image_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_ops # pylint: disable=unused-import
from tensorflow.python.ops import logging_ops # pylint: disable=unused-import
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000
def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
"""Converts an IndexedSlices object `value` to a Tensor.
NOTE(mrry): This function is potentially expensive.
Args:
value: An ops.IndexedSlices object.
dtype: The dtype of the Tensor to be returned.
name: Optional name to use for the returned Tensor.
as_ref: True if a ref is requested.
Returns:
A dense Tensor representing the values in the given IndexedSlices.
Raises:
ValueError: If the IndexedSlices does not have the same dtype.
"""
_ = as_ref
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
(dtype.name, value.dtype.name))
if value.dense_shape is None:
raise ValueError(
"Tensor conversion requested for IndexedSlices without dense_shape: %s"
% str(value))
# TODO(mrry): Consider adding static shape information to
# IndexedSlices, to avoid using numpy here.
dense_shape_value = tensor_util.constant_value(value.dense_shape)
if dense_shape_value is not None:
num_elements = np.prod(dense_shape_value)
if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor with %d elements. "
"This may consume a large amount of memory." % num_elements)
else:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
"This may consume a large amount of memory.")
return math_ops.unsorted_segment_sum(
value.values, value.indices, value.dense_shape[0], name=name)
ops.register_tensor_conversion_function(ops.IndexedSlices,
_IndexedSlicesToTensor)
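# Example (sketch, not part of the original file): the registration above lets
# IndexedSlices participate implicitly wherever a Tensor is expected, e.g.
#
#   grad = tf.gradients(y, [embedding_var])[0]  # often an IndexedSlices
#   dense = tf.convert_to_tensor(grad)          # densified via the function above
#
# at the memory cost warned about in _IndexedSlicesToTensor.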
def _MarkReachedOps(from_ops, reached_ops):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
"""
queue = collections.deque()
queue.extend(from_ops)
while queue:
op = queue.popleft()
if not reached_ops[op._id]:
reached_ops[op._id] = True
for output in op.outputs:
queue.extend(output.consumers())
def _GatherInputs(to_ops, reached_ops):
"""List all inputs of to_ops that are in reached_ops.
Args:
to_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
Returns:
The list of all inputs of to_ops that are in reached_ops.
That list includes all elements of to_ops.
"""
inputs = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
inputs.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
return inputs
def _PendingCount(graph, to_ops, from_ops, colocate_gradients_with_ops):
"""Initialize the pending count for ops between two lists of Operations.
'pending_count[op._id]' indicates the number of backprop inputs
to this operation.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
colocate_gradients_with_ops: Python bool. See docstring of gradients().
Returns:
A tuple containing: (1) a list of integers indexed by operation id,
indicating the number of backprop inputs to this operation, and (2)
a ControlFlowState object which is not None if the ops between from_ops
and to_ops contain control flow loops.
"""
# Mark reachable ops from from_ops.
reached_ops = [False] * (graph._last_id + 1)
for op in to_ops:
reached_ops[op._id] = True
_MarkReachedOps(from_ops, reached_ops)
# Mark between ops.
between_ops = [False] * (graph._last_id + 1)
between_op_list = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
between_ops[op._id] = True
between_op_list.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
# 'loop_state' is None if there are no while loops.
loop_state = control_flow_ops.MaybeCreateControlFlowState(
between_op_list, between_ops, colocate_gradients_with_ops)
# Initialize pending count for between ops.
pending_count = [0] * (graph._last_id + 1)
for op in between_op_list:
for x in op.inputs:
if between_ops[x.op._id]:
pending_count[x.op._id] += 1
return pending_count, loop_state
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
"""Fill in default values for grad_ys.
Args:
grad_ys: List of gradients, can contain None.
ys: List of tensors.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
Returns:
A list of gradients to use, without None.
Raises:
ValueError: If sizes of gradients and inputs don't match
TypeError: If type of any gradient is not valid for its input.
"""
if len(grad_ys) != len(ys):
raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
for i in xrange(len(grad_ys)):
grad_y = grad_ys[i]
y = ys[i]
if grad_y is None:
with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
grad_ys[i] = array_ops.fill(
array_ops.shape(y), constant_op.constant(
1, dtype=y.dtype))
continue
if y.dtype.is_real or y.dtype.is_integer:
if not grad_y.dtype.is_real and not grad_y.dtype.is_integer:
raise TypeError("Gradient type %s generated for real or "
"integer-valued tensor %s with type %s must be "
"real or integer" %
(dtypes.as_dtype(grad_y.dtype).name, y,
dtypes.as_dtype(y.dtype).name))
elif y.dtype.is_complex:
if not grad_y.dtype.is_complex:
raise TypeError("Gradient type %s generated for complex-valued "
"tensor %s with type %s must be real" %
(dtypes.as_dtype(grad_y.dtype).name, y,
dtypes.as_dtype(y.dtype).name))
else:
raise TypeError("Tensor %s with type %s must be numeric "
"to obtain a default gradient" %
(y, dtypes.as_dtype(y.dtype).name))
return grad_ys
def _IsTrainable(tensor):
dtype = dtypes.as_dtype(tensor.dtype)
return dtype.base_dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128)
def _VerifyGeneratedGradients(grads, op):
"""Verify that gradients are valid in number and type.
Args:
grads: List of generated gradients.
op: Operation for which the gradients where generated.
Raises:
ValueError: if sizes of gradients and inputs don't match.
TypeError: if type of any gradient is not valid for its input.
"""
if len(grads) != len(op.inputs):
raise ValueError("Num gradients %d generated for op %s do not match num "
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
for i in xrange(len(grads)):
grad = grads[i]
inp = op.inputs[i]
if grad is None:
continue
if grad.dtype.is_real:
if not inp.dtype.is_real:
raise TypeError("Gradient type %s generated for real-valued op %s "
"with type %s must be real" %
(dtypes.as_dtype(grad.dtype).name, op.node_def,
dtypes.as_dtype(inp.dtype).name))
elif grad.dtype.is_complex:
if not inp.dtype.is_complex:
raise TypeError("Gradient type %s generated for complex-valued op %s"
" with type %s must be complex" %
(dtypes.as_dtype(grad.dtype).name, op.node_def,
dtypes.as_dtype(inp.dtype).name))
else:
raise TypeError("Gradient type %s generated for op %s "
"with type %s must be either real or complex" %
(dtypes.as_dtype(grad.dtype).name, op.node_def,
dtypes.as_dtype(inp.dtype).name))
def _StopOps(from_ops, pending_count):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
`_PendingCount(graph, to_ops, from_ops)`. An 'op' has predecessors in `from_ops`
iff pending_count[op._id] > 0.
Args:
from_ops: list of Operations.
pending_count: List of integers, indexed by operation id.
Returns:
The set of operations.
"""
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in op.inputs:
if pending_count[inp.op._id] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op._id)
return stop_ops
@contextlib.contextmanager
def _maybe_colocate_with(op, colocate_gradients_with_ops):
"""Context to colocate with `op` if `colocate_gradients_with_ops`."""
if colocate_gradients_with_ops:
with ops.colocate_with(op):
yield
else:
yield
def _SymGrad(op, out_grads):
"""Backprop through a function call node op given its outputs' gradients."""
f_in = [x for x in op.inputs] + out_grads
f_types = [x.dtype for x in op.inputs]
f = attr_value_pb2.NameAttrList()
f.name = op.type
for k in op.node_def.attr:
f.attr[k].CopyFrom(op.node_def.attr[k])
# pylint: disable=protected-access
in_grads = functional_ops._symbolic_gradient(input=f_in, Tout=f_types, f=f)
# pylint: enable=protected-access
return in_grads
def gradients(ys,
xs,
grad_ys=None,
name="gradients",
colocate_gradients_with_ops=False,
gate_gradients=False,
aggregation_method=None):
"""Constructs symbolic partial derivatives of sum of `ys` w.r.t. x in `xs`.
`ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`
is a list of `Tensor`, holding the gradients received by the
`ys`. The list must be the same length as `ys`.
`gradients()` adds ops to the graph to output the partial
derivatives of `ys` with respect to `xs`. It returns a list of
`Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)`
for y in `ys`.
`grad_ys` is a list of tensors of the same length as `ys` that holds
the initial gradients for each y in `ys`. When `grad_ys` is None,
we fill in a tensor of '1's of the shape of y for each y in `ys`. A
user can provide their own initial `grad_ys` to compute the
derivatives using a different initial gradient for each y (e.g., if
one wanted to weight the gradient differently for each value in
each y).
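For example (an illustrative sketch; `constant_op.constant` is the
constant-building helper this module already imports):
  x = constant_op.constant(3.0)
  y = constant_op.constant(4.0)
  z = x * y + x
  dz_dx, dz_dy = gradients(z, [x, y])  # evaluate to 5.0 and 3.0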
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
grad_ys: Optional. A `Tensor` or list of tensors the same size as
`ys` and holding the gradients computed for each y in `ys`.
name: Optional name to use for grouping all the gradient ops together.
Defaults to 'gradients'.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
gate_gradients: If True, add a tuple around the gradients returned
for an operation. This avoids some race conditions.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
A list of `sum(dy/dx)` for each x in `xs`.
Raises:
LookupError: if one of the operations between `x` and `y` does not
have a registered gradient function.
ValueError: if the arguments are invalid.
"""
ys = _AsList(ys)
xs = _AsList(xs)
if grad_ys is None:
grad_ys = [None] * len(ys)
else:
grad_ys = _AsList(grad_ys)
with ops.name_scope(name, "gradients", ys + xs + grad_ys):
ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
xs = ops.convert_n_to_tensor_or_indexed_slices(xs, name="x")
grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)
# The approach we take here is as follows: Create a list of all ops in the
# subgraph between the ys and xs. Visit these ops in reverse order of ids
# to ensure that when we visit an op the gradients w.r.t its outputs have
# been collected. Then aggregate these gradients if needed, call the op's
# gradient function, and add the generated gradients to the gradients for
# its input.
# Initialize the pending count for ops in the connected subgraph from ys
# to the xs.
to_ops = [t.op for t in ys]
from_ops = [t.op for t in xs]
pending_count, loop_state = _PendingCount(ops.get_default_graph(), to_ops,
from_ops,
colocate_gradients_with_ops)
# Iterate over the collected ops.
#
# grads: op => list of gradients received on each output endpoint of the
# op. The gradients for each endpoint are initially collected as a list.
# When it is time to call the op's gradient function, for each endpoint we
# aggregate the list of received gradients into a Add() Operation if there
# is more than one.
grads = {}
# Add the initial gradients for the ys.
for y, grad_y in zip(ys, grad_ys):
_SetGrad(grads, y, grad_y)
# Initialize queue with to_ops.
queue = collections.deque()
# Add the ops in 'to_ops' into the queue.
to_ops_set = set()
for op in to_ops:
# 'ready' handles the case where one output gradient relies on
# another output's gradient.
# pylint: disable=protected-access
ready = (pending_count[op._id] == 0)
if ready and op._id not in to_ops_set:
to_ops_set.add(op._id)
queue.append(op)
# pylint: enable=protected-access
if loop_state:
loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
for y in loop_exits:
if _IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
# The set of 'from_ops'.
stop_ops = _StopOps(from_ops, pending_count)
while queue:
# Generate gradient subgraph for op.
op = queue.popleft()
with _maybe_colocate_with(op, colocate_gradients_with_ops):
if loop_state:
loop_state.EnterGradWhileContext(op, before=True)
out_grads = _AggregatedGrads(grads, op, loop_state, aggregation_method)
if loop_state:
loop_state.ExitGradWhileContext(op, before=True)
grad_fn = None
# pylint: disable=protected-access
is_func_call = ops.get_default_graph()._is_function(op.type)
has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
if has_out_grads and (op._id not in stop_ops):
if is_func_call:
grad_fn = ops.get_default_graph()._get_function(
op.type).python_grad_func
# pylint: enable=protected-access
else:
# A grad_fn must be defined, either as a function or as None
# for ops that do not have gradients.
try:
grad_fn = ops.get_gradient_function(op)
except LookupError:
raise LookupError(
"No gradient defined for operation '%s' (op type: %s)" %
(op.name, op.type))
if loop_state:
loop_state.EnterGradWhileContext(op, before=False)
if (grad_fn or is_func_call) and has_out_grads:
# NOTE: If _AggregatedGrads didn't compute a value for the i'th
# output, it means that the cost does not depend on output[i],
# therefore dC/doutput[i] is 0.
for i, out_grad in enumerate(out_grads):
if (not isinstance(out_grad, ops.Tensor) and
not out_grad) and _IsTrainable(op.outputs[i]):
# Only floating-point outputs get a zero gradient. Gradient
# functions should ignore the gradient for other outputs.
if loop_state:
out_grads[i] = loop_state.ZerosLike(op, i)
else:
out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
with ops.name_scope(op.name + "_grad"):
# pylint: disable=protected-access
with ops.get_default_graph()._original_op(op):
# pylint: enable=protected-access
if grad_fn:
# If grad_fn was found, do not use SymbolicGradient even for
# functions.
in_grads = grad_fn(op, *out_grads)
else:
# For function call ops, we add a 'SymbolicGradient'
# node to the graph to compute gradients.
in_grads = _SymGrad(op, out_grads)
in_grads = _AsList(in_grads)
_VerifyGeneratedGradients(in_grads, op)
if gate_gradients and len(
[x for x in in_grads if x is not None]) > 1:
in_grads = control_flow_ops.tuple(in_grads)
_LogOpGradients(op, out_grads, in_grads)
else:
# If no grad_fn is defined or none of out_grads is available,
# just propagate a list of None backwards.
in_grads = [None] * len(op.inputs)
for t_in, in_grad in zip(op.inputs, in_grads):
if in_grad is not None:
if isinstance(in_grad, ops.Tensor):
in_grad.set_shape(t_in.get_shape())
_SetGrad(grads, t_in, in_grad)
if loop_state:
loop_state.ExitGradWhileContext(op, before=False)
# Update pending count for the inputs of op and enqueue ready ops.
_UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state)
if loop_state:
loop_state.PostProcessing()
return [_GetGrad(grads, x) for x in xs]
def _HasAnyNotNoneGrads(grads, op):
"""Return true iff op has real gradient."""
out_grads = _GetGrads(grads, op)
for out_grad in out_grads:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
return True
if out_grad and isinstance(out_grad, collections.Sequence):
if any([g is not None for g in out_grad]):
return True
return False
def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state):
"""Update pending count for the inputs of op and enqueue ready ops."""
for x in op.inputs:
# pylint: disable=protected-access
pending_count[x.op._id] -= 1
ready = (pending_count[x.op._id] == 0)
if loop_state and not ready:
ready = (pending_count[x.op._id] > 0 and
control_flow_ops.IsLoopSwitch(x.op))
# pylint: enable=protected-access
if ready:
if control_flow_ops.IsLoopExit(x.op):
# If x is an exit without a real gradient, defer processing it.
grad_state = loop_state.GetGradState(x.op, before=False)
grad_state.deferred_exits.append(x)
grad_state.pending_exits_count -= 1
if grad_state.pending_exits_count == 0:
# We now have all the exits so process them.
has_real_grad = False
for y in grad_state.deferred_exits:
if _HasAnyNotNoneGrads(grads, y.op):
has_real_grad = True
queue.append(y.op)
else:
grad_state.unused_exits.append(y)
if has_real_grad:
# For an unused exit, if it has floating-point outputs, backprop
# a zero gradient. Otherwise, just ignore it.
for y in grad_state.unused_exits:
if _IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
else:
# All exits are "unused" so use None as gradient.
for y in grad_state.unused_exits:
queue.append(y.op)
else:
queue.append(x.op)
def _SetGrad(grads, t, grad):
"""Sets gradient "grad" in "grads" for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
op_grads = [[] for _ in xrange(len(op.outputs))]
grads[op] = op_grads
t_grads = op_grads[t.value_index]
if isinstance(t_grads, list):
t_grads.append(grad)
else:
assert control_flow_ops.IsLoopSwitch(op)
op_grads[t.value_index] = grad
def _GetGrad(grads, t):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
return None
t_grad = op_grads[t.value_index]
assert not isinstance(t_grad, list), (
"gradients list should have been aggregated by now.")
return t_grad
def _GetGrads(grads, op):
"""Gets all gradients for op."""
if op in grads:
return grads[op]
else:
return [[] for _ in xrange(len(op.outputs))]
def _HandleNestedIndexedSlices(grad):
assert isinstance(grad, ops.IndexedSlices)
if isinstance(grad.values, ops.Tensor):
return grad
else:
assert isinstance(grad.values, ops.IndexedSlices)
g = _HandleNestedIndexedSlices(grad.values)
return ops.IndexedSlices(g.values,
array_ops.gather(grad.indices, g.indices),
g.dense_shape)
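# Sketch of the recursion (hypothetical values): if grad has indices i and
# grad.values is itself an IndexedSlices with values v and indices j, the
# result is IndexedSlices(v, gather(i, j), ...); the inner indices j select
# rows of the outer index vector i, flattening one level of nesting per call.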
def _AccumulatorShape(inputs):
shape = tensor_shape.unknown_shape()
for i in inputs:
if isinstance(i, ops.Tensor):
shape = shape.merge_with(i.get_shape())
return shape
def _LogOpGradients(op, out_grads, in_grads):
"""Log the in and out grads of an op."""
logging.vlog(1, "Gradient for '" + op.name + "'")
def _FilterGrad(x):
if x is None:
return False
if isinstance(x, (list, tuple)):
return bool(x)
else:
return True
logging.vlog(1, " in --> %s",
", ".join([x.name for x in out_grads if _FilterGrad(x)]))
logging.vlog(1, " out --> %s",
", ".join([x.name for x in in_grads if _FilterGrad(x)]))
def _MultiDeviceAddN(tensor_list):
"""Adds tensors from potentially multiple devices."""
# Basic function structure comes from control_flow_ops.group().
# Sort tensors according to their devices.
tensors_on_device = collections.defaultdict(lambda: [])
for tensor in tensor_list:
tensors_on_device[tensor.device].append(tensor)
# For each device, add the tensors on that device first.
# Then gather the partial sums from multiple devices.
# TODO(sjhwang): Create hierarchical aggregation tree as per pbar's suggestion.
# E.g., aggregate per GPU, then per task, and so on.
summands = []
def DeviceKey(dev):
return "" if dev is None else dev
for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
tensors = tensors_on_device[dev]
with ops.colocate_with(tensors[0].op, ignore_existing=True):
summands.append(math_ops.add_n(tensors))
return math_ops.add_n(summands)
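# Illustrative sketch (hypothetical placements): for tensors on
# ["/gpu:0", "/gpu:0", "/gpu:1"], the two "/gpu:0" tensors are summed by one
# add_n colocated on that device, "/gpu:1" contributes its lone tensor as its
# partial sum, and a final add_n combines the per-device partials.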
class AggregationMethod(object):
"""A class listing aggregation methods used to combine gradients.
Computing partial derivatives can require aggregating gradient
contributions. This class lists the various methods that can
be used to combine gradients in the graph:
* `ADD_N`: All of the gradient terms are summed as part of one
operation using the "AddN" op. It has the property that all
gradients must be ready before any aggregation is performed.
* `DEFAULT`: The system-chosen default aggregation method.
"""
ADD_N = 0
DEFAULT = ADD_N
# The following are experimental and may not be supported in future releases.
EXPERIMENTAL_TREE = 1
EXPERIMENTAL_ACCUMULATE_N = 2
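# Illustrative sketch (not part of the original module): callers select a
# method through the aggregation_method argument of gradients(), e.g.
#   grads = gradients(ys, xs,
#                     aggregation_method=AggregationMethod.EXPERIMENTAL_TREE)
# trading the single AddN of the default for pairwise sums that let
# intermediate gradients be freed earlier (see _AggregatedGrads below).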
def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
"""Get the aggregated gradients for op.
Args:
grads: The map of memoized gradients.
op: The op to get gradients for.
loop_state: An object for maintaining the state of the while loops in the
graph. It is of type ControlFlowState. None if the graph
contains no while loops.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
A list of gradients, one for each output of `op`. If the gradients
for a particular output are a list, this function aggregates them
before returning.
Raises:
TypeError: if the incoming grads are not Tensors or IndexedSlices.
ValueError: if the arguments are invalid.
"""
if aggregation_method is None:
aggregation_method = AggregationMethod.DEFAULT
if aggregation_method not in [
AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
raise ValueError("Invalid aggregation_method specified %s." %
aggregation_method)
out_grads = _GetGrads(grads, op)
for i, out_grad in enumerate(out_grads):
if loop_state:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
assert control_flow_ops.IsLoopSwitch(op)
continue
# Grads have to be Tensors or IndexedSlices
if (isinstance(out_grad, collections.Sequence) and not all([
isinstance(g, (ops.Tensor, ops.IndexedSlices)) for g in out_grad
if g is not None
])):
raise TypeError("gradients have to be either all Tensors "
"or all IndexedSlices")
# Aggregate multiple gradients, and convert [] to None.
if out_grad:
if len(out_grad) < 2:
used = "nop"
out_grads[i] = out_grad[0]
elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
tensor_shape = _AccumulatorShape(out_grad)
if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
# The benefit of using AccumulateN is that its inputs can be combined
# in any order and this can allow the expression to be evaluated with
# a smaller memory footprint. When used with gpu_allocator_retry,
# it is possible to compute a sum of terms which are much larger than
# total GPU memory.
# AccumulateN can currently only be used if we know the shape for
# an accumulator variable. If this is not known, or if we only have
# 2 grads then we fall through to the "tree" case below.
used = "accumulate_n"
out_grads[i] = math_ops.accumulate_n(out_grad)
elif aggregation_method in [
AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
# Aggregate all gradients by doing pairwise sums: this may
# reduce performance, but it can improve memory because the
# gradients can be released earlier.
#
# TODO(vrv): Consider replacing this with a version of
# tf.AddN() that eagerly frees its inputs as soon as they are
# ready, so the order of this tree does not become a problem.
used = "tree"
with ops.name_scope(op.name + "_gradient_sum"):
running_sum = out_grad[0]
for grad in out_grad[1:]:
running_sum = math_ops.add_n([running_sum, grad])
out_grads[i] = running_sum
else:
used = "add_n"
out_grads[i] = _MultiDeviceAddN(out_grad)
logging.vlog(2, " _AggregatedGrads %d x %s using %s",
len(out_grad), tensor_shape, used)
else:
out_grad = math_ops._as_indexed_slices_list(
[g for g in out_grad if g is not None])
out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
# Form IndexedSlices out of the concatenated values and
# indices.
out_grads[i] = ops.IndexedSlices(
array_ops.concat([x.values for x in out_grad], 0),
array_ops.concat([x.indices for x in out_grad], 0),
out_grad[0].dense_shape)
else: # not out_grad
# out_grads[i] is [], thus its aggregation is simply None.
out_grads[i] = None
return out_grads
# TODO(vrv): Make this available when we want to make it public.
def _hessian_vector_product(ys, xs, v):
"""Multiply the Hessian of `ys` wrt `xs` by `v`.
This is an efficient construction that uses a backprop-like approach
to compute the product between the Hessian and another vector. The
Hessian is usually too large to be explicitly computed or even
represented, but this method allows us to at least multiply by it
for the same big-O cost as backprop.
Implicit Hessian-vector products are the main practical, scalable way
of using second derivatives with neural networks. They allow us to
do things like construct Krylov subspaces and approximate conjugate
gradient descent.
Example: if `y` = `x`^T A `x`, then `hessian_vector_product(y,
x, v)` will return an expression that evaluates to the same values
as (A + A.T) `v`, since the Hessian of `x`^T A `x` is exactly A + A.T.
Args:
ys: A scalar value, or a tensor or list of tensors to be summed to
yield a scalar.
xs: A list of tensors that we should construct the Hessian over.
v: A list of tensors, with the same shapes as xs, that we want to
multiply by the Hessian.
Returns:
A list of tensors (or if the list would be length 1, a single tensor)
containing the product between the Hessian and `v`.
Raises:
ValueError: `xs` and `v` have different lengths.
"""
# Validate the input
length = len(xs)
if len(v) != length:
raise ValueError("xs and v must have the same length.")
# First backprop
grads = gradients(ys, xs)
assert len(grads) == length
elemwise_products = [
math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem))
for grad_elem, v_elem in zip(grads, v) if grad_elem is not None
]
# Second backprop
return gradients(elemwise_products, xs)
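# Worked sketch (hypothetical values): with A = [[2., 0.], [0., 4.]] and
# y = x^T A x, the Hessian is A + A.T = [[4., 0.], [0., 8.]], so for
# v = [1., 1.] the product evaluates to [4., 8.]:
#   x = constant_op.constant([1., 1.])
#   y = math_ops.reduce_sum(x * x * constant_op.constant([2., 4.]))
#   hvp = _hessian_vector_product(y, [x], [constant_op.constant([1., 1.])])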
def hessians(ys, xs, name="hessians", colocate_gradients_with_ops=False,
gate_gradients=False, aggregation_method=None):
"""Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.
`hessians()` adds ops to the graph to output the Hessian matrix of `ys`
with respect to `xs`. It returns a list of `Tensor` of length `len(xs)`
where each tensor is the Hessian of `sum(ys)`. This function currently
only supports evaluating the Hessian with respect to (a list of) one-
dimensional tensors.
The Hessian is a matrix of second-order partial derivatives of a scalar
tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
name: Optional name to use for grouping all the gradient ops together.
Defaults to 'hessians'.
colocate_gradients_with_ops: See `gradients()` documentation for details.
gate_gradients: See `gradients()` documentation for details.
aggregation_method: See `gradients()` documentation for details.
Returns:
A list of Hessian matrices of `sum(y)` for each `x` in `xs`.
Raises:
LookupError: if one of the operations between `xs` and `ys` does not
have a registered gradient function.
ValueError: if the arguments are invalid or not supported. Currently,
this function only supports one-dimensional `x` in `xs`.
"""
xs = _AsList(xs)
kwargs = {
'colocate_gradients_with_ops': colocate_gradients_with_ops,
'gate_gradients': gate_gradients,
'aggregation_method': aggregation_method
}
# Compute a hessian matrix for each x in xs
hessians = []
for i, x in enumerate(xs):
# Check dimensions
ndims = x.get_shape().ndims
if ndims is None:
raise ValueError('Cannot compute Hessian because the dimensionality of '
'element number %d of `xs` cannot be determined' % i)
elif ndims != 1:
raise ValueError('Computing hessians is currently only supported for '
'one-dimensional tensors. Element number %d of `xs` has '
'%d dimensions.' % (i, ndims))
with ops.name_scope(name + '_first_derivative'):
# Compute the partial derivatives of the input with respect to all
# elements of `x`
_gradients = gradients(ys, x, **kwargs)[0]
# Unpack the gradients into a list so we can take derivatives with
# respect to each element
_gradients = array_ops.unstack(_gradients)
with ops.name_scope(name + '_second_derivative'):
# Compute the partial derivatives with respect to each element of the list
_hess = [gradients(_gradient, x, **kwargs)[0] for _gradient in _gradients]
# Pack the list into a matrix and add to the list of hessians
hessians.append(array_ops.stack(_hess, name=name))
return hessians
| apache-2.0 |
kittolau/selepy | web_helper/name_generator/english_name_generator.py | 1 | 33021 | from web_helper.name_generator.abstract_name_generator import AbstractNameGenerator
from random import randint
class EnglishNameGenerator(AbstractNameGenerator):
# Name pool from http://fantasynamegenerators.com/english_names.php
femaleFirstnames1 = ["Abbie","Abby","Abigail","Aimee","Alexandra","Alice","Alicia","Alisha","Amber","Amelia","Amelie","Amy","Anna","Ava","Bella","Bethany","Brooke","Caitlin","Cerys","Charlie","Charlotte","Chelsea","Chloe","Courtney","Daisy","Danielle","Demi","Eleanor","Eliza","Elizabeth","Ella","Ellie","Eloise","Elsie","Emilia","Emily","Emma","Erin","Esme","Eva","Eve","Evelyn","Evie","Faith","Freya","Georgia","Georgina","Grace","Gracie","Hannah","Harriet","Heidi","Hollie","Holly","Imogen","Isabel","Isabella","Isabelle","Isla","Isobel","Jade","Jasmine","Jennifer","Jessica","Jodie","Julia","Kate","Katherine","Katie","Kayla","Kayleigh","Keira","Lacey","Lara","Laura","Lauren","Layla","Leah","Lexi","Lexie","Libby","Lilly","Lily","Lola","Louise","Lucy","Lydia","Maddison","Madeleine","Madison","Maisie","Maisy","Maria","Martha","Matilda","Maya","Megan","Melissa","Mia","Millie","Mollie","Molly","Morgan","Mya","Naomi","Natasha","Niamh","Nicole","Olivia","Paige","Phoebe","Poppy","Rachel","Rebecca","Rose","Rosie","Ruby","Samantha","Sara","Sarah","Scarlett","Shannon","Sienna","Skye","Sofia","Sophia","Sophie","Summer","Tegan","Tia","Tilly","Victoria","Willow","Yasmin","Zara","Zoe"];
femaleFirstnames2 = ["Sophia","Emma","Isabella","Olivia","Ava","Emily","Abigail","Mia","Madison","Elizabeth","Chloe","Ella","Avery","Addison","Aubrey","Lily","Natalie","Sofia","Charlotte","Zoey","Grace","Hannah","Amelia","Harper","Lillian","Samantha","Evelyn","Victoria","Brooklyn","Zoe","Layla","Hailey","Leah","Kaylee","Anna","Aaliyah","Gabriella","Allison","Nevaeh","Alexis","Audrey","Savannah","Sarah","Alyssa","Claire","Taylor","Riley","Camila","Arianna","Ashley","Brianna","Sophie","Peyton","Bella","Khloe","Genesis","Alexa","Serenity","Kylie","Aubree","Scarlett","Stella","Maya","Katherine","Julia","Lucy","Madelyn","Autumn","Makayla","Kayla","Mackenzie","Lauren","Gianna","Ariana","Faith","Alexandra","Melanie","Sydney","Bailey","Caroline","Naomi","Morgan","Kennedy","Ellie","Jasmine","Eva","Skylar","Kimberly","Violet","Molly","Aria","Jocelyn","Trinity","London","Lydia","Madeline","Reagan","Piper","Andrea","Annabelle","Maria","Brooke","Payton","Paisley","Paige","Ruby","Nora","Mariah","Rylee","Lilly","Brielle","Jade","Destiny","Nicole","Mila","Kendall","Liliana","Kaitlyn","Natalia","Sadie","Jordyn","Vanessa","Mary","Mya","Penelope","Isabelle","Alice","Reese","Gabrielle","Hadley","Katelyn","Angelina","Rachel","Isabel","Eleanor","Clara","Brooklynn","Jessica","Elena","Aliyah","Vivian","Laila","Sara","Amy","Eliana","Lyla","Juliana","Valeria","Adriana","Makenzie","Elise","Mckenzie","Quinn","Delilah","Cora","Kylee","Rebecca","Gracie","Izabella","Josephine","Alaina","Michelle","Jennifer","Eden","Valentina","Aurora","Catherine","Stephanie","Valerie","Jayla","Willow","Daisy","Alana","Melody","Hazel","Summer","Melissa","Margaret","Kinsley","Kinley","Ariel","Lila","Giselle","Ryleigh","Haley","Julianna","Ivy","Alivia","Brynn","Keira","Daniela","Aniyah","Angela","Kate","Londyn","Hayden","Harmony","Adalyn","Megan","Allie","Gabriela","Alayna","Presley","Jenna","Alexandria","Ashlyn","Adrianna","Jada","Fiona","Norah","Emery","Maci","Miranda","Ximena","Amaya","Cecilia","Ana","Shelby","Katie","Hope","Callie","Jordan","Luna","Leilani","Eliza","Mckenna","Angel","Genevieve","Makenna","Isla","Lola","Danielle","Chelsea","Leila","Tessa","Adelyn","Camille","Mikayla","Adeline","Adalynn","Sienna","Esther","Jacqueline","Emerson","Arabella","Maggie","Athena","Lucia","Lexi","Ayla","Diana","Alexia","Juliet","Josie","Allyson","Addyson","Delaney","Teagan","Marley","Amber","Rose","Erin","Leslie","Kayleigh","Amanda","Kathryn","Kelsey","Emilia","Alina","Kenzie","Kaydence","Alicia","Alison","Paris","Sabrina","Ashlynn","Lilliana","Sierra","Cassidy","Laura","Alondra","Iris","Kyla","Christina","Carly","Jillian","Madilyn","Kyleigh","Madeleine","Cadence","Nina","Evangeline","Nadia","Raegan","Lyric","Giuliana","Briana","Georgia","Yaretzi","Elliana","Haylee","Fatima","Phoebe","Selena","Charlie","Dakota","Annabella","Abby","Daniella","Juliette","Lilah","Bianca","Mariana","Miriam","Parker","Veronica","Gemma","Noelle","Cheyenne","Marissa","Heaven","Vivienne","Brynlee","Joanna","Mallory","Aubrie","Journey","Nyla","Cali","Tatum","Carmen","Gia","Jazmine","Heidi","Miley","Baylee","Elaina","Macy","Ainsley","Jane","Raelynn","Anastasia","Adelaide","Ruth","Camryn","Kiara","Alessandra","Hanna","Finley","Maddison","Lia","Bethany","Karen","Kelly","Malia","Jazmin","Jayda","Esmeralda","Kira","Lena","Kamryn","Kamila","Karina","Eloise","Kara","Elisa","Rylie","Olive","Nayeli","Tiffany","Macie","Skyler","Addisyn","Angelica","Briella","Fernanda","Annie","Maliyah","Amiyah","Jayden","Charlee","Caitlyn","Elle","Crystal","Julie","Imani","Kendra","Talia","Angeliqu
e","Jazlyn","Guadalupe","Alejandra","Emely","Lucille","Anya","April","Elsie","Madelynn","Myla","Julissa","Scarlet","Helen","Breanna","Kyra","Madisyn","Rosalie","Brittany","Brylee","Jayleen","Arielle","Karla","Kailey","Arya","Sarai","Harley","Miracle","Kaelyn","Kali","Cynthia","Daphne","Aleah","Caitlin","Cassandra","Holly","Janelle","Marilyn","Katelynn","Kaylie","Itzel","Carolina","Bristol","Haven","Michaela","Monica","June","Janiyah","Camilla","Jamie","Rebekah","Audrina","Dayana","Lana","Serena","Tiana","Nylah","Braelyn","Savanna","Skye","Raelyn","Madalyn","Sasha","Perla","Bridget","Aniya","Rowan","Logan","Mckinley","Averie","Jaylah","Aylin","Joselyn","Nia","Hayley","Lilian","Adelynn","Jaliyah","Kassidy","Kaylin","Kadence","Celeste","Jaelyn","Zariah","Tatiana","Jimena","Lilyana","Anaya","Catalina","Viviana","Cataleya","Sloane","Courtney","Johanna","Amari","Melany","Anabelle","Francesca","Ada","Alanna","Priscilla","Danna","Angie","Kailyn","Lacey","Sage","Lillie","Brinley","Caylee","Joy","Kenley","Vera","Bailee","Amira","Aileen","Aspen","Emmalyn","Erica","Gracelyn","Kennedi","Skyla","Annalise","Danica","Dylan","Kiley","Gwendolyn","Jasmin","Lauryn","Aleena","Justice","Annabel","Tenley","Dahlia","Gloria","Lexie","Lindsey","Hallie","Sylvia","Elyse","Annika","Maeve","Marlee","Aryanna","Kenya","Lorelei","Selah","Kaliyah","Adele","Natasha","Brenda","Erika","Alyson","Braylee","Emilee","Raven","Ariella","Blakely","Liana","Jaycee","Sawyer","Anahi","Jaelynn","Elsa","Farrah","Cameron","Evelynn","Luciana","Zara","Madilynn","Eve","Kaia","Helena","Anne","Estrella","Leighton","Nataly","Whitney","Lainey","Amara","Anabella","Malaysia","Samara","Zoie","Amani","Phoenix","Dulce","Paola","Marie","Aisha","Harlow","Virginia","Ember","Regina","Jaylee","Anika","Ally","Kayden","Alani","Miah","Yareli","Journee","Kiera","Nathalie","Mikaela","Jaylynn","Litzy","Charley","Claudia","Aliya","Madyson","Cecelia","Liberty","Braelynn","Evie","Rosemary","Myah","Lizbeth","Giana","Ryan","Teresa","Ciara","Isis","Lea","Shayla","Jazlynn","Rosa","Gracelynn","Desiree","Elisabeth","Isabela","Arely","Mariam","Abbigail","Emersyn","Brenna","Kaylynn","Nova","Raquel","Dana","Laney","Laylah","Siena","Amelie","Clarissa","Lilianna","Lylah","Halle","Madalynn","Maleah","Sherlyn","Linda","Shiloh","Jessie","Kenia","Greta","Marina","Melina","Amiya","Bria","Natalee","Sariah","Mollie","Nancy","Christine","Felicity","Zuri","Irene","Simone","Amya","Matilda","Colette","Kristen","Paityn","Alayah","Janiya","Kallie","Mira","Hailee","Kathleen","Meredith","Janessa","Noemi","Aiyana","Aliana","Leia","Mariyah","Tori","Alissa","Ivanna","Joslyn","Sandra","Maryam","Saniyah","Kassandra","Danika","Denise","Jemma","River","Charleigh","Emelia","Kristina","Armani","Beatrice","Jaylene","Karlee","Blake","Cara","Addilyn","Amina","Ansley","Kaitlynn","Iliana","Mckayla","Adelina","Briley","Elaine","Lailah","Mercedes","Chaya","Lindsay","Hattie","Lisa","Marisol","Patricia","Bryanna","Taliyah","Adrienne","Emmy","Millie","Paislee","Charli","Kourtney","Leyla","Maia","Willa","Milan","Paula","Ayleen","Clare","Kensley","Reyna","Martha","Adley","Elianna","Emilie","Karsyn","Yasmin","Lorelai","Amirah","Aryana","Livia","Alena","Kiana","Celia","Kailee","Rylan","Ellen","Galilea","Kynlee","Leanna","Renata","Mae","Ayanna","Chanel","Lesly","Cindy","Carla","Pearl","Jaylin","Kimora","Angeline","Carlee","Aubri","Edith","Alia","Frances","Corinne","Jocelynn","Cherish","Wendy","Carolyn","Lina","Tabitha","Winter","Abril","Bryn","Jolie","Yaritza","Casey","Zion","Lillianna","Jordynn","Zariyah","Audrian
a","Jayde","Jaida","Salma","Diamond","Malaya","Kimber","Ryann","Abbie","Paloma","Destinee","Kaleigh","Asia","Demi","Yamileth","Deborah","Elin","Kaiya","Mara","Averi","Nola","Tara","Taryn","Emmalee","Aubrianna","Janae","Kyndall","Jewel","Zaniyah","Kaya","Sonia","Alaya","Heather","Nathaly","Shannon","Ariah","Avah","Giada","Lilith","Samiyah","Sharon","Coraline","Eileen","Julianne","Milania","Chana","Regan","Krystal","Rihanna","Sidney","Hadassah","Macey","Mina","Paulina","Rayne","Kaitlin","Maritza","Susan","Raina","Hana","Keyla","Temperance","Aimee","Alisson","Charlize","Kendal","Lara","Roselyn","Alannah","Alma","Dixie","Larissa","Patience","Taraji","Sky","Zaria","Aleigha","Alyvia","Aviana","Bryleigh","Elliot","Jenny","Luz","Ali","Alisha","Ayana","Campbell","Karis","Lilyanna","Azaria","Blair","Micah","Moriah","Myra","Lilia","Aliza","Giovanna","Karissa","Saniya","Emory","Estella","Juniper","Kairi","Kenna","Meghan","Abrielle","Elissa","Rachael","Emmaline","Jolene","Joyce","Britney","Carlie","Haylie","Judith","Renee","Saanvi","Yesenia","Barbara","Dallas","Jaqueline","Karma","America","Sariyah","Azalea","Everly","Ingrid","Lillyana","Emmalynn","Marianna","Brisa","Kaelynn","Leona","Libby","Deanna","Mattie","Miya","Kai","Annalee","Nahla","Dorothy","Kaylyn","Rayna","Araceli","Cambria","Evalyn","Haleigh","Thalia","Jakayla","Maliah","Saige","Avianna","Charity","Kaylen","Raylee","Tamia","Aubrielle","Bayleigh","Carley","Kailynn","Katrina","Belen","Karlie","Natalya","Alaysia","Celine","Milana","Monroe","Estelle","Meadow","Audrianna","Cristina","Harlee","Jazzlyn","Scarlette","Zahra","Akira","Ann","Collins","Kendyl","Anabel","Azariah","Carissa","Milena","Tia","Alisa","Bree","Carleigh","Cheyanne","Sarahi","Laurel","Kylah","Tinley","Kora","Marisa","Esme","Sloan","Cailyn","Gisselle","Kasey","Kyndal","Marlene","Riya","Annabell","Aubriana","Izabelle","Kirsten","Aya","Dalilah","Devyn","Geraldine","Analia","Hayleigh","Landry","Sofie","Tess","Ashtyn","Jessa","Katalina"];
neutralFirstnames = ["Aaren","Addison","Aiden","Alex","Alexis","Ali","Angel","Ash","Ashley","Ashton","Aubrey","Avery","Bailey","Bennie","Bev","Billie","Billy","Blair","Blake","Bret","Brett","Brice","Brook","Brynn","Caden","Cameron","Carmen","Carol","Casey","Charlie","Chris","Clem","Cory","Dane","Danni","Danny","Denny","Drew","Eli","Elliot","Emerson","Erin","Fran","Frankie","Franky","Gabby","Gabe","Gail","Gale","Gene","Glen","Glenn","Haiden","Harley","Harper","Hayden","Jackie","Jaden","Jaime","Jamie","Jess","Jesse","Jessie","Jo","Jody","Jordan","Jude","Justice","Kai","Kerry","Kiran","Kit","Kris","Lane","Lee","Leigh","Lesley","Leslie","Logan","Lynn","Maddox","Marley","Mason","Mel","Mell","Morgan","Nicky","Noel","Phoenix","Quinn","Ray","Raylee","Reed","Reggie","Rene","Riley","River","Robin","Rory","Rowan","Rudy","Ryan","Sam","Sammy","Shay","Sidney","Silver","Skye","Skylar","Skyler","Steff","Tanner","Taylor","Terry","Tyler","Val","Vic","Will","Willy"];
maleFirstnames1 = ["Aaron","Adam","Aidan","Aiden","Alex","Alexander","Alfie","Andrew","Anthony","Archie","Arthur","Ashton","Bailey","Ben","Benjamin","Billy","Blake","Bobby","Bradley","Brandon","Caleb","Callum","Cameron","Charles","Charlie","Christopher","Cody","Connor","Corey","Daniel","David","Declan","Dexter","Dominic","Dylan","Edward","Elliot","Ellis","Ethan","Evan","Ewan","Finlay","Finley","Frankie","Freddie","Frederick","Gabriel","George","Harley","Harrison","Harry","Harvey","Hayden","Henry","Isaac","Jack","Jackson","Jacob","Jake","James","Jamie","Jay","Jayden","Jenson","Joe","Joel","John","Jonathan","Jordan","Joseph","Josh","Joshua","Jude","Kai","Kayden","Kian","Kieran","Kyle","Leo","Leon","Lewis","Liam","Logan","Louie","Louis","Luca","Lucas","Luke","Mason","Matthew","Max","Michael","Morgan","Nathan","Nicholas","Noah","Oliver","Ollie","Oscar","Owen","Patrick","Peter","Reece","Reuben","Rhys","Riley","Robert","Rory","Ryan","Sam","Samuel","Scott","Sean","Sebastian","Spencer","Stanley","Taylor","Theo","Thomas","Toby","Tom","Tommy","Tyler","William","Zac","Zachary","Zak"];
maleFirstnames2 = ["Aaden","Aarav","Aaron","Abdiel","Abdullah","Abel","Abraham","Abram","Ace","Adam","Adan","Aden","Aditya","Adonis","Adrian","Adriel","Adrien","Agustin","Ahmad","Ahmed","Aidan","Aiden","Aidyn","Alan","Albert","Alberto","Alden","Aldo","Alec","Alejandro","Alessandro","Alex","Alexander","Alexis","Alexzander","Alfonso","Alfred","Alfredo","Ali","Alijah","Allan","Allen","Alonso","Alonzo","Alvaro","Alvin","Amare","Amari","Ameer","Amir","Amos","Anders","Anderson","Andre","Andres","Andrew","Andy","Angel","Angelo","Anthony","Antoine","Anton","Antonio","Apollo","Archer","Ari","Arian","Ariel","Arjun","Arlo","Armando","Armani","Arnav","Aron","Arthur","Arturo","Aryan","Asa","Asher","Ashton","Atticus","August","Augustine","Augustus","Austin","Austyn","Avery","Axel","Axton","Ayaan","Aydan","Ayden","Aydin","Barrett","Beau","Beckett","Beckham","Ben","Benjamin","Bennett","Benson","Bentlee","Bentley","Bently","Benton","Billy","Blaine","Blaise","Blake","Blaze","Bo","Bobby","Bodhi","Boston","Bowen","Braden","Bradley","Brady","Bradyn","Braeden","Braiden","Branden","Brandon","Branson","Brantley","Braxton","Brayan","Brayden","Braydon","Braylen","Braylon","Brecken","Brendan","Brenden","Brendon","Brennan","Brennen","Brent","Brentley","Brenton","Brett","Brian","Brice","Bridger","Briggs","Brock","Broderick","Brodie","Brody","Brogan","Bronson","Brooks","Bruce","Bruno","Bryan","Bryant","Bryce","Brycen","Bryson","Byron","Cade","Caden","Cael","Caiden","Cain","Cale","Caleb","Callan","Callen","Callum","Calvin","Camden","Camdyn","Cameron","Camilo","Camren","Camron","Camryn","Cannon","Carl","Carlos","Carmelo","Carson","Carter","Case","Casen","Casey","Cash","Cason","Cassius","Cayden","Cayson","Cedric","Cesar","Chace","Chad","Chaim","Chance","Chandler","Channing","Charles","Charlie","Chase","Chris","Christian","Christopher","Clark","Clay","Clayton","Clinton","Cody","Cohen","Colby","Cole","Coleman","Colin","Collin","Colt","Colten","Colton","Conner","Connor","Conor","Conrad","Cooper","Corbin","Corey","Cory","Craig","Crew","Cristian","Cristopher","Crosby","Cruz","Cullen","Curtis","Cyrus","Dakota","Dallas","Dalton","Damari","Damarion","Damian","Damien","Damion","Damon","Dane","Dangelo","Daniel","Danny","Dante","Darian","Dariel","Darien","Dario","Darius","Darnell","Darrell","Darren","Darryl","Darwin","Davian","David","Davin","Davion","Davis","Davon","Dawson","Dax","Daxton","Dayton","Deacon","Dean","Deandre","Deangelo","Declan","Deegan","Demarcus","Demetrius","Dennis","Denzel","Deon","Derek","Derick","Derrick","Deshawn","Desmond","Devan","Devin","Devon","Dexter","Diego","Dillon","Dominic","Dominick","Dominik","Dominique","Donald","Donovan","Donte","Dorian","Douglas","Drake","Draven","Drew","Duncan","Dustin","Dwayne","Dylan","Ean","Easton","Eddie","Eden","Edgar","Edison","Eduardo","Edward","Edwin","Efrain","Eli","Elian","Elias","Elijah","Eliot","Eliseo","Elisha","Elliot","Elliott","Ellis","Emanuel","Emerson","Emery","Emiliano","Emilio","Emmanuel","Emmett","Emmitt","Emory","Enrique","Enzo","Eric","Erick","Erik","Ernest","Ernesto","Esteban","Ethan","Eugene","Evan","Everett","Ezekiel","Ezequiel","Ezra","Fabian","Felipe","Felix","Fernando","Finley","Finn","Finnegan","Fisher","Fletcher","Flynn","Foster","Francis","Francisco","Franco","Frank","Frankie","Franklin","Freddy","Frederick","Gabriel","Gael","Gage","Gaige","Garrett","Gary","Gauge","Gavin","Gavyn","George","Gerald","Gerardo","Giancarlo","Gianni","Gibson","Gideon","Gilbert","Gilberto","Giovani","Giovanni","Giovanny","Grady","Graeme","Graham","Grant","Graysen","Grayson
","Gregory","Greyson","Griffin","Guillermo","Gunnar","Gunner","Gustavo","Hamza","Hank","Harley","Harold","Harper","Harrison","Harry","Harvey","Hassan","Hayden","Hayes","Heath","Hector","Hendrix","Henry","Hezekiah","Holden","Houston","Howard","Hudson","Hugh","Hugo","Hunter","Ian","Ibrahim","Ignacio","Iker","Immanuel","Isaac","Isai","Isaiah","Isaias","Ishaan","Isiah","Ismael","Israel","Issac","Ivan","Izaiah","Izayah","Jabari","Jace","Jack","Jackson","Jacob","Jacoby","Jaden","Jadiel","Jadon","Jaeden","Jael","Jagger","Jaiden","Jaidyn","Jaime","Jairo","Jake","Jakob","Jalen","Jamal","Jamar","Jamari","Jamarion","James","Jameson","Jamie","Jamir","Jamison","Jared","Jarrett","Jase","Jasiah","Jason","Jasper","Javier","Javion","Javon","Jax","Jaxen","Jaxon","Jaxson","Jaxton","Jay","Jayce","Jaycob","Jayden","Jaydon","Jaylen","Jaylin","Jaylon","Jayson","Jedidiah","Jefferson","Jeffery","Jeffrey","Jensen","Jeremiah","Jeremy","Jermaine","Jerome","Jerry","Jesse","Jessie","Jesus","Jett","Jimmy","Jionni","Joaquin","Joe","Joel","Joey","Johan","Johann","John","Johnathan","Johnathon","Johnny","Jon","Jonah","Jonas","Jonathan","Jonathon","Jordan","Jorden","Jordyn","Jorge","Jose","Joseph","Joshua","Josiah","Josue","Jovani","Jovanni","Joziah","Juan","Judah","Jude","Juelz","Julian","Julien","Julio","Julius","Junior","Justice","Justin","Justus","Kade","Kaden","Kaeden","Kael","Kai","Kaiden","Kale","Kaleb","Kamari","Kamden","Kameron","Kamron","Kamryn","Kane","Kareem","Karsen","Karson","Karter","Kase","Kasen","Kash","Kason","Kayden","Kaysen","Kayson","Keagan","Keaton","Keegan","Keenan","Keith","Kellan","Kellen","Kelvin","Kendall","Kendrick","Kenneth","Kenny","Kevin","Khalil","Kian","Kieran","Killian","King","Kingsley","Kingston","Knox","Kobe","Kody","Kohen","Kolby","Kole","Kolten","Kolton","Konner","Konnor","Korbin","Krish","Kristian","Kristopher","Kylan","Kyle","Kylen","Kyler","Kymani","Kyree","Kyrie","Kyson","Lamar","Lance","Landen","Landon","Landry","Landyn","Lane","Larry","Lawrence","Lawson","Layne","Layton","Leandro","Lee","Legend","Leland","Lennon","Lennox","Leo","Leon","Leonard","Leonardo","Leonel","Leonidas","Leroy","Levi","Lewis","Liam","Lincoln","Lionel","Logan","London","Lorenzo","Louis","Luca","Lucas","Lucca","Lucian","Luciano","Luis","Luka","Lukas","Luke","Lyric","Mack","Madden","Maddox","Maison","Major","Makai","Makhi","Malachi","Malakai","Malaki","Malcolm","Malik","Manuel","Marc","Marcel","Marcelo","Marco","Marcos","Marcus","Mario","Mark","Markus","Marlon","Marquis","Marshall","Martin","Marvin","Masen","Mason","Mateo","Mathew","Mathias","Matias","Matteo","Matthew","Matthias","Maurice","Mauricio","Maverick","Max","Maxim","Maximilian","Maximiliano","Maximo","Maximus","Maxton","Maxwell","Mayson","Mekhi","Melvin","Memphis","Messiah","Micah","Michael","Micheal","Miguel","Mike","Miles","Milo","Misael","Mitchell","Mohamed","Mohammad","Mohammed","Moises","Morgan","Moses","Moshe","Muhammad","Mustafa","Myles","Nash","Nasir","Nathan","Nathanael","Nathaniel","Nehemiah","Neil","Nelson","Neymar","Nicholas","Nickolas","Nico","Nicolas","Niko","Nikolai","Nikolas","Nixon","Noah","Noe","Noel","Nolan","Oakley","Odin","Oliver","Omar","Omari","Orion","Orlando","Oscar","Osvaldo","Otto","Owen","Pablo","Parker","Patrick","Paul","Paxton","Payton","Pedro","Peter","Peyton","Philip","Phillip","Phoenix","Pierce","Porter","Preston","Prince","Princeton","Quentin","Quincy","Quinn","Quintin","Quinton","Rafael","Raiden","Ramiro","Ramon","Randall","Randy","Raphael","Rashad","Raul","Ray","Rayan","Rayden","Raylan","Raymond","Reagan","Reece","Ree
d","Reese","Reginald","Reid","Remington","Remy","Rene","Reuben","Rex","Rey","Rhett","Rhys","Ricardo","Richard","Ricky","Riley","River","Robert","Roberto","Rocco","Roderick","Rodney","Rodolfo","Rodrigo","Rogelio","Roger","Rohan","Roland","Rolando","Roman","Romeo","Ronald","Ronan","Ronin","Ronnie","Rory","Ross","Rowan","Rowen","Roy","Royce","Ruben","Rudy","Russell","Ryan","Ryder","Ryker","Rylan","Ryland","Rylee","Rylen","Sage","Salvador","Salvatore","Sam","Samir","Samson","Samuel","Santiago","Santino","Santos","Saul","Sawyer","Scott","Seamus","Sean","Sebastian","Semaj","Sergio","Seth","Shane","Shaun","Shawn","Sheldon","Sidney","Silas","Simeon","Simon","Sincere","Skylar","Skyler","Solomon","Sonny","Soren","Spencer","Stanley","Stefan","Stephen","Sterling","Steve","Steven","Sullivan","Sylas","Talon","Tanner","Tate","Tatum","Taylor","Teagan","Terrance","Terrell","Terrence","Terry","Thaddeus","Theo","Theodore","Thiago","Thomas","Timothy","Titan","Titus","Tobias","Toby","Todd","Tomas","Tommy","Tony","Trace","Travis","Trent","Trenton","Trevon","Trevor","Trey","Tripp","Tristan","Tristen","Tristian","Tristin","Triston","Troy","Truman","Trystan","Tucker","Turner","Ty","Tyler","Tyree","Tyrell","Tyrone","Tyson","Ulises","Uriah","Uriel","Urijah","Valentin","Valentino","Van","Vance","Vaughn","Vicente","Victor","Vihaan","Vincent","Vincenzo","Wade","Walker","Walter","Warren","Waylon","Wayne","Wesley","Westin","Weston","Will","William","Willie","Wilson","Winston","Wyatt","Xander","Xavi","Xavier","Xzavier","Yael","Yahir","Yandel","Yehuda","Yosef","Yousef","Yusuf","Zachariah","Zachary","Zackary","Zaid","Zaiden","Zain","Zaire","Zander","Zane","Zavier","Zayden","Zayne","Zechariah","Zeke","Zion"];
lastnames1 = ["Abbott","Acevedo","Acosta","Adams","Adkins","Aguilar","Aguirre","Albert","Alexander","Alford","Allen","Allison","Alston","Alvarado","Alvarez","Anderson","Andrews","Anthony","Armstrong","Arnold","Ashley","Atkins","Atkinson","Austin","Avery","Avila","Ayala","Ayers","Bailey","Baird","Baker","Baldwin","Ball","Ballard","Banks","Barber","Barker","Barlow","Barnes","Barnett","Barr","Barrera","Barrett","Barron","Barry","Bartlett","Barton","Bass","Bates","Battle","Bauer","Baxter","Beach","Bean","Beard","Beasley","Beck","Becker","Bell","Bender","Benjamin","Bennett","Benson","Bentley","Benton","Berg","Berger","Bernard","Berry","Best","Bird","Bishop","Black","Blackburn","Blackwell","Blair","Blake","Blanchard","Blankenship","Blevins","Bolton","Bond","Bonner","Booker","Boone","Booth","Bowen","Bowers","Bowman","Boyd","Boyer","Boyle","Bradford","Bradley","Bradshaw","Brady","Branch","Bray","Brennan","Brewer","Bridges","Briggs","Bright","Britt","Brock","Brooks","Brown","Browning","Bruce","Bryan","Bryant","Buchanan","Buck","Buckley","Buckner","Bullock","Burch","Burgess","Burke","Burks","Burnett","Burns","Burris","Burt","Burton","Bush","Butler","Byers","Byrd","Cabrera","Cain","Calderon","Caldwell","Calhoun","Callahan","Camacho","Cameron","Campbell","Campos","Cannon","Cantrell","Cantu","Cardenas","Carey","Carlson","Carney","Carpenter","Carr","Carrillo","Carroll","Carson","Carter","Carver","Case","Casey","Cash","Castaneda","Castillo","Castro","Cervantes","Chambers","Chan","Chandler","Chaney","Chang","Chapman","Charles","Chase","Chavez","Chen","Cherry","Christensen","Christian","Church","Clark","Clarke","Clay","Clayton","Clements","Clemons","Cleveland","Cline","Cobb","Cochran","Coffey","Cohen","Cole","Coleman","Collier","Collins","Colon","Combs","Compton","Conley","Conner","Conrad","Contreras","Conway","Cook","Cooke","Cooley","Cooper","Copeland","Cortez","Cote","Cotton","Cox","Craft","Craig","Crane","Crawford","Crosby","Cross","Cruz","Cummings","Cunningham","Curry","Curtis","Dale","Dalton","Daniel","Daniels","Daugherty","Davenport","David","Davidson","Davis","Dawson","Day","Dean","Decker","Dejesus","Delacruz","Delaney","Deleon","Delgado","Dennis","Diaz","Dickerson","Dickson","Dillard","Dillon","Dixon","Dodson","Dominguez","Donaldson","Donovan","Dorsey","Dotson","Douglas","Downs","Doyle","Drake","Dudley","Duffy","Duke","Duncan","Dunlap","Dunn","Duran","Durham","Dyer","Eaton","Edwards","Elliott","Ellis","Ellison","Emerson","England","English","Erickson","Espinoza","Estes","Estrada","Evans","Everett","Ewing","Farley","Farmer","Farrell","Faulkner","Ferguson","Fernandez","Ferrell","Fields","Figueroa","Finch","Finley","Fischer","Fisher","Fitzgerald","Fitzpatrick","Fleming","Fletcher","Flores","Flowers","Floyd","Flynn","Foley","Forbes","Ford","Foreman","Foster","Fowler","Fox","Francis","Franco","Frank","Franklin","Franks","Frazier","Frederick","Freeman","French","Frost","Fry","Frye","Fuentes","Fuller","Fulton","Gaines","Gallagher","Gallegos","Galloway","Gamble","Garcia","Gardner","Garner","Garrett","Garrison","Garza","Gates","Gay","Gentry","George","Gibbs","Gibson","Gilbert","Giles","Gill","Gillespie","Gilliam","Gilmore","Glass","Glenn","Glover","Goff","Golden","Gomez","Gonzales","Gonzalez","Good","Goodman","Goodwin","Gordon","Gould","Graham","Grant","Graves","Gray","Green","Greene","Greer","Gregory","Griffin","Griffith","Grimes","Gross","Guerra","Guerrero","Guthrie","Gutierrez","Guy","Guzman","Hahn","Hale","Haley","Hall","Hamilton","Hammond","Hampton","Hancock","Haney","Hansen","Hanson","Hardin","Harding"
,"Hardy","Harmon","Harper","Harrell","Harrington","Harris","Harrison","Hart","Hartman","Harvey","Hatfield","Hawkins","Hayden","Hayes","Haynes","Hays","Head","Heath","Hebert","Henderson","Hendricks","Hendrix","Henry","Hensley","Henson","Herman","Hernandez","Herrera","Herring","Hess","Hester","Hewitt","Hickman","Hicks","Higgins","Hill","Hines","Hinton","Hobbs","Hodge","Hodges","Hoffman","Hogan","Holcomb","Holden","Holder","Holland","Holloway","Holman","Holmes","Holt","Hood","Hooper","Hoover","Hopkins","Hopper","Horn","Horne","Horton","House","Houston","Howard","Howe","Howell","Hubbard","Huber","Hudson","Huff","Huffman","Hughes","Hull","Humphrey","Hunt","Hunter","Hurley","Hurst","Hutchinson","Hyde","Ingram","Irwin","Jackson","Jacobs","Jacobson","James","Jarvis","Jefferson","Jenkins","Jennings","Jensen","Jimenez","Johns","Johnson","Johnston","Jones","Jordan","Joseph","Joyce","Joyner","Juarez","Justice","Kane","Kaufman","Keith","Keller","Kelley","Kelly","Kemp","Kennedy","Kent","Kerr","Key","Kidd","Kim","King","Kinney","Kirby","Kirk","Kirkland","Klein","Kline","Knapp","Knight","Knowles","Knox","Koch","Kramer","Lamb","Lambert","Lancaster","Landry","Lane","Lang","Langley","Lara","Larsen","Larson","Lawrence","Lawson","Leach","Leblanc","Lee","Leon","Leonard","Lester","Levine","Levy","Lewis","Lindsay","Lindsey","Little","Livingston","Lloyd","Logan","Long","Lopez","Lott","Love","Lowe","Lowery","Lucas","Luna","Lynch","Lynn","Lyons","Macdonald","Macias","Mack","Madden","Maddox","Maldonado","Malone","Mann","Manning","Marks","Marquez","Marsh","Marshall","Martin","Martinez","Mason","Massey","Mathews","Mathis","Matthews","Maxwell","May","Mayer","Maynard","Mayo","Mays","Mcbride","Mccall","Mccarthy","Mccarty","Mcclain","Mcclure","Mcconnell","Mccormick","Mccoy","Mccray","Mccullough","Mcdaniel","Mcdonald","Mcdowell","Mcfadden","Mcfarland","Mcgee","Mcgowan","Mcguire","Mcintosh","Mcintyre","Mckay","Mckee","Mckenzie","Mckinney","Mcknight","Mclaughlin","Mclean","Mcleod","Mcmahon","Mcmillan","Mcneil","Mcpherson","Meadows","Medina","Mejia","Melendez","Melton","Mendez","Mendoza","Mercado","Mercer","Merrill","Merritt","Meyer","Meyers","Michael","Middleton","Miles","Miller","Mills","Miranda","Mitchell","Molina","Monroe","Montgomery","Montoya","Moody","Moon","Mooney","Moore","Morales","Moran","Moreno","Morgan","Morin","Morris","Morrison","Morrow","Morse","Morton","Moses","Mosley","Moss","Mueller","Mullen","Mullins","Munoz","Murphy","Murray","Myers","Nash","Navarro","Neal","Nelson","Newman","Newton","Nguyen","Nichols","Nicholson","Nielsen","Nieves","Nixon","Noble","Noel","Nolan","Norman","Norris","Norton","Nunez","O'brien","O'connor","O'donnell","O'neal","O'neil","O'neill","Ochoa","Odom","Oliver","Olsen","Olson","Orr","Ortega","Ortiz","Osborn","Osborne","Owen","Owens","Pace","Pacheco","Padilla","Page","Palmer","Park","Parker","Parks","Parrish","Parsons","Pate","Patel","Patrick","Patterson","Patton","Paul","Payne","Pearson","Peck","Pena","Pennington","Perez","Perkins","Perry","Peters","Petersen","Peterson","Petty","Phelps","Phillips","Pickett","Pierce","Pittman","Pitts","Pollard","Poole","Pope","Porter","Potter","Potts","Powell","Powers","Pratt","Preston","Price","Prince","Pruitt","Puckett","Pugh","Quinn","Ramirez","Ramos","Ramsey","Randall","Randolph","Rasmussen","Ratliff","Ray","Raymond","Reed","Reese","Reeves","Reid","Reilly","Reyes","Reynolds","Rhodes","Rice","Rich","Richard","Richards","Richardson","Richmond","Riddle","Riggs","Riley","Rios","Rivas","Rivera","Rivers","Roach","Robbins","Roberson","Roberts","Robertson","R
obinson","Robles","Rodgers","Rodriguez","Rodriquez","Rogers","Rojas","Rollins","Roman","Romero","Rosa","Rosales","Rosario","Rose","Ross","Roth","Rowe","Rowland","Roy","Ruiz","Rush","Russell","Russo","Rutledge","Ryan","Salas","Salazar","Salinas","Sampson","Sanchez","Sanders","Sandoval","Sanford","Santana","Santiago","Santos","Sargent","Saunders","Savage","Sawyer","Schmidt","Schneider","Schroeder","Schultz","Schwartz","Scott","Sears","Sellers","Serrano","Sexton","Shaffer","Shannon","Sharp","Sharpe","Shaw","Shelton","Shepard","Shepherd","Sheppard","Sherman","Shields","Short","Silva","Simmons","Simon","Simpson","Sims","Singleton","Skinner","Slater","Sloan","Small","Smith","Snider","Snow","Snyder","Solis","Solomon","Sosa","Soto","Sparks","Spears","Spence","Spencer","Stafford","Stanley","Stanton","Stark","Steele","Stein","Stephens","Stephenson","Stevens","Stevenson","Stewart","Stokes","Stone","Stout","Strickland","Strong","Stuart","Suarez","Sullivan","Summers","Sutton","Swanson","Sweeney","Sweet","Sykes","Talley","Tanner","Tate","Taylor","Terrell","Terry","Thomas","Thompson","Thornton","Tillman","Todd","Torres","Townsend","Tran","Travis","Trevino","Trujillo","Tucker","Turner","Tyler","Tyson","Underwood","Valdez","Valencia","Valentine","Valenzuela","Vance","Vargas","Vasquez","Vaughan","Vaughn","Vazquez","Vega","Velasquez","Velez","Villarreal","Vincent","Vinson","Wade","Wagner","Walker","Wall","Wallace","Waller","Walls","Walsh","Walter","Walters","Walton","Ward","Ware","Warner","Warren","Washington","Waters","Watkins","Watson","Watts","Weaver","Webb","Weber","Webster","Weeks","Weiss","Welch","Wells","West","Wheeler","Whitaker","White","Whitehead","Whitfield","Whitley","Whitney","Wiggins","Wilcox","Wilder","Wiley","Wilkerson","Wilkins","Wilkinson","William","Williams","Williamson","Willis","Wilson","Winters","Wise","Witt","Wolf","Wolfe","Wong","Wood","Woodard","Woods","Woodward","Wooten","Workman","Wright","Wyatt","Wynn","Yang","Yates","York","Young","Zamora","Zimmerman"];
lastnames2 = ["Adams","Allen","Anderson","Andrews","Armstrong","Atkinson","Austin","Bailey","Baker","Ball","Barker","Barnes","Barrett","Bates","Baxter","Bell","Bennett","Berry","Black","Booth","Bradley","Brooks","Brown","Burke","Burns","Burton","Butler","Byrne","Campbell","Carr","Carter","Chambers","Chapman","Clark","Clarke","Cole","Collins","Cook","Cooke","Cooper","Cox","Cunningham","Davidson","Davies","Davis","Dawson","Day","Dean","Dixon","Doyle","Duncan","Edwards","Elliott","Ellis","Evans","Fisher","Fletcher","Foster","Fox","Francis","Fraser","Gallagher","Gardner","George","Gibson","Gill","Gordon","Graham","Grant","Gray","Green","Griffiths","Hall","Hamilton","Harper","Harris","Harrison","Hart","Harvey","Hawkins","Hayes","Henderson","Hill","Holland","Holmes","Hopkins","Houghton","Howard","Hudson","Hughes","Hunt","Hunter","Hussain","Jackson","James","Jenkins","John","Johnson","Johnston","Jones","Jordan","Kaur","Kelly","Kennedy","Khan","King","Knight","Lane","Lawrence","Lawson","Lee","Lewis","Lloyd","Lowe","Macdonald","Marsh","Marshall","Martin","Mason","Matthews","May","Mccarthy","Mcdonald","Miller","Mills","Mitchell","Moore","Morgan","Morris","Moss","Murphy","Murray","Newman","Nicholson","Owen","Palmer","Parker","Parry","Patel","Pearce","Pearson","Perry","Phillips","Poole","Porter","Powell","Price","Read","Rees","Reid","Reynolds","Richards","Richardson","Riley","Roberts","Robertson","Robinson","Rogers","Rose","Ross","Russell","Ryan","Saunders","Scott","Sharp","Shaw","Simpson","Smith","Spencer","Stevens","Stewart","Stone","Sutton","Taylor","Thomas","Thompson","Thomson","Turner","Walker","Wallace","Walsh","Ward","Watson","Watts","Webb","Wells","West","White","Wilkinson","Williams","Williamson","Willis","Wilson","Wood","Woods","Wright","Young"];
def __init__(self):
super(EnglishNameGenerator, self).__init__()
def getMaleName(self):
if randint(0,1) == 1:
return self.nameGen(self.maleFirstnames1,self.lastnames1)
else:
return self.nameGen(self.maleFirstnames2,self.lastnames2)
def getNeutralName(self):
if randint(0,1) == 1:
return self.nameGen(self.neutralFirstnames,self.lastnames1)
else:
return self.nameGen(self.neutralFirstnames,self.lastnames2)
def getFemaleName(self):
if randint(0,1) == 1:
return self.nameGen(self.femaleFirstnames1,self.lastnames1)
else:
return self.nameGen(self.femaleFirstnames2,self.lastnames2)
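# Usage sketch (assuming AbstractNameGenerator.nameGen, defined in the parent
# class not shown here, joins a random first name and last name):
#   generator = EnglishNameGenerator()
#   generator.getMaleName()    # e.g. "Jacob Smith"
#   generator.getFemaleName()  # e.g. "Sophia Taylor"
#   generator.getNeutralName() # e.g. "Riley Jones"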
| mit |
Event38/MissionPlanner | Lib/site-packages/numpy/lib/utils.py | 54 | 36175 | import os
import sys
import types
import re
from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
from numpy.core import product, ndarray, ufunc
__all__ = ['issubclass_', 'get_numpy_include', 'issubsctype', 'issubdtype',
'deprecate', 'deprecate_with_doc', 'get_numarray_include',
'get_include', 'info', 'source', 'who', 'lookfor', 'byte_bounds',
'may_share_memory', 'safe_eval']
def get_include():
"""
Return the directory that contains the NumPy \\*.h header files.
Extension modules that need to compile against NumPy should use this
function to locate the appropriate include directory.
Notes
-----
When using ``distutils``, for example in ``setup.py``.
::
import numpy as np
...
Extension('extension_name', ...
include_dirs=[np.get_include()])
...
"""
import numpy
if numpy.show_config is None:
# running from numpy source directory
d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include')
else:
# using installed numpy core headers
import numpy.core as core
d = os.path.join(os.path.dirname(core.__file__), 'include')
return d
def get_numarray_include(type=None):
"""
Return the directory that contains the numarray \\*.h header files.
Extension modules that need to compile against numarray should use this
function to locate the appropriate include directory.
Parameters
----------
type : any, optional
If `type` is not None, the location of the NumPy headers is returned
as well.
Returns
-------
dirs : str or list of str
If `type` is None, `dirs` is a string containing the path to the
numarray headers.
If `type` is not None, `dirs` is a list of strings with first the
path(s) to the numarray headers, followed by the path to the NumPy
headers.
Notes
-----
Useful when using ``distutils``, for example in ``setup.py``.
::
import numpy as np
...
Extension('extension_name', ...
include_dirs=[np.get_numarray_include()])
...
"""
from numpy.numarray import get_numarray_include_dirs
include_dirs = get_numarray_include_dirs()
if type is None:
return include_dirs[0]
else:
return include_dirs + [get_include()]
if sys.version_info < (2, 4):
# Can't set __name__ in 2.3
import new
def _set_function_name(func, name):
func = new.function(func.func_code, func.func_globals,
name, func.func_defaults, func.func_closure)
return func
else:
def _set_function_name(func, name):
func.__name__ = name
return func
class _Deprecate(object):
"""
Decorator class to deprecate old functions.
Refer to `deprecate` for details.
See Also
--------
deprecate
"""
def __init__(self, old_name=None, new_name=None, message=None):
self.old_name = old_name
self.new_name = new_name
self.message = message
def __call__(self, func, *args, **kwargs):
"""
Decorator call. Refer to ``deprecate``.
"""
old_name = self.old_name
new_name = self.new_name
message = self.message
import warnings
if old_name is None:
try:
old_name = func.func_name
except AttributeError:
old_name = func.__name__
if new_name is None:
depdoc = "`%s` is deprecated!" % old_name
else:
depdoc = "`%s` is deprecated, use `%s` instead!" % \
(old_name, new_name)
if message is not None:
depdoc += "\n" + message
def newfunc(*args,**kwds):
"""`arrayrange` is deprecated, use `arange` instead!"""
warnings.warn(depdoc, DeprecationWarning)
return func(*args, **kwds)
newfunc = _set_function_name(newfunc, old_name)
doc = func.__doc__
if doc is None:
doc = depdoc
else:
doc = '\n\n'.join([depdoc, doc])
newfunc.__doc__ = doc
try:
d = func.__dict__
except AttributeError:
pass
else:
newfunc.__dict__.update(d)
return newfunc
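# Usage sketch (illustrative only; both spellings go through _Deprecate):
#   new_func = deprecate(old_func, 'old_func', 'new_func')  # functional form
#   @deprecate                                              # decorator form
#   def old_func(x):
#       ...
# Either way, calling the wrapper emits a DeprecationWarning, then delegates.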
def deprecate(*args, **kwargs):
"""
Issues a DeprecationWarning, adds warning to `old_name`'s
docstring, rebinds ``old_name.__name__`` and returns the new
function object.
This function may also be used as a decorator.
Parameters
----------
func : function
The function to be deprecated.
old_name : str, optional
The name of the function to be deprecated. Default is None, in which
case the name of `func` is used.
new_name : str, optional
The new name for the function. Default is None, in which case
the deprecation message is that `old_name` is deprecated. If given,
the deprecation message is that `old_name` is deprecated and `new_name`
should be used instead.
message : str, optional
Additional explanation of the deprecation. Displayed in the docstring
after the warning.
Returns
-------
old_func : function
The deprecated function.
Examples
--------
    Note that ``olduint`` returns a value after printing a DeprecationWarning:
>>> olduint = np.deprecate(np.uint)
>>> olduint(6)
/usr/lib/python2.5/site-packages/numpy/lib/utils.py:114:
DeprecationWarning: uint32 is deprecated
warnings.warn(str1, DeprecationWarning)
6
"""
# Deprecate may be run as a function or as a decorator
# If run as a function, we initialise the decorator class
# and execute its __call__ method.
if args:
fn = args[0]
args = args[1:]
# backward compatibility -- can be removed
# after next release
if 'newname' in kwargs:
kwargs['new_name'] = kwargs.pop('newname')
if 'oldname' in kwargs:
kwargs['old_name'] = kwargs.pop('oldname')
return _Deprecate(*args, **kwargs)(fn)
else:
return _Deprecate(*args, **kwargs)
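# Illustrative decorator-style usage, equivalent to the functional form shown
# in the docstring above (`old_func` and `new_func` are hypothetical names):
#
#     @deprecate(old_name='old_func', new_name='new_func')
#     def old_func(x):
#         return x
#
#     old_func(1)  # warns "`old_func` is deprecated, use `new_func` instead!"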
deprecate_with_doc = lambda msg: _Deprecate(message=msg)
get_numpy_include = deprecate(get_include, 'get_numpy_include', 'get_include')
#--------------------------------------------
# Determine if two arrays can share memory
#--------------------------------------------
def byte_bounds(a):
"""
Returns pointers to the end-points of an array.
Parameters
----------
a : ndarray
Input array. It must conform to the Python-side of the array interface.
Returns
-------
(low, high) : tuple of 2 integers
The first integer is the first byte of the array, the second integer is
just past the last byte of the array. If `a` is not contiguous it
will not use every byte between the (`low`, `high`) values.
Examples
--------
>>> I = np.eye(2, dtype='f'); I.dtype
dtype('float32')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
>>> I = np.eye(2, dtype='G'); I.dtype
dtype('complex192')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
"""
ai = a.__array_interface__
a_data = ai['data'][0]
astrides = ai['strides']
ashape = ai['shape']
nd_a = len(ashape)
bytes_a = int(ai['typestr'][2:])
a_low = a_high = a_data
if astrides is None: # contiguous case
a_high += product(ashape, dtype=int)*bytes_a
else:
for shape, stride in zip(ashape, astrides):
if stride < 0:
a_low += (shape-1)*stride
else:
a_high += (shape-1)*stride
a_high += bytes_a
return a_low, a_high
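# Illustrative consequence of the negative-stride branch above: a reversed
# view's data pointer sits at the buffer's last element, so the negative
# stride pulls `a_low` back and the bounds equal those of the parent array.
#
#     x = numpy.arange(4, dtype='b')
#     byte_bounds(x[::-1]) == byte_bounds(x)   # True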
def may_share_memory(a, b):
"""
Determine if two arrays can share memory
The memory-bounds of a and b are computed. If they overlap then
this function returns True. Otherwise, it returns False.
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Parameters
----------
a, b : ndarray
Returns
-------
out : bool
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
"""
a_low, a_high = byte_bounds(a)
b_low, b_high = byte_bounds(b)
if b_low >= a_high or a_low >= b_high:
return False
return True
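# Illustrative caveat: only byte bounds are compared, so interleaved views
# report True even though they share no element.
#
#     x = numpy.arange(10)
#     may_share_memory(x[::2], x[1::2])   # True, despite disjoint elements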
#-----------------------------------------------------------------------------
# Function for output and information on the variables used.
#-----------------------------------------------------------------------------
def who(vardict=None):
"""
Print the Numpy arrays in the given dictionary.
    If there is no dictionary passed in or `vardict` is None then the NumPy
    arrays in the globals() dictionary are printed (all NumPy arrays in the
    namespace).
Parameters
----------
vardict : dict, optional
A dictionary possibly containing ndarrays. Default is globals().
Returns
-------
out : None
Returns 'None'.
Notes
-----
Prints out the name, shape, bytes and type of all of the ndarrays present
in `vardict`.
Examples
--------
>>> a = np.arange(10)
>>> b = np.ones(20)
>>> np.who()
Name Shape Bytes Type
===========================================================
a 10 40 int32
b 20 160 float64
Upper bound on total bytes = 200
>>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str',
... 'idx':5}
>>> np.who(d)
Name Shape Bytes Type
===========================================================
y 3 24 float64
x 2 16 float64
Upper bound on total bytes = 40
"""
if vardict is None:
frame = sys._getframe().f_back
vardict = frame.f_globals
sta = []
cache = {}
for name in vardict.keys():
if isinstance(vardict[name],ndarray):
var = vardict[name]
idv = id(var)
if idv in cache.keys():
namestr = name + " (%s)" % cache[idv]
original=0
else:
cache[idv] = name
namestr = name
original=1
shapestr = " x ".join(map(str, var.shape))
bytestr = str(var.nbytes)
sta.append([namestr, shapestr, bytestr, var.dtype.name,
original])
maxname = 0
maxshape = 0
maxbyte = 0
totalbytes = 0
for k in range(len(sta)):
val = sta[k]
if maxname < len(val[0]):
maxname = len(val[0])
if maxshape < len(val[1]):
maxshape = len(val[1])
if maxbyte < len(val[2]):
maxbyte = len(val[2])
if val[4]:
totalbytes += int(val[2])
if len(sta) > 0:
sp1 = max(10,maxname)
sp2 = max(10,maxshape)
sp3 = max(10,maxbyte)
prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ')
print prval + "\n" + "="*(len(prval)+5) + "\n"
for k in range(len(sta)):
val = sta[k]
print "%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
val[1], ' '*(sp2-len(val[1])+5),
val[2], ' '*(sp3-len(val[2])+5),
val[3])
print "\nUpper bound on total bytes = %d" % totalbytes
return
#-----------------------------------------------------------------------------
# NOTE: pydoc defines a help function which works similarly to this
# except it uses a pager to take over the screen.
# combine name and arguments and split to multiple lines of
# width characters. End lines on a comma and begin argument list
# indented with the rest of the arguments.
def _split_line(name, arguments, width):
firstwidth = len(name)
k = firstwidth
newstr = name
sepstr = ", "
arglist = arguments.split(sepstr)
for argument in arglist:
if k == firstwidth:
addstr = ""
else:
addstr = sepstr
k = k + len(argument) + len(addstr)
if k > width:
k = firstwidth + 1 + len(argument)
newstr = newstr + ",\n" + " "*(firstwidth+2) + argument
else:
newstr = newstr + addstr + argument
return newstr
_namedict = None
_dictlist = None
# Traverse all module directories underneath globals
# to see if something is defined
def _makenamedict(module='numpy'):
module = __import__(module, globals(), locals(), [])
thedict = {module.__name__:module.__dict__}
dictlist = [module.__name__]
totraverse = [module.__dict__]
    while totraverse:
thisdict = totraverse.pop(0)
for x in thisdict.keys():
if isinstance(thisdict[x],types.ModuleType):
modname = thisdict[x].__name__
if modname not in dictlist:
moddict = thisdict[x].__dict__
dictlist.append(modname)
totraverse.append(moddict)
thedict[modname] = moddict
return thedict, dictlist
def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
"""
Get help information for a function, class, or module.
Parameters
----------
object : object or str, optional
Input object or name to get information about. If `object` is a
numpy object, its docstring is given. If it is a string, available
modules are searched for matching objects.
If None, information about `info` itself is returned.
maxwidth : int, optional
Printing width.
    output : file-like object, optional
        File-like object that the output is written to, default is ``stdout``.
        The object has to be opened in 'w' or 'a' mode.
toplevel : str, optional
Start search at this level.
See Also
--------
source, lookfor
Notes
-----
When used interactively with an object, ``np.info(obj)`` is equivalent to
``help(obj)`` on the Python prompt or ``obj?`` on the IPython prompt.
Examples
--------
>>> np.info(np.polyval) # doctest: +SKIP
polyval(p, x)
Evaluate the polynomial p at x.
...
When using a string for `object` it is possible to get multiple results.
>>> np.info('fft') # doctest: +SKIP
*** Found in numpy ***
Core FFT routines
...
*** Found in numpy.fft ***
fft(a, n=None, axis=-1)
...
*** Repeat reference found in numpy.fft.fftpack ***
*** Total of 3 references found. ***
"""
global _namedict, _dictlist
# Local import to speed up numpy's import time.
import pydoc, inspect
if hasattr(object,'_ppimport_importer') or \
hasattr(object, '_ppimport_module'):
object = object._ppimport_module
elif hasattr(object, '_ppimport_attr'):
object = object._ppimport_attr
if object is None:
info(info)
elif isinstance(object, ndarray):
import numpy.numarray as nn
nn.info(object, output=output, numpy=1)
elif isinstance(object, str):
if _namedict is None:
_namedict, _dictlist = _makenamedict(toplevel)
numfound = 0
objlist = []
for namestr in _dictlist:
try:
obj = _namedict[namestr][object]
if id(obj) in objlist:
print >> output, "\n *** Repeat reference found in %s *** " % namestr
else:
objlist.append(id(obj))
print >> output, " *** Found in %s ***" % namestr
info(obj)
print >> output, "-"*maxwidth
numfound += 1
except KeyError:
pass
if numfound == 0:
print >> output, "Help for %s not found." % object
else:
print >> output, "\n *** Total of %d references found. ***" % numfound
elif inspect.isfunction(object):
name = object.func_name
arguments = inspect.formatargspec(*inspect.getargspec(object))
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print >> output, " " + argstr + "\n"
print >> output, inspect.getdoc(object)
elif inspect.isclass(object):
name = object.__name__
arguments = "()"
try:
if hasattr(object, '__init__'):
arguments = inspect.formatargspec(*inspect.getargspec(object.__init__.im_func))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
except:
pass
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print >> output, " " + argstr + "\n"
doc1 = inspect.getdoc(object)
if doc1 is None:
if hasattr(object,'__init__'):
print >> output, inspect.getdoc(object.__init__)
else:
print >> output, inspect.getdoc(object)
methods = pydoc.allmethods(object)
if methods != []:
print >> output, "\n\nMethods:\n"
for meth in methods:
if meth[0] == '_':
continue
thisobj = getattr(object, meth, None)
if thisobj is not None:
methstr, other = pydoc.splitdoc(inspect.getdoc(thisobj) or "None")
print >> output, " %s -- %s" % (meth, methstr)
elif type(object) is types.InstanceType: ## check for __call__ method
print >> output, "Instance of class: ", object.__class__.__name__
print >> output
if hasattr(object, '__call__'):
arguments = inspect.formatargspec(*inspect.getargspec(object.__call__.im_func))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
else:
arguments = "()"
if hasattr(object,'name'):
name = "%s" % object.name
else:
name = "<name>"
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print >> output, " " + argstr + "\n"
doc = inspect.getdoc(object.__call__)
if doc is not None:
print >> output, inspect.getdoc(object.__call__)
print >> output, inspect.getdoc(object)
else:
print >> output, inspect.getdoc(object)
elif inspect.ismethod(object):
name = object.__name__
arguments = inspect.formatargspec(*inspect.getargspec(object.im_func))
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
else:
arguments = "()"
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print >> output, " " + argstr + "\n"
print >> output, inspect.getdoc(object)
elif hasattr(object, '__doc__'):
print >> output, inspect.getdoc(object)
def source(object, output=sys.stdout):
"""
Print or write to a file the source code for a Numpy object.
The source code is only returned for objects written in Python. Many
functions and classes are defined in C and will therefore not return
useful information.
Parameters
----------
object : numpy object
Input object. This can be any object (function, class, module, ...).
output : file object, optional
        If `output` is not supplied then the source code is printed to the
        screen (sys.stdout). The file object must be created with either
        write 'w' or append 'a' mode.
See Also
--------
lookfor, info
Examples
--------
>>> np.source(np.interp) #doctest: +SKIP
In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py
def interp(x, xp, fp, left=None, right=None):
\"\"\".... (full docstring printed)\"\"\"
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
The source code is only returned for objects written in Python.
>>> np.source(np.array) #doctest: +SKIP
Not available for this object.
"""
# Local import to speed up numpy's import time.
import inspect
try:
print >> output, "In file: %s\n" % inspect.getsourcefile(object)
print >> output, inspect.getsource(object)
except:
print >> output, "Not available for this object."
# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}
# where kind: "func", "class", "module", "object"
# and index: index in breadth-first namespace traversal
_lookfor_caches = {}
# regexp whose match indicates that the string may contain a function signature
_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I)
def lookfor(what, module=None, import_modules=True, regenerate=False,
output=None):
"""
Do a keyword search on docstrings.
    A list of objects that matched the search is displayed,
sorted by relevance. All given keywords need to be found in the
docstring for it to be returned as a result, but the order does
not matter.
Parameters
----------
what : str
String containing words to look for.
module : str or list, optional
Name of module(s) whose docstrings to go through.
import_modules : bool, optional
Whether to import sub-modules in packages. Default is True.
regenerate : bool, optional
Whether to re-generate the docstring cache. Default is False.
output : file-like, optional
File-like object to write the output to. If omitted, use a pager.
See Also
--------
source, info
Notes
-----
Relevance is determined only roughly, by checking if the keywords occur
in the function name, at the start of a docstring, etc.
Examples
--------
>>> np.lookfor('binary representation')
Search results for 'binary representation'
------------------------------------------
numpy.binary_repr
Return the binary representation of the input number as a string.
numpy.core.setup_common.long_double_representation
Given a binary dump as given by GNU od -b, look for long double
numpy.base_repr
Return a string representation of a number in the given base system.
...
"""
import pydoc
# Cache
cache = _lookfor_generate_cache(module, import_modules, regenerate)
# Search
# XXX: maybe using a real stemming search engine would be better?
found = []
whats = str(what).lower().split()
if not whats: return
for name, (docstring, kind, index) in cache.iteritems():
if kind in ('module', 'object'):
# don't show modules or objects
continue
ok = True
doc = docstring.lower()
for w in whats:
if w not in doc:
ok = False
break
if ok:
found.append(name)
# Relevance sort
# XXX: this is full Harrison-Stetson heuristics now,
# XXX: it probably could be improved
kind_relevance = {'func': 1000, 'class': 1000,
'module': -1000, 'object': -1000}
def relevance(name, docstr, kind, index):
r = 0
# do the keywords occur within the start of the docstring?
first_doc = "\n".join(docstr.lower().strip().split("\n")[:3])
r += sum([200 for w in whats if w in first_doc])
# do the keywords occur in the function name?
r += sum([30 for w in whats if w in name])
# is the full name long?
r += -len(name) * 5
# is the object of bad type?
r += kind_relevance.get(kind, -1000)
# is the object deep in namespace hierarchy?
r += -name.count('.') * 10
r += max(-index / 100, -100)
return r
def relevance_value(a):
return relevance(a, *cache[a])
found.sort(key=relevance_value)
# Pretty-print
s = "Search results for '%s'" % (' '.join(whats))
help_text = [s, "-"*len(s)]
for name in found[::-1]:
doc, kind, ix = cache[name]
doclines = [line.strip() for line in doc.strip().split("\n")
if line.strip()]
# find a suitable short description
try:
first_doc = doclines[0].strip()
if _function_signature_re.search(first_doc):
first_doc = doclines[1].strip()
except IndexError:
first_doc = ""
help_text.append("%s\n %s" % (name, first_doc))
if not found:
help_text.append("Nothing found.")
# Output
if output is not None:
output.write("\n".join(help_text))
elif len(help_text) > 10:
pager = pydoc.getpager()
pager("\n".join(help_text))
else:
print "\n".join(help_text)
def _lookfor_generate_cache(module, import_modules, regenerate):
"""
Generate docstring cache for given module.
Parameters
----------
module : str, None, module
Module for which to generate docstring cache
import_modules : bool
Whether to import sub-modules in packages.
    regenerate : bool
Re-generate the docstring cache
Returns
-------
cache : dict {obj_full_name: (docstring, kind, index), ...}
Docstring cache for the module, either cached one (regenerate=False)
or newly generated.
"""
global _lookfor_caches
# Local import to speed up numpy's import time.
import inspect
from cStringIO import StringIO
if module is None:
module = "numpy"
if isinstance(module, str):
try:
__import__(module)
except ImportError:
return {}
module = sys.modules[module]
elif isinstance(module, list) or isinstance(module, tuple):
cache = {}
for mod in module:
cache.update(_lookfor_generate_cache(mod, import_modules,
regenerate))
return cache
if id(module) in _lookfor_caches and not regenerate:
return _lookfor_caches[id(module)]
# walk items and collect docstrings
cache = {}
_lookfor_caches[id(module)] = cache
seen = {}
index = 0
stack = [(module.__name__, module)]
while stack:
name, item = stack.pop(0)
if id(item) in seen: continue
seen[id(item)] = True
index += 1
kind = "object"
if inspect.ismodule(item):
kind = "module"
try:
_all = item.__all__
except AttributeError:
_all = None
# import sub-packages
if import_modules and hasattr(item, '__path__'):
for pth in item.__path__:
for mod_path in os.listdir(pth):
this_py = os.path.join(pth, mod_path)
init_py = os.path.join(pth, mod_path, '__init__.py')
if os.path.isfile(this_py) and mod_path.endswith('.py'):
to_import = mod_path[:-3]
elif os.path.isfile(init_py):
to_import = mod_path
else:
continue
if to_import == '__init__':
continue
try:
# Catch SystemExit, too
base_exc = BaseException
except NameError:
# Python 2.4 doesn't have BaseException
base_exc = Exception
try:
old_stdout = sys.stdout
old_stderr = sys.stderr
try:
sys.stdout = StringIO()
sys.stderr = StringIO()
__import__("%s.%s" % (name, to_import))
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
except base_exc:
continue
for n, v in _getmembers(item):
item_name = getattr(v, '__name__', "%s.%s" % (name, n))
mod_name = getattr(v, '__module__', None)
if '.' not in item_name and mod_name:
item_name = "%s.%s" % (mod_name, item_name)
if not item_name.startswith(name + '.'):
# don't crawl "foreign" objects
if isinstance(v, ufunc):
# ... unless they are ufuncs
pass
else:
continue
elif not (inspect.ismodule(v) or _all is None or n in _all):
continue
stack.append(("%s.%s" % (name, n), v))
elif inspect.isclass(item):
kind = "class"
for n, v in _getmembers(item):
stack.append(("%s.%s" % (name, n), v))
elif hasattr(item, "__call__"):
kind = "func"
doc = inspect.getdoc(item)
if doc is not None:
cache[name] = (doc, kind, index)
return cache
def _getmembers(item):
import inspect
try:
members = inspect.getmembers(item)
except AttributeError:
members = [(x, getattr(item, x)) for x in dir(item)
if hasattr(item, x)]
return members
#-----------------------------------------------------------------------------
# The following SafeEval class and company are adapted from Michael Spencer's
# ASPN Python Cookbook recipe:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/364469
# Accordingly it is mostly Copyright 2006 by Michael Spencer.
# The recipe, like most of the other ASPN Python Cookbook recipes was made
# available under the Python license.
# http://www.python.org/license
# It has been modified to:
# * handle unary -/+
# * support True/False/None
# * raise SyntaxError instead of a custom exception.
class SafeEval(object):
"""
Object to evaluate constant string expressions.
This includes strings with lists, dicts and tuples using the abstract
syntax tree created by ``compiler.parse``.
For an example of usage, see `safe_eval`.
See Also
--------
safe_eval
"""
if sys.version_info[0] < 3:
def visit(self, node, **kw):
cls = node.__class__
meth = getattr(self,'visit'+cls.__name__,self.default)
return meth(node, **kw)
def default(self, node, **kw):
raise SyntaxError("Unsupported source construct: %s"
% node.__class__)
def visitExpression(self, node, **kw):
for child in node.getChildNodes():
return self.visit(child, **kw)
def visitConst(self, node, **kw):
return node.value
def visitDict(self, node,**kw):
return dict([(self.visit(k),self.visit(v)) for k,v in node.items])
def visitTuple(self, node, **kw):
return tuple([self.visit(i) for i in node.nodes])
def visitList(self, node, **kw):
return [self.visit(i) for i in node.nodes]
def visitUnaryAdd(self, node, **kw):
return +self.visit(node.getChildNodes()[0])
def visitUnarySub(self, node, **kw):
return -self.visit(node.getChildNodes()[0])
def visitName(self, node, **kw):
if node.name == 'False':
return False
elif node.name == 'True':
return True
elif node.name == 'None':
return None
else:
raise SyntaxError("Unknown name: %s" % node.name)
else:
def visit(self, node):
cls = node.__class__
meth = getattr(self, 'visit' + cls.__name__, self.default)
return meth(node)
def default(self, node):
raise SyntaxError("Unsupported source construct: %s"
% node.__class__)
def visitExpression(self, node):
return self.visit(node.body)
def visitNum(self, node):
return node.n
def visitStr(self, node):
return node.s
def visitBytes(self, node):
return node.s
def visitDict(self, node,**kw):
return dict([(self.visit(k), self.visit(v))
for k, v in zip(node.keys, node.values)])
def visitTuple(self, node):
return tuple([self.visit(i) for i in node.elts])
def visitList(self, node):
return [self.visit(i) for i in node.elts]
def visitUnaryOp(self, node):
import ast
if isinstance(node.op, ast.UAdd):
return +self.visit(node.operand)
elif isinstance(node.op, ast.USub):
return -self.visit(node.operand)
else:
raise SyntaxError("Unknown unary op: %r" % node.op)
def visitName(self, node):
if node.id == 'False':
return False
elif node.id == 'True':
return True
elif node.id == 'None':
return None
else:
raise SyntaxError("Unknown name: %s" % node.id)
def safe_eval(source):
"""
Protected string evaluation.
Evaluate a string containing a Python literal expression without
allowing the execution of arbitrary non-literal code.
Parameters
----------
source : str
The string to evaluate.
Returns
-------
obj : object
The result of evaluating `source`.
Raises
------
SyntaxError
If the code has invalid Python syntax, or if it contains non-literal
code.
Examples
--------
>>> np.safe_eval('1')
1
>>> np.safe_eval('[1, 2, 3]')
[1, 2, 3]
>>> np.safe_eval('{"foo": ("bar", 10.0)}')
{'foo': ('bar', 10.0)}
>>> np.safe_eval('import os')
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
Traceback (most recent call last):
...
SyntaxError: Unsupported source construct: compiler.ast.CallFunc
"""
# Local import to speed up numpy's import time.
try:
import compiler
except ImportError:
import ast as compiler
walker = SafeEval()
    # A SyntaxError raised while parsing or walking propagates to the caller.
    ast = compiler.parse(source, mode="eval")
    return walker.visit(ast)
#-----------------------------------------------------------------------------
| gpl-3.0 |
PLyczkowski/Sticky-Keymap | 2.74/python/lib/encodings/mac_roman.py | 272 | 13480 | """ Python Character Mapping Codec mac_roman generated from 'MAPPINGS/VENDORS/APPLE/ROMAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-roman',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
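# Illustrative usage (not part of the generated table): once the standard
# encodings package registers this module, the codec is available by name.
#
#     '\xe9'.encode('mac-roman')    # -> b'\x8e' (e with acute)
#     b'\xa5'.decode('mac-roman')   # -> '\u2022' (BULLET)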
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> CONTROL CHARACTER
'\x01' # 0x01 -> CONTROL CHARACTER
'\x02' # 0x02 -> CONTROL CHARACTER
'\x03' # 0x03 -> CONTROL CHARACTER
'\x04' # 0x04 -> CONTROL CHARACTER
'\x05' # 0x05 -> CONTROL CHARACTER
'\x06' # 0x06 -> CONTROL CHARACTER
'\x07' # 0x07 -> CONTROL CHARACTER
'\x08' # 0x08 -> CONTROL CHARACTER
'\t' # 0x09 -> CONTROL CHARACTER
'\n' # 0x0A -> CONTROL CHARACTER
'\x0b' # 0x0B -> CONTROL CHARACTER
'\x0c' # 0x0C -> CONTROL CHARACTER
'\r' # 0x0D -> CONTROL CHARACTER
'\x0e' # 0x0E -> CONTROL CHARACTER
'\x0f' # 0x0F -> CONTROL CHARACTER
'\x10' # 0x10 -> CONTROL CHARACTER
'\x11' # 0x11 -> CONTROL CHARACTER
'\x12' # 0x12 -> CONTROL CHARACTER
'\x13' # 0x13 -> CONTROL CHARACTER
'\x14' # 0x14 -> CONTROL CHARACTER
'\x15' # 0x15 -> CONTROL CHARACTER
'\x16' # 0x16 -> CONTROL CHARACTER
'\x17' # 0x17 -> CONTROL CHARACTER
'\x18' # 0x18 -> CONTROL CHARACTER
'\x19' # 0x19 -> CONTROL CHARACTER
'\x1a' # 0x1A -> CONTROL CHARACTER
'\x1b' # 0x1B -> CONTROL CHARACTER
'\x1c' # 0x1C -> CONTROL CHARACTER
'\x1d' # 0x1D -> CONTROL CHARACTER
'\x1e' # 0x1E -> CONTROL CHARACTER
'\x1f' # 0x1F -> CONTROL CHARACTER
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> CONTROL CHARACTER
'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
'\u2020' # 0xA0 -> DAGGER
'\xb0' # 0xA1 -> DEGREE SIGN
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa7' # 0xA4 -> SECTION SIGN
'\u2022' # 0xA5 -> BULLET
'\xb6' # 0xA6 -> PILCROW SIGN
'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
'\xae' # 0xA8 -> REGISTERED SIGN
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u2122' # 0xAA -> TRADE MARK SIGN
'\xb4' # 0xAB -> ACUTE ACCENT
'\xa8' # 0xAC -> DIAERESIS
'\u2260' # 0xAD -> NOT EQUAL TO
'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
'\u221e' # 0xB0 -> INFINITY
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
'\xa5' # 0xB4 -> YEN SIGN
'\xb5' # 0xB5 -> MICRO SIGN
'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
'\u2211' # 0xB7 -> N-ARY SUMMATION
'\u220f' # 0xB8 -> N-ARY PRODUCT
'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
'\u222b' # 0xBA -> INTEGRAL
'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
'\xe6' # 0xBE -> LATIN SMALL LETTER AE
'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
'\xbf' # 0xC0 -> INVERTED QUESTION MARK
'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
'\xac' # 0xC2 -> NOT SIGN
'\u221a' # 0xC3 -> SQUARE ROOT
'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
'\u2248' # 0xC5 -> ALMOST EQUAL TO
'\u2206' # 0xC6 -> INCREMENT
'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
'\xa0' # 0xCA -> NO-BREAK SPACE
'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
'\u2013' # 0xD0 -> EN DASH
'\u2014' # 0xD1 -> EM DASH
'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
'\xf7' # 0xD6 -> DIVISION SIGN
'\u25ca' # 0xD7 -> LOZENGE
'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
'\u2044' # 0xDA -> FRACTION SLASH
'\u20ac' # 0xDB -> EURO SIGN
'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\ufb01' # 0xDE -> LATIN SMALL LIGATURE FI
'\ufb02' # 0xDF -> LATIN SMALL LIGATURE FL
'\u2021' # 0xE0 -> DOUBLE DAGGER
'\xb7' # 0xE1 -> MIDDLE DOT
'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
'\u2030' # 0xE4 -> PER MILLE SIGN
'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\uf8ff' # 0xF0 -> Apple logo
'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\u02dc' # 0xF7 -> SMALL TILDE
'\xaf' # 0xF8 -> MACRON
'\u02d8' # 0xF9 -> BREVE
'\u02d9' # 0xFA -> DOT ABOVE
'\u02da' # 0xFB -> RING ABOVE
'\xb8' # 0xFC -> CEDILLA
'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
'\u02db' # 0xFE -> OGONEK
'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| gpl-2.0 |
LogicalDash/kivy | kivy/tests/test_uix_stacklayout.py | 55 | 4058 | '''
uix.stacklayout tests
=====================
'''
import unittest
from kivy.uix.stacklayout import StackLayout
from kivy.uix.widget import Widget
class UixStackLayoutTest(unittest.TestCase):
def test_stacklayout_no_children(self):
sl = StackLayout()
sl.do_layout()
def test_stacklayout_default(self):
# Default orientation is lr-tb.
sl = StackLayout()
wgts = [Widget(size_hint=(.5, .5)) for i in range(4)]
for wgt in wgts:
sl.add_widget(wgt)
sl.do_layout()
self.assertEqual(wgts[0].pos, [0, sl.height / 2.])
self.assertEqual(wgts[1].pos, [sl.width / 2., sl.height / 2.])
self.assertEqual(wgts[2].pos, [0, 0])
self.assertEqual(wgts[3].pos, [sl.width / 2., 0])
def test_stacklayout_fixed_size(self):
sl = StackLayout()
wgts = [Widget(size=(50, 50), size_hint=(None, None))
for i in range(4)]
for wgt in wgts:
sl.add_widget(wgt)
sl.do_layout()
self.assertEqual(wgts[0].pos, [0, sl.height / 2.])
self.assertEqual(wgts[1].pos, [sl.width / 2., sl.height / 2.])
self.assertEqual(wgts[2].pos, [0, 0])
self.assertEqual(wgts[3].pos, [sl.width / 2., 0])
def test_stacklayout_orientation_btrl(self):
        # Orientation is set to bt-rl below.
sl = StackLayout()
wgts = [Widget(size_hint=(.5, .5)) for i in range(4)]
for wgt in wgts:
sl.add_widget(wgt)
sl.orientation = 'bt-rl'
sl.do_layout()
self.assertEqual(wgts[0].pos, [sl.width / 2., 0])
self.assertEqual(wgts[1].pos, [sl.width / 2., sl.height / 2.])
self.assertEqual(wgts[2].pos, [0, 0])
self.assertEqual(wgts[3].pos, [0, sl.height / 2.])
def test_stacklayout_orientation_rlbt(self):
        # Orientation is set to rl-bt below.
sl = StackLayout()
wgts = [Widget(size_hint=(.5, .5)) for i in range(4)]
for wgt in wgts:
sl.add_widget(wgt)
sl.orientation = 'rl-bt'
sl.do_layout()
self.assertEqual(wgts[0].pos, [sl.width / 2., 0])
self.assertEqual(wgts[1].pos, [0, 0])
self.assertEqual(wgts[2].pos, [sl.width / 2., sl.height / 2.])
self.assertEqual(wgts[3].pos, [0, sl.height / 2.])
def test_stacklayout_padding(self):
sl = StackLayout()
wgts = [Widget(size_hint=(.5, .5)) for i in range(4)]
for wgt in wgts:
sl.add_widget(wgt)
sl.padding = 5.
sl.do_layout()
self.assertEqual(wgts[0].pos, [5., sl.height / 2.])
self.assertEqual(wgts[1].pos, [sl.width / 2., sl.height / 2.])
self.assertEqual(wgts[2].pos, [5., 5.])
self.assertEqual(wgts[3].pos, [sl.width / 2., 5.])
def test_stacklayout_spacing(self):
sl = StackLayout()
wgts = [Widget(size_hint=(.5, .5)) for i in range(4)]
for wgt in wgts:
sl.add_widget(wgt)
sl.spacing = 10
sl.do_layout()
self.assertEqual(wgts[0].pos, [0, sl.height / 2.])
self.assertEqual(wgts[1].pos, [sl.width / 2. + 5, sl.height / 2.])
self.assertEqual(wgts[2].pos, [0, -10])
self.assertEqual(wgts[3].pos, [sl.width / 2. + 5, -10])
def test_stacklayout_overflow(self):
sl = StackLayout()
wgts = [Widget(size_hint=(.2 * i, .2 * i)) for i in range(1, 4)]
for wgt in wgts:
sl.add_widget(wgt)
sl.padding = 5
sl.spacing = 5
sl.do_layout()
self.assertEqual(wgts[0].pos, [5, 77])
self.assertEqual(wgts[1].pos, [27, 59])
# floating point error, requires almost equal
self.assertAlmostEqual(wgts[2].pos[0], 5)
self.assertAlmostEqual(wgts[2].pos[1], 0)
def test_stacklayout_nospace(self):
# happens when padding is too big
sl = StackLayout()
wgts = [Widget(size_hint=(1., .25)) for i in range(1, 4)]
for wgt in wgts:
sl.add_widget(wgt)
sl.padding = 10
sl.do_layout()
| mit |
CSysTeam/SecurityPackage | MainAlgorithms/Monoalphabetics/Monoalphabetic.py | 1 | 3288 |
class Monoalphabetic:
""" Frequency Information:
E 12.51%
T 9.25
A 8.04
O 7.60
I 7.26
N 7.09
S 6.54
R 6.12
H 5.49
L 4.14
D 3.99
C 3.06
U 2.71
M 2.53
F 2.30
P 2.00
G 1.96
W 1.92
Y 1.73
B 1.54
V 0.99
K 0.67
X 0.19
J 0.16
Q 0.11
Z 0.09
"""
def analyse(self, plainText: str, cipherText: str) -> str:
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
mainKey=""
temp = plainText.upper()
i = 0
while i < len(plainText):
index = LETTERS.index(temp[i])
mainKey = mainKey[:index] + cipherText[i] + mainKey[index+1:]
i = i + 1
temp = mainKey
temp = temp.upper()
i = 0
while i < len(temp):
if temp[i] == '-':
index = LETTERS.index(temp[i - 1])
if index == 25:
index = -1
temp = temp[:i] + LETTERS[index + 1] + temp[i + 1:]
i = i + 1
temp = temp.lower()
return temp
def decrypt(self, cipherText: str, key: str) -> str:
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
temp = cipherText.upper()
KEY = key.upper()
plain = ""
i = 0
while i < len(cipherText):
index = KEY.index(temp[i])
plain += LETTERS[index]
i = i + 1
plain = plain.lower()
return plain
def encrypt(self, plainText: str, key: str) -> str:
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
temp = plainText.upper()
i = 0
EncryptedText = ""
while i < len(plainText):
index = LETTERS.index(temp[i])
EncryptedText += key[index]
i = i + 1
EncryptedText = EncryptedText.upper()
return EncryptedText
def analyseUsingCharFrequency(self, cipher: str) -> str:
freqInfo = "ETAOINSRHLDCUMFPGWYBVKXJQZ"
newTemp = "-" * len(cipher)
temp = cipher.upper()
dictionary = {}
for letters in temp:
dictionary[letters] = 0
for letters in temp:
dictionary[letters] += 1
dictionary = sorted(dictionary.items(), reverse=True, key=lambda x: x[1])
#print("len: ", len(temp))
for position in range(0, len(temp)):
#print("position: ", position)
#print(dictionary[position])
if position >= len(dictionary) - 1:
break
#print("dict: ", dictionary[1][0])
i = 0
while i < len(dictionary):
#print(len(dictionary))
#print(dictionary[i][0])
j = 0
while j < len(temp):
#print("temp: ", temp[j],"dict: ", dictionary[i][0])
if temp[j] == dictionary[i][0]:
#print("..", temp[j:])
newTemp = newTemp[:j] + freqInfo[i] + newTemp[j + 1:]
#print("tmp: ", temp)
j = j + 1
i = i + 1
return newTemp
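# Illustrative round trip under an assumed 26-letter uppercase key.
if __name__ == "__main__":
    mono = Monoalphabetic()
    demo_key = "QWERTYUIOPASDFGHJKLZXCVBNM"
    cipher = mono.encrypt("hello", demo_key)
    print(cipher)                          # ITSSG
    print(mono.decrypt(cipher, demo_key))  # hello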
| gpl-3.0 |
samnazarko/osmc | package/mediacenter-addon-osmc/src/script.module.osmcsetting.networking/resources/lib/CompLogger.py | 51 | 2076 | import time
from functools import wraps
TEST_LOG_BOOL = True
def test_logger(msg):
print 'test-' + msg
def comprehensive_logger(logger=None, logging=True, maxlength=25, nowait=False):
'''
Decorator to log the inputs and outputs of functions, as well as the time taken
to run the function.
Requires: time, functools
logger: [opt] logging function, if not provided print is used
logging: [opt] boolean, turn logging on and off, default is True
    maxlength: [opt] integer, sets the maximum length an argument or returned variable can take, default 25
nowait: [opt] boolean, instructs the logger not to wait for the function to finish, default is False
'''
def default_logger(msg):
print msg
    if logger is None:
        logger = default_logger
def get_args(*args, **kwargs):
all_args = []
for i, arg in enumerate(args):
itm = 'pos' + str(i) + ": " + str(arg)[:maxlength]
all_args.append(itm)
for k, v in kwargs.iteritems():
itm = str(k) + ": " + str(v)[:maxlength]
all_args.append(itm)
return all_args
def decorater(func):
@wraps(func)
def wrapper(*args, **kwargs):
if logging and logger != None:
logger(func.__module__ + '.' + func.__name__ + " received: " + ", ".join(get_args(*args, **kwargs)))
if nowait:
func(*args, **kwargs)
logger(func.__module__ + '.' + func.__name__ + " -nowait")
return
else:
start = time.time()
result = func(*args, **kwargs)
end = time.time()
if logging and logger != None:
logger(func.__module__ + '.' + func.__name__ + " [" + str(end-start) + "] " + ' returns: ' + str(result)[:maxlength])
return result
return wrapper
return decorater
clog = comprehensive_logger
@clog(logging=TEST_LOG_BOOL)
def arg_tester(a, b, cdef):
print 'a: ' + str(a)
print 'b: ' + str(b)
print 'cdef: ' + str(cdef)
if __name__ == "__main__":
arg_tester('han', ['chewie', 'luke'], cdef='123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890') | gpl-2.0 |
crazcalm/Py3.4_exploration | DataStructures/Trees/PriorityQueue.py | 1 | 8766 | """
Priority Queue with Binary Heaps:
---------------------------------
Introduction:
------------
A priority queue acts like a queue in that you can dequeue an item by
removing it from the front. However, in a priority queue the logical order
of items inside the queue is determined by their priority. The highest priority
items are at the front of the queue and the lowest priority items are at the
back.
The classic way to implement a priority queue is using a data structure
called a binary heap. A binary heap will allow us to both enqueue and dequeue
items in O(log n).
The binary heap is interesting to study because when we diagram the heap it
looks a lot like a tree, but when we implement it we only use a single list
as an internal representation.
The binary heap has two common variations: the 'min heap,' in which the
smallest key is always at the front, and the 'max heap,' in which the largest
key is always at the front.
In this section, we will implement the min heap.
Basic Operations List:
---------------------
BinaryHeap(): creates a new, empty binary heap.
insert(k): adds a new item to the heap.
findMin(): returns the item with the minimum key value, leaving item in the heap.
delMin(): returns the item with the minimum key value, removing the item from the heap.
isEmpty(): returns true if the heap is empty, false otherwise.
size(): returns the number of items in the heap.
buildHeap(list): builds a new heap from a list of keys.
The Structure Property:
-----------------------
In order to make our heap work efficiently, we will take advantage of the
logarithmic nature of the tree to represent our heap.
In order to guarantee logarithmic performance, we must keep our tree balanced.
A balanced binary tree has roughly the same number of nodes in the left and right
subtrees of the root.
In our heap implementation we keep the tree balanced by creating a 'complete
binary tree.'
A complete binary tree is a tree in which each level has all of its nodes.
The exception to this is the bottom of the tree, which we fill in from left to
right.
Interesting Property:
---------------------
Another interesting property of a complete tree is that we can represent it
using a single list. We do not need to use nodes and references or even lists
of lists.
Because the tree is complete, the left child of the parent (at position p)
is the node found at position 2p in the list. Similarly, the right child
of the parent is at position 2p+1 in the list.
The Heap Order Property:
------------------------
The 'heap order property' is as follows: In a heap, for every node x with
parent p, the key in p is smaller than or equal to the key in x.
Heap Operations:
----------------
We will begin our implementation of a binary heap with the constructor.
Since the entire binary heap can be represented by a single list, all the constructor
will do is initialize the list and an attribute currentSize to keep track of the
current size of the heap.
You will notice that an empty binary heap has a single zero as the first element
of heapList and that this zero is not used, but is there so that a simple
integer can be used in later methods.
Insert method:
--------------
The next method we will implement is 'insert.'
The easiest, and most efficient, way to add an item to a list is to simply
append the item to the end of the list. The good news about appending is that
it guarantees that we will maintain the complete tree property.
The bad news is that we will very likely violate the heap order property.
However, it is possible to write a method that will allow us to regain the heap
order property by comparing the newly added item with its parent.
If the newly added item is less than its parent, then we can swap the item
with its parent.
Notice that when we percolate an item up, we are restoring the heap property
between the newly added item and the parent. We are also preserving the heap
property for any siblings.
delMin method notes:
--------------------
Since the heap property requires that the root of the tree be the smallest
item in the tree, finding the minimum item is easy. The hard part of delMin
is restoring full compliance with the heap structure and heap order properties
after the root has been removed.
We can restore our heap in two steps.
1. We will restore the root item by taking the last item in the list and moving
it to the root position.
2. We will restore the heap order property by pushing the new root node down
the tree to its proper position.
In order to maintain the heap order property, all we need to do is swap the
root with its smallest child whenever that child is less than the root.
After the initial swap, we may
repeat the swapping process with a node and its children until the node is swapped
into a position on the tree where it is already less than both children.
The code for percolating a node down the tree is found in the 'percDown' and
'minChild' methods.
buildHeap method:
-----------------
To finish our discussion of binary heaps, we will look at a method to build
an entire heap from a list of keys.
If we start with an entire list then we can build the whole heap in O(n)
operations.
We will start from the middle of the list. Although we start out in the middle
of the tree and work our way back towards the root, the percDown method ensures that
the largest child is always moved down the tree. Because it is a complete binary tree,
any nodes past the halfway point will be leaves and therefore have no children.
"""
class BinaryHeap:
"""
    A priority queue acts like a queue in that you can dequeue an item by
removing it from the front. However, in a priority queue the logical order
of items inside the queue is determined by their priority. The highest priority
items are at the front of the queue and the lowest priority items are at the
back.
"""
def __init__(self):
"""
You will notice that an empty binary heap has a single zero as the first element
of heapList and that this zero is not used, but is there so that a simple
integer can be used in later methods.
"""
self.heapList = [0]
self.currentSize = 0
def percUp(self, i):
"""
Compares the newly inserted item with its parent. If the item is less
than its parents, then they will be switched.
"""
while i // 2 > 0:
if self.heapList[i] < self.heapList[i // 2]:
tmp = self.heapList[i // 2]
self.heapList[i // 2] = self.heapList[i]
self.heapList[i] = tmp
i = i // 2
def insert(self, k):
"""
Inserts a new item to the binary heap
"""
self.heapList.append(k)
self.currentSize = self.currentSize + 1
self.percUp(self.currentSize)
def percDown(self, i):
"""
Moves the root of the binary heap (or subtree of the heap) down to its
proper place in the tree.
"""
while(i * 2) <= self.currentSize:
mc = self.minChild(i)
if self.heapList[i] > self.heapList[mc]:
tmp = self.heapList[i]
self.heapList[i] = self.heapList[mc]
self.heapList[mc] = tmp
i = mc
def minChild(self, i):
"""
Returns the index of the min Child
"""
if i * 2 + 1 > self.currentSize:
return i*2
else:
if self.heapList[i*2] < self.heapList[i*2+1]:
return i*2
else:
return i*2+1
def delMin(self):
"""
Returns the smallest item in the Binary Heap
"""
retval = self.heapList[1]
self.heapList[1] = self.heapList[self.currentSize]
self.currentSize = self.currentSize - 1
self.heapList.pop()
self.percDown(1)
return retval
def buildHeap(self, alist):
"""
Builds a Binary Heap from a list
"""
i = len(alist) // 2
self.currentSize = len(alist)
self.heapList = [0] + alist[:]
while (i>0):
self.percDown(i)
i = i - 1
def isEmpty(self):
return self.currentSize == 0
if __name__ == "__main__":
test = [5,7,3,11]
test2 = [9,6,5,2,3]
print("Binary heap test 1:\n\n")
bh = BinaryHeap()
for x in test:
bh.insert(x)
while not bh.isEmpty():
print(bh.delMin())
print("Binary heap test 2: \n\n")
bh2 = BinaryHeap()
bh2.buildHeap(test2)
while not bh2.isEmpty():
print(bh2.delMin())
| mit |
xen0l/ansible | lib/ansible/modules/packaging/os/swupd.py | 37 | 8830 | #!/usr/bin/python
# (c) 2017, Alberto Murillo <alberto.murillo.silva@intel.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: swupd
short_description: Manages updates and bundles in ClearLinux systems.
description:
- Manages updates and bundles with the swupd bundle manager, which is used by the
Clear Linux Project for Intel Architecture.
version_added: "2.3"
author: Alberto Murillo (@albertomurillo)
options:
contenturl:
description:
- URL pointing to the contents of available bundles.
If not specified, the contents are retrieved from clearlinux.org.
format:
description:
- The format suffix for version file downloads. For example [1,2,3,staging,etc].
If not specified, the default format is used.
manifest:
description:
      - The manifest contains information about the bundles at a certain version of the OS.
        Specify a manifest version to verify against that version, or leave unspecified to
        verify against the current version.
aliases: [release, version]
name:
description:
      - Name of the I(bundle) to install or remove.
aliases: [bundle]
state:
description:
      - Indicates the desired I(bundle) state. C(present) ensures the bundle
        is installed, while C(absent) ensures the bundle is not installed.
default: present
choices: [present, absent]
update:
description:
- Updates the OS to the latest version.
url:
description:
- Overrides both I(contenturl) and I(versionurl).
verify:
description:
- Verify content for OS version.
versionurl:
description:
- URL for version string download.
'''
EXAMPLES = '''
- name: Update the OS to the latest version
swupd:
update: yes
- name: Installs the "foo" bundle
swupd:
name: foo
state: present
- name: Removes the "foo" bundle
swupd:
name: foo
state: absent
- name: Check integrity of filesystem
swupd:
verify: yes
- name: Downgrade OS to release 12920
swupd:
verify: yes
manifest: 12920
'''
RETURN = '''
stdout:
description: stdout of swupd
returned: always
type: string
stderr:
description: stderr of swupd
returned: always
type: string
'''
import os
from ansible.module_utils.basic import AnsibleModule
class Swupd(object):
FILES_NOT_MATCH = "files did not match"
FILES_REPLACED = "missing files were replaced"
FILES_FIXED = "files were fixed"
FILES_DELETED = "files were deleted"
def __init__(self, module):
# Fail if swupd is not found
self.module = module
self.swupd_cmd = module.get_bin_path("swupd", False)
if not self.swupd_cmd:
module.fail_json(msg="Could not find swupd.")
# Initialize parameters
for key in module.params.keys():
setattr(self, key, module.params[key])
# Initialize return values
self.changed = False
self.failed = False
self.msg = None
self.rc = None
self.stderr = ""
self.stdout = ""
def _run_cmd(self, cmd):
self.rc, self.stdout, self.stderr = self.module.run_command(cmd, check_rc=False)
def _get_cmd(self, command):
cmd = "%s %s" % (self.swupd_cmd, command)
if self.format:
cmd += " --format=%s" % self.format
if self.manifest:
cmd += " --manifest=%s" % self.manifest
if self.url:
cmd += " --url=%s" % self.url
else:
if self.contenturl and command != "check-update":
cmd += " --contenturl=%s" % self.contenturl
if self.versionurl:
cmd += " --versionurl=%s" % self.versionurl
return cmd
def _is_bundle_installed(self, bundle):
try:
os.stat("/usr/share/clear/bundles/%s" % bundle)
except OSError:
return False
return True
def _needs_update(self):
cmd = self._get_cmd("check-update")
self._run_cmd(cmd)
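        # The checks below assume `swupd check-update` exits 0 when an update
        # is available and 1 when the system is already current; any other
        # return code is reported as a failure.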
if self.rc == 0:
return True
if self.rc == 1:
return False
self.failed = True
self.msg = "Failed to check for updates"
def _needs_verify(self):
cmd = self._get_cmd("verify")
self._run_cmd(cmd)
if self.rc != 0:
self.failed = True
self.msg = "Failed to check for filesystem inconsistencies."
if self.FILES_NOT_MATCH in self.stdout:
return True
return False
def install_bundle(self, bundle):
"""Installs a bundle with `swupd bundle-add bundle`"""
if self.module.check_mode:
self.module.exit_json(changed=not self._is_bundle_installed(bundle))
if self._is_bundle_installed(bundle):
self.msg = "Bundle %s is already installed" % bundle
return
cmd = self._get_cmd("bundle-add %s" % bundle)
self._run_cmd(cmd)
if self.rc == 0:
self.changed = True
self.msg = "Bundle %s installed" % bundle
return
if self.rc == 18:
self.msg = "Bundle name %s is invalid" % bundle
return
self.failed = True
self.msg = "Failed to install bundle %s" % bundle
def remove_bundle(self, bundle):
"""Removes a bundle with `swupd bundle-remove bundle`"""
if self.module.check_mode:
self.module.exit_json(changed=self._is_bundle_installed(bundle))
if not self._is_bundle_installed(bundle):
self.msg = "Bundle %s not installed"
return
cmd = self._get_cmd("bundle-remove %s" % bundle)
self._run_cmd(cmd)
if self.rc == 0:
self.changed = True
self.msg = "Bundle %s removed" % bundle
return
self.failed = True
self.msg = "Failed to remove bundle %s" % bundle
def update_os(self):
"""Updates the os with `swupd update`"""
if self.module.check_mode:
self.module.exit_json(changed=self._needs_update())
if not self._needs_update():
self.msg = "There are no updates available"
return
cmd = self._get_cmd("update")
self._run_cmd(cmd)
if self.rc == 0:
self.changed = True
self.msg = "Update successful"
return
self.failed = True
self.msg = "Failed to check for updates"
def verify_os(self):
"""Verifies filesystem against specified or current version"""
if self.module.check_mode:
self.module.exit_json(changed=self._needs_verify())
if not self._needs_verify():
self.msg = "No files where changed"
return
cmd = self._get_cmd("verify --fix")
self._run_cmd(cmd)
if self.rc == 0 and (self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout):
self.changed = True
self.msg = "Fix successful"
return
self.failed = True
self.msg = "Failed to verify the OS"
def main():
"""The main function."""
module = AnsibleModule(
argument_spec=dict(
contenturl=dict(type="str"),
format=dict(type="str"),
manifest=dict(aliases=["release", "version"], type="int"),
name=dict(aliases=["bundle"], type="str"),
state=dict(default="present", choices=["present", "absent"], type="str"),
update=dict(default=False, type="bool"),
url=dict(type="str"),
verify=dict(default=False, type="bool"),
versionurl=dict(type="str"),
),
required_one_of=[["name", "update", "verify"]],
mutually_exclusive=[["name", "update", "verify"]],
supports_check_mode=True
)
swupd = Swupd(module)
name = module.params["name"]
state = module.params["state"]
update = module.params["update"]
verify = module.params["verify"]
if update:
swupd.update_os()
elif verify:
swupd.verify_os()
elif state == "present":
swupd.install_bundle(name)
elif state == "absent":
swupd.remove_bundle(name)
else:
swupd.failed = True
if swupd.failed:
module.fail_json(msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
else:
module.exit_json(changed=swupd.changed, msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
if __name__ == '__main__':
main()
| gpl-3.0 |
Serag8/Bachelor | google_appengine/lib/django-1.2/django/contrib/auth/__init__.py | 44 | 3953 | import datetime
from warnings import warn
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
SESSION_KEY = '_auth_user_id'
BACKEND_SESSION_KEY = '_auth_user_backend'
REDIRECT_FIELD_NAME = 'next'
def load_backend(path):
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing authentication backend %s: "%s"' % (module, e))
except ValueError, e:
raise ImproperlyConfigured('Error importing authentication backends. Is AUTHENTICATION_BACKENDS a correctly defined list or tuple?')
try:
cls = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" authentication backend' % (module, attr))
try:
getattr(cls, 'supports_object_permissions')
except AttributeError:
warn("Authentication backends without a `supports_object_permissions` attribute are deprecated. Please define it in %s." % cls,
PendingDeprecationWarning)
cls.supports_object_permissions = False
try:
getattr(cls, 'supports_anonymous_user')
except AttributeError:
warn("Authentication backends without a `supports_anonymous_user` attribute are deprecated. Please define it in %s." % cls,
PendingDeprecationWarning)
cls.supports_anonymous_user = False
return cls()
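# Minimal usage sketch (assuming Django's stock backend): calling
#   load_backend('django.contrib.auth.backends.ModelBackend')
# imports the module, resolves the class attribute and returns an
# instance, patching in supports_object_permissions /
# supports_anonymous_user when the class predates those attributes.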
def get_backends():
from django.conf import settings
backends = []
for backend_path in settings.AUTHENTICATION_BACKENDS:
backends.append(load_backend(backend_path))
if not backends:
raise ImproperlyConfigured('No authentication backends have been defined. Does AUTHENTICATION_BACKENDS contain anything?')
return backends
def authenticate(**credentials):
"""
If the given credentials are valid, return a User object.
"""
for backend in get_backends():
try:
user = backend.authenticate(**credentials)
except TypeError:
# This backend doesn't accept these credentials as arguments. Try the next one.
continue
if user is None:
continue
# Annotate the user object with the path of the backend.
user.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__)
return user
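# Typical call, sketched (keyword names are backend-specific; these two
# are what ModelBackend expects):
#   user = authenticate(username='alice', password='secret')
#   if user is not None:
#       user.backend  # e.g. 'django.contrib.auth.backends.ModelBackend'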
def login(request, user):
"""
Persist a user id and a backend in the request. This way a user doesn't
have to reauthenticate on every request.
"""
if user is None:
user = request.user
# TODO: It would be nice to support different login methods, like signed cookies.
user.last_login = datetime.datetime.now()
user.save()
if SESSION_KEY in request.session:
if request.session[SESSION_KEY] != user.id:
# To avoid reusing another user's session, create a new, empty
# session if the existing session corresponds to a different
# authenticated user.
request.session.flush()
else:
request.session.cycle_key()
request.session[SESSION_KEY] = user.id
request.session[BACKEND_SESSION_KEY] = user.backend
if hasattr(request, 'user'):
request.user = user
def logout(request):
"""
Removes the authenticated user's ID from the request and flushes their
session data.
"""
request.session.flush()
if hasattr(request, 'user'):
from django.contrib.auth.models import AnonymousUser
request.user = AnonymousUser()
def get_user(request):
from django.contrib.auth.models import AnonymousUser
try:
user_id = request.session[SESSION_KEY]
backend_path = request.session[BACKEND_SESSION_KEY]
backend = load_backend(backend_path)
user = backend.get_user(user_id) or AnonymousUser()
except KeyError:
user = AnonymousUser()
return user
| mit |
ifduyue/sentry | src/sentry/tsdb/dummy.py | 2 | 5101 | """
sentry.tsdb.dummy
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from sentry.tsdb.base import BaseTSDB
class DummyTSDB(BaseTSDB):
"""
A no-op time-series storage.
"""
def incr(self, model, key, timestamp=None, count=1, environment_id=None):
self.validate_arguments([model], [environment_id])
def merge(self, model, destination, sources, timestamp=None, environment_ids=None):
environment_ids = (
set(environment_ids) if environment_ids is not None else set()).union(
[None])
self.validate_arguments([model], environment_ids)
def delete(self, models, keys, start=None, end=None, timestamp=None, environment_ids=None):
environment_ids = (
set(environment_ids) if environment_ids is not None else set()).union(
[None])
self.validate_arguments(models, environment_ids)
def get_range(self, model, keys, start, end, rollup=None, environment_id=None):
self.validate_arguments([model], [environment_id])
_, series = self.get_optimal_rollup_series(start, end, rollup)
return {k: [(ts, 0) for ts in series] for k in keys}
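# Shape of the result, illustratively: for keys [1, 2] and a computed
# series [t0, t1], get_range returns
#   {1: [(t0, 0), (t1, 0)], 2: [(t0, 0), (t1, 0)]}
# i.e. an all-zero series per key, consistent with a no-op store.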
def record(self, model, key, values, timestamp=None, environment_id=None):
self.validate_arguments([model], [environment_id])
def get_distinct_counts_series(self, model, keys, start, end=None,
rollup=None, environment_id=None):
self.validate_arguments([model], [environment_id])
_, series = self.get_optimal_rollup_series(start, end, rollup)
return {k: [(ts, 0) for ts in series] for k in keys}
def get_distinct_counts_totals(self, model, keys, start, end=None,
rollup=None, environment_id=None):
self.validate_arguments([model], [environment_id])
return {k: 0 for k in keys}
def get_distinct_counts_union(self, model, keys, start, end=None,
rollup=None, environment_id=None):
self.validate_arguments([model], [environment_id])
return 0
def merge_distinct_counts(self, model, destination, sources,
timestamp=None, environment_ids=None):
environment_ids = (
set(environment_ids) if environment_ids is not None else set()).union(
[None])
self.validate_arguments([model], environment_ids)
def delete_distinct_counts(self, models, keys, start=None, end=None,
timestamp=None, environment_ids=None):
environment_ids = (
set(environment_ids) if environment_ids is not None else set()).union(
[None])
self.validate_arguments(models, environment_ids)
def record_frequency_multi(self, requests, timestamp=None, environment_id=None):
self.validate_arguments([model for model, request in requests], [environment_id])
def get_most_frequent(self, model, keys, start, end=None,
rollup=None, limit=None, environment_id=None):
self.validate_arguments([model], [environment_id])
return {key: [] for key in keys}
def get_most_frequent_series(self, model, keys, start, end=None,
rollup=None, limit=None, environment_id=None):
self.validate_arguments([model], [environment_id])
rollup, series = self.get_optimal_rollup_series(start, end, rollup)
return {key: [(timestamp, {}) for timestamp in series] for key in keys}
def get_frequency_series(self, model, items, start, end=None, rollup=None, environment_id=None):
self.validate_arguments([model], [environment_id])
rollup, series = self.get_optimal_rollup_series(start, end, rollup)
results = {}
for key, members in items.items():
result = results[key] = []
for timestamp in series:
result.append((timestamp, {k: 0.0 for k in members}, ))
return results
def get_frequency_totals(self, model, items, start, end=None, rollup=None, environment_id=None):
self.validate_arguments([model], [environment_id])
results = {}
for key, members in items.items():
results[key] = {member: 0.0 for member in members}
return results
def merge_frequencies(self, model, destination, sources, timestamp=None, environment_ids=None):
environment_ids = list(
(set(environment_ids) if environment_ids is not None else set()).union(
[None]))
self.validate_arguments([model], environment_ids)
def delete_frequencies(self, models, keys, start=None, end=None,
timestamp=None, environment_ids=None):
environment_ids = (
set(environment_ids) if environment_ids is not None else set()).union(
[None])
self.validate_arguments(models, environment_ids)
def flush(self):
pass
| bsd-3-clause |
Paul-Ezell/cinder-1 | cinder/image/cache.py | 7 | 9215 | # Copyright (C) 2015 Pure Storage, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pytz import timezone
import six
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from cinder.i18n import _LW
from cinder import rpc
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ImageVolumeCache(object):
def __init__(self, db, volume_api, max_cache_size_gb=0,
max_cache_size_count=0):
self.db = db
self.volume_api = volume_api
self.max_cache_size_gb = int(max_cache_size_gb)
self.max_cache_size_count = int(max_cache_size_count)
self.notifier = rpc.get_notifier('volume', CONF.host)
def get_by_image_volume(self, context, volume_id):
return self.db.image_volume_cache_get_by_volume_id(context, volume_id)
def evict(self, context, cache_entry):
LOG.debug('Evicting image cache entry: %(entry)s.',
{'entry': self._entry_to_str(cache_entry)})
self.db.image_volume_cache_delete(context, cache_entry['volume_id'])
self._notify_cache_eviction(context, cache_entry['image_id'],
cache_entry['host'])
def get_entry(self, context, volume_ref, image_id, image_meta):
cache_entry = self.db.image_volume_cache_get_and_update_last_used(
context,
image_id,
volume_ref['host']
)
if cache_entry:
LOG.debug('Found image-volume cache entry: %(entry)s.',
{'entry': self._entry_to_str(cache_entry)})
if self._should_update_entry(cache_entry, image_meta):
LOG.debug('Image-volume cache entry is outdated, evicting: '
'%(entry)s.',
{'entry': self._entry_to_str(cache_entry)})
self._delete_image_volume(context, cache_entry)
cache_entry = None
if cache_entry:
self._notify_cache_hit(context, cache_entry['image_id'],
cache_entry['host'])
else:
self._notify_cache_miss(context, image_id,
volume_ref['host'])
return cache_entry
def create_cache_entry(self, context, volume_ref, image_id, image_meta):
"""Create a new cache entry for an image.
This assumes that the volume described by volume_ref has already been
created and is in an available state.
"""
LOG.debug('Creating new image-volume cache entry for image '
'%(image_id)s on host %(host)s.',
{'image_id': image_id, 'host': volume_ref['host']})
# When we are creating an image from a volume the updated_at field
# will be a unicode representation of the datetime. In that case
# we just need to parse it into one. If it is an actual datetime
# we want to just grab it as a UTC naive datetime.
image_updated_at = image_meta['updated_at']
if type(image_updated_at) in [unicode, str]:
image_updated_at = timeutils.parse_strtime(image_updated_at)
else:
image_updated_at = image_updated_at.astimezone(timezone('UTC'))
cache_entry = self.db.image_volume_cache_create(
context,
volume_ref['host'],
image_id,
image_updated_at.replace(tzinfo=None),
volume_ref['id'],
volume_ref['size']
)
LOG.debug('New image-volume cache entry created: %(entry)s.',
{'entry': self._entry_to_str(cache_entry)})
return cache_entry
def ensure_space(self, context, space_required, host):
"""Makes room for a cache entry.
Returns True if successful, False otherwise.
"""
# Check to see if the cache is actually limited.
if self.max_cache_size_gb == 0 and self.max_cache_size_count == 0:
return True
# Make sure that we can potentially fit the image in the cache
# and bail out before evicting everything else to try and make
# room for it.
if (self.max_cache_size_gb != 0 and
space_required > self.max_cache_size_gb):
return False
# Assume the entries are ordered by most recently used to least used.
entries = self.db.image_volume_cache_get_all_for_host(context, host)
current_count = len(entries)
current_size = 0
for entry in entries:
current_size += entry['size']
# Add values for the entry we intend to create.
current_size += space_required
current_count += 1
LOG.debug('Image-volume cache for host %(host)s current_size (GB) = '
'%(size_gb)s (max = %(max_gb)s), current count = %(count)s '
'(max = %(max_count)s).',
{'host': host,
'size_gb': current_size,
'max_gb': self.max_cache_size_gb,
'count': current_count,
'max_count': self.max_cache_size_count})
while ((current_size > self.max_cache_size_gb
or current_count > self.max_cache_size_count)
and len(entries)):
entry = entries.pop()
LOG.debug('Reclaiming image-volume cache space; removing cache '
'entry %(entry)s.', {'entry': self._entry_to_str(entry)})
self._delete_image_volume(context, entry)
current_size -= entry['size']
current_count -= 1
LOG.debug('Image-volume cache for host %(host)s new size (GB) = '
'%(size_gb)s, new count = %(count)s.',
{'host': host,
'size_gb': current_size,
'count': current_count})
# It is only possible to not free up enough gb, we will always be able
# to free enough count. This is because 0 means unlimited which means
# it is guaranteed to be >0 if limited, and we can always delete down
# to 0.
if self.max_cache_size_gb > 0:
if current_size > self.max_cache_size_gb:
LOG.warning(_LW('Image-volume cache for host %(host)s does '
'not have enough space (GB).'), {'host': host})
return False
return True
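# Worked example with hypothetical numbers: max_cache_size_gb=10,
# max_cache_size_count=0 (count unlimited), cached entry sizes
# [4, 3, 2] GB ordered most recently used first, space_required=5.
# The projected size is 4+3+2+5 = 14 GB, so the loop evicts the 2 GB
# and then the 3 GB entry, leaving 9 GB <= 10 GB, and the method
# returns True.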
def _notify_cache_hit(self, context, image_id, host):
self._notify_cache_action(context, image_id, host, 'hit')
def _notify_cache_miss(self, context, image_id, host):
self._notify_cache_action(context, image_id, host, 'miss')
def _notify_cache_eviction(self, context, image_id, host):
self._notify_cache_action(context, image_id, host, 'evict')
def _notify_cache_action(self, context, image_id, host, action):
data = {
'image_id': image_id,
'host': host,
}
LOG.debug('ImageVolumeCache notification: action=%(action)s'
' data=%(data)s.', {'action': action, 'data': data})
self.notifier.info(context, 'image_volume_cache.%s' % action, data)
def _delete_image_volume(self, context, cache_entry):
"""Delete a volume and remove cache entry."""
volume_ref = self.db.volume_get(context, cache_entry['volume_id'])
# Delete will evict the cache entry.
self.volume_api.delete(context, volume_ref)
def _get_image_volume_name(self, image_id):
return 'image-volume-' + image_id
def _should_update_entry(self, cache_entry, image_meta):
"""Ensure that the cache entry image data is still valid."""
image_updated_utc = (image_meta['updated_at']
.astimezone(timezone('UTC')))
cache_updated_utc = (cache_entry['image_updated_at']
.replace(tzinfo=timezone('UTC')))
LOG.debug('Image-volume cache entry image_update_at = %(entry_utc)s, '
'requested image updated_at = %(image_utc)s.',
{'entry_utc': six.text_type(cache_updated_utc),
'image_utc': six.text_type(image_updated_utc)})
return image_updated_utc != cache_updated_utc
def _entry_to_str(self, cache_entry):
return six.text_type({
'id': cache_entry['id'],
'image_id': cache_entry['image_id'],
'volume_id': cache_entry['volume_id'],
'host': cache_entry['host'],
'size': cache_entry['size'],
'image_updated_at': cache_entry['image_updated_at'],
'last_used': cache_entry['last_used'],
})
| apache-2.0 |
muro/Cinnamon | files/usr/lib/cinnamon-desktop-editor/cinnamon-desktop-editor.py | 12 | 20676 | #!/usr/bin/env python2
import sys
import os
import gettext
import glob
from gi.repository import GLib, Gtk, Gio, CMenu, GdkPixbuf
from optparse import OptionParser
import shutil
sys.path.insert(0,'/usr/lib/cinnamon-menu-editor')
from cme import util
sys.path.insert(0,'/usr/lib/cinnamon-settings')
from bin import XletSettingsWidgets
# i18n
gettext.install("cinnamon", "/usr/share/locale")
# i18n for menu item
_ = gettext.gettext
home = os.path.expanduser("~")
PANEL_LAUNCHER_PATH = os.path.join(home, ".cinnamon", "panel-launchers")
EXTENSIONS = (".png", ".xpm", ".svg")
def escape_space(string):
return string.replace(" ", "\ ")
def try_icon_name(filename):
# Detect if the user picked an icon, and make
# it into an icon name.
if not filename.endswith(EXTENSIONS):
return filename
noext_filename = filename[:-4]
theme = Gtk.IconTheme.get_default()
resolved_path = None
for path in theme.get_search_path():
if noext_filename.startswith(path):
resolved_path = noext_filename[len(path):].lstrip(os.sep)
break
if resolved_path is None:
return filename
parts = resolved_path.split(os.sep)
# icon-theme/size/category/icon
if len(parts) != 4:
return filename
return parts[3]
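# Illustrative example (hypothetical file): if /usr/share/icons is on
# the theme search path, then
#   try_icon_name('/usr/share/icons/hicolor/48x48/apps/firefox.png')
# strips the extension, resolves 'hicolor/48x48/apps/firefox' relative
# to the search path, splits it into the four expected parts and
# returns the bare icon name 'firefox'.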
def get_icon_string(image):
filename = image._file
if filename is not None:
return try_icon_name(filename)
return image._icon_name
def strip_extensions(icon):
if icon.endswith(EXTENSIONS):
return icon[:-4]
else:
return icon
def set_icon_string(image, icon):
if GLib.path_is_absolute(icon):
image._file = icon
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(icon, 64, 64)
if pixbuf is not None:
image.set_from_pixbuf(pixbuf)
else:
image._icon_name = strip_extensions(icon)
image.set_from_icon_name(strip_extensions(icon), Gtk.IconSize.BUTTON)
def ask(msg):
dialog = Gtk.MessageDialog(None,
Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.QUESTION,
Gtk.ButtonsType.YES_NO,
None)
dialog.set_default_size(400, 200)
dialog.set_markup(msg)
dialog.show_all()
response = dialog.run()
dialog.destroy()
return response == Gtk.ResponseType.YES
DESKTOP_GROUP = GLib.KEY_FILE_DESKTOP_GROUP
class IconPicker(object):
def __init__(self, dialog, button, image):
self.dialog = dialog
self.button = button
self.button.connect('clicked', self.pick_icon)
self.image = image
def pick_icon(self, button):
chooser = Gtk.FileChooserDialog(title=_("Choose an icon"),
parent=self.dialog,
buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.REJECT,
Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT))
chooser.add_shortcut_folder("/usr/share/pixmaps")
chooser.add_shortcut_folder("/usr/share/icons")
fn = get_icon_string(self.image)
if fn:
if GLib.path_is_absolute(fn):
chooser.set_filename(fn)
else:
theme = Gtk.IconTheme.get_default()
icon_info = theme.lookup_icon(fn, 64, 0)
icon_info_fn = icon_info.get_filename() if icon_info != None else None
if icon_info_fn:
chooser.set_filename(icon_info_fn)
filter = Gtk.FileFilter()
filter.add_pixbuf_formats()
chooser.set_filter(filter)
preview = Gtk.Image()
chooser.set_preview_widget(preview)
chooser.connect("update-preview", self.update_icon_preview_cb, preview)
response = chooser.run()
if response == Gtk.ResponseType.ACCEPT:
set_icon_string (self.image, chooser.get_filename())
chooser.destroy()
def update_icon_preview_cb(self, chooser, preview):
filename = chooser.get_preview_filename()
if filename is None:
return
chooser.set_preview_widget_active(False)
if os.path.isfile(filename):
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(filename, 128, 128)
if pixbuf is not None:
preview.set_from_pixbuf(pixbuf)
chooser.set_preview_widget_active(True)
class ItemEditor(object):
ui_file = None
def __init__(self, item_path = None, callback = None, destdir = None):
self.builder = Gtk.Builder()
self.builder.add_from_file(self.ui_file)
self.callback = callback
self.destdir = destdir
self.dialog = self.builder.get_object('editor')
self.dialog.connect('response', self.on_response)
icon = self.builder.get_object('icon-image')
icon._file = None
icon._icon_name = None
self.build_ui()
self.item_path = item_path
self.load()
self.check_custom_path()
self.resync_validity()
def build_ui(self):
raise NotImplementedError()
def check_custom_path(self):
raise NotImplementedError()
def sync_widgets(self, name_valid, exec_valid):
if name_valid:
self.builder.get_object('name-entry').set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, 'ok')
self.builder.get_object('name-entry').set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY,
_("Valid name"))
else:
self.builder.get_object('name-entry').set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, 'stop')
self.builder.get_object('name-entry').set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY,
_("The name cannot be empty."))
if exec_valid:
self.builder.get_object('exec-entry').set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, 'ok')
self.builder.get_object('exec-entry').set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY,
_("Valid executable"))
else:
self.builder.get_object('exec-entry').set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, 'stop')
self.builder.get_object('exec-entry').set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY,
_("The executable is not valid. It cannot be empty and spaces in the path must be escaped with backslash (\\)."))
self.builder.get_object('ok').set_sensitive(name_valid and exec_valid)
def validate_exec_line(self, string):
try:
success, parsed = GLib.shell_parse_argv(string)
if GLib.find_program_in_path(parsed[0]) or ((not os.path.isdir(parsed[0])) and os.access(parsed[0], os.X_OK)):
return True
except:
pass
return False
def get_keyfile_edits(self):
raise NotImplementedError()
def set_text(self, ctl, name):
try:
val = self.keyfile.get_string(DESKTOP_GROUP, name)
except GLib.GError:
pass
else:
self.builder.get_object(ctl).set_text(val)
def set_check(self, ctl, name):
try:
val = self.keyfile.get_boolean(DESKTOP_GROUP, name)
except GLib.GError:
pass
else:
self.builder.get_object(ctl).set_active(val)
def set_icon(self, ctl, name):
try:
val = self.keyfile.get_string(DESKTOP_GROUP, name)
except GLib.GError:
pass
else:
set_icon_string(self.builder.get_object(ctl), val)
def load(self):
self.keyfile = GLib.KeyFile()
path = self.item_path or ""
try:
self.keyfile.load_from_file(path, util.KEY_FILE_FLAGS)
except GLib.GError:
pass
def save(self):
util.fillKeyFile(self.keyfile, self.get_keyfile_edits())
contents, length = self.keyfile.to_data()
need_exec = False
if self.destdir is not None:
self.item_path = os.path.join(self.destdir, self.builder.get_object('name-entry').get_text() + ".desktop")
need_exec = True
try:
with open(self.item_path, 'w') as f:
f.write(contents)
if need_exec:
os.chmod(self.item_path, 0o755)
except IOError:
if ask(_("Cannot create the launcher at this location. Add to the desktop instead?")):
self.destdir = GLib.get_user_special_dir(GLib.UserDirectory.DIRECTORY_DESKTOP)
self.save()
def run(self):
self.dialog.present()
def on_response(self, dialog, response):
if response == Gtk.ResponseType.OK:
self.save()
self.callback(True, self.item_path)
else:
self.callback(False, self.item_path)
self.dialog.destroy()
class LauncherEditor(ItemEditor):
ui_file = '/usr/lib/cinnamon-desktop-editor/launcher-editor.ui'
def build_ui(self):
self.icon_picker = IconPicker(self.dialog,
self.builder.get_object('icon-button'),
self.builder.get_object('icon-image'))
self.builder.get_object('exec-browse').connect('clicked', self.pick_exec)
self.builder.get_object('name-entry').connect('changed', self.resync_validity)
self.builder.get_object('exec-entry').connect('changed', self.resync_validity)
def resync_validity(self, *args):
name_text = self.builder.get_object('name-entry').get_text().strip()
exec_text = self.builder.get_object('exec-entry').get_text().strip()
name_valid = name_text != ""
exec_valid = self.validate_exec_line(exec_text)
self.sync_widgets(name_valid, exec_valid)
def load(self):
super(LauncherEditor, self).load()
self.set_text('name-entry', "Name")
self.set_text('exec-entry', "Exec")
self.set_text('comment-entry', "Comment")
self.set_check('terminal-check', "Terminal")
self.set_icon('icon-image', "Icon")
def get_keyfile_edits(self):
return dict(Name=self.builder.get_object('name-entry').get_text(),
Exec=self.builder.get_object('exec-entry').get_text(),
Comment=self.builder.get_object('comment-entry').get_text(),
Terminal=self.builder.get_object('terminal-check').get_active(),
Icon=get_icon_string(self.builder.get_object('icon-image')),
Type="Application")
def pick_exec(self, button):
chooser = Gtk.FileChooserDialog(title=_("Choose a command"),
parent=self.dialog,
buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.REJECT,
Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT))
response = chooser.run()
if response == Gtk.ResponseType.ACCEPT:
self.builder.get_object('exec-entry').set_text(escape_space(chooser.get_filename()))
chooser.destroy()
def check_custom_path(self):
pass
class DirectoryEditor(ItemEditor):
ui_file = '/usr/lib/cinnamon-desktop-editor/directory-editor.ui'
def build_ui(self):
self.icon_picker = IconPicker(self.dialog,
self.builder.get_object('icon-button'),
self.builder.get_object('icon-image'))
self.builder.get_object('name-entry').connect('changed', self.resync_validity)
def resync_validity(self, *args):
name_text = self.builder.get_object('name-entry').get_text().strip()
valid = (name_text != "")
self.builder.get_object('ok').set_sensitive(valid)
def load(self):
super(DirectoryEditor, self).load()
self.set_text('name-entry', "Name")
self.set_text('comment-entry', "Comment")
self.set_icon('icon-image', "Icon")
def get_keyfile_edits(self):
return dict(Name=self.builder.get_object('name-entry').get_text(),
Comment=self.builder.get_object('comment-entry').get_text(),
Icon=get_icon_string(self.builder.get_object('icon-image')),
Type="Directory")
def check_custom_path(self):
pass
class CinnamonLauncherEditor(ItemEditor):
ui_file = '/usr/lib/cinnamon-desktop-editor/launcher-editor.ui'
def build_ui(self):
self.icon_picker = IconPicker(self.dialog,
self.builder.get_object('icon-button'),
self.builder.get_object('icon-image'))
self.builder.get_object('exec-browse').connect('clicked', self.pick_exec)
self.builder.get_object('name-entry').connect('changed', self.resync_validity)
self.builder.get_object('exec-entry').connect('changed', self.resync_validity)
def check_custom_path(self):
dir = Gio.file_new_for_path(PANEL_LAUNCHER_PATH)
if not dir.query_exists(None):
dir.make_directory_with_parents(None)
if self.item_path is None or "cinnamon-custom-launcher" not in self.item_path:
i = 1
while True:
name = os.path.join(PANEL_LAUNCHER_PATH, 'cinnamon-custom-launcher-' + str(i) + '.desktop')
file = Gio.file_parse_name(name)
if not file.query_exists(None):
break
i += 1
self.item_path = name
def resync_validity(self, *args):
name_text = self.builder.get_object('name-entry').get_text().strip()
exec_text = self.builder.get_object('exec-entry').get_text().strip()
name_valid = name_text != ""
exec_valid = self.validate_exec_line(exec_text)
self.sync_widgets(name_valid, exec_valid)
def load(self):
super(CinnamonLauncherEditor, self).load()
self.set_text('name-entry', "Name")
self.set_text('exec-entry', "Exec")
self.set_text('comment-entry', "Comment")
self.set_check('terminal-check', "Terminal")
self.set_icon('icon-image', "Icon")
def get_keyfile_edits(self):
return dict(Name=self.builder.get_object('name-entry').get_text(),
Exec=self.builder.get_object('exec-entry').get_text(),
Comment=self.builder.get_object('comment-entry').get_text(),
Terminal=self.builder.get_object('terminal-check').get_active(),
Icon=get_icon_string(self.builder.get_object('icon-image')),
Type="Application")
def pick_exec(self, button):
chooser = Gtk.FileChooserDialog(title=_("Choose a command"),
parent=self.dialog,
buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.REJECT,
Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT))
response = chooser.run()
if response == Gtk.ResponseType.ACCEPT:
self.builder.get_object('exec-entry').set_text(escape_space(chooser.get_filename()))
chooser.destroy()
class Main:
def __init__(self):
parser = OptionParser()
parser.add_option("-o", "--original", dest="original_desktop_file", help="Path of original .desktop file", metavar="ORIG_FILE")
parser.add_option("-d", "--directory", dest="destination_directory", help="Destination directory of the new launcher", metavar="DEST_DIR")
parser.add_option("-f", "--file", dest="desktop_file", help="Name of desktop file (i.e. gnome-terminal.desktop)", metavar="DESKTOP_NAME")
parser.add_option("-m", "--mode", dest="mode", default=None, help="Mode to run in: launcher, directory, panel-launcher or nemo-launcher")
(options, args) = parser.parse_args()
if not options.mode:
parser.error("You must select a mode to run in")
if options.mode in ("directory", "launcher") and not options.original_desktop_file:
parser.error("directory and launcher modes must be accompanied by the -o argument")
if options.mode == "nemo-launcher" and not options.destination_directory:
parser.error("nemo-launcher mode must be accompanied by the -d argument")
if options.mode == "cinnamon-launcher" and len(args) < 3:
parser.error("cinnamon-launcher mode must have the following syntax:\n\
cinnamon-desktop-editor -mcinnamon-launcher [-ffoo.desktop] <uuid> <instance-id> <json-path>")
self.tree = CMenu.Tree.new("cinnamon-applications.menu", CMenu.TreeFlags.INCLUDE_NODISPLAY)
if not self.tree.load_sync():
raise ValueError("can not load menu tree")
self.mode = options.mode
self.orig_file = options.original_desktop_file
self.desktop_file = options.desktop_file
self.dest_dir = options.destination_directory
if options.mode == "cinnamon-launcher":
self.uuid = args[0]
self.iid = args[1]
self.json_path = args[2]
if self.desktop_file is not None:
self.get_desktop_path()
if self.mode == "directory":
editor = DirectoryEditor(self.orig_file, self.directory_cb)
editor.dialog.show_all()
elif self.mode == "launcher":
editor = LauncherEditor(self.orig_file, self.launcher_cb)
editor.dialog.show_all()
elif self.mode == "cinnamon-launcher":
editor = CinnamonLauncherEditor(self.orig_file, self.panel_launcher_cb)
editor.dialog.show_all()
elif self.mode == "nemo-launcher":
editor = LauncherEditor(self.orig_file, self.nemo_launcher_cb, self.dest_dir)
editor.dialog.show_all()
else:
print "Invalid args"
def directory_cb(self, success, dest_path):
self.end()
def launcher_cb(self, success, dest_path):
self.end()
def panel_launcher_cb(self, success, dest_path):
if success:
factory = XletSettingsWidgets.Factory(self.json_path, self.iid, False, self.uuid)
launchers = factory.settings.get_value("launcherList")
if self.desktop_file is None:
launchers.append(os.path.split(dest_path)[1])
else:
# list.index() raises ValueError rather than returning -1, so guard
# the lookup instead of testing for a negative index
try:
i = launchers.index(self.desktop_file)
del launchers[i]
launchers.insert(i, os.path.split(dest_path)[1])
except ValueError:
pass
factory.settings.set_value("launcherList", launchers)
if self.desktop_file is None:
self.ask_menu_launcher(dest_path)
self.end()
def nemo_launcher_cb(self, success, dest_path):
if success:
self.ask_menu_launcher(dest_path)
self.end()
def ask_menu_launcher(self, dest_path):
if ask(_("Would you like to add this launcher to the menu also? It will be placed in the Other category initially.")):
new_file_path = os.path.join(util.getUserItemPath(), os.path.split(dest_path)[1])
shutil.copy(dest_path, new_file_path)
def get_desktop_path(self):
self.search_menu_sys()
if self.orig_file is None:
panel_launchers = glob.glob(os.path.join(PANEL_LAUNCHER_PATH, "*.desktop"))
for launcher in panel_launchers:
if os.path.split(launcher)[1] == self.desktop_file:
self.orig_file = launcher
def search_menu_sys(self, parent=None):
if parent is None:
parent = self.tree.get_root_directory()
item_iter = parent.iter()
item_type = item_iter.next()
while item_type != CMenu.TreeItemType.INVALID:
if item_type == CMenu.TreeItemType.DIRECTORY:
item = item_iter.get_directory()
self.search_menu_sys(item)
elif item_type == CMenu.TreeItemType.ENTRY:
item = item_iter.get_entry()
if item.get_desktop_file_id() == self.desktop_file:
self.orig_file = item.get_desktop_file_path()
item_type = item_iter.next()
def end(self):
Gtk.main_quit()
if __name__ == "__main__":
Gtk.Window.set_default_icon_name('gnome-panel-launcher')
Main()
Gtk.main()
| gpl-2.0 |
averainy/averainy | python/wechat_test.py | 1 | 7797 | #!/usr/bin/python
#coding=utf-8
import xml.dom.minidom
def get_tagname(input_xml_string):
"""Parse an XML string and return its root element's tag name."""
doc = xml.dom.minidom.parseString(input_xml_string)
return doc.documentElement.tagName
class msg_parse:
def __init__(self,msg):
self.doc = xml.dom.minidom.parseString(msg)
def _getData(self,tagName):
nodes=self.doc.getElementsByTagName(tagName)
if nodes:
return nodes[0].childNodes[0].data
else:
return None
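# _getData returns the text of the first matching element, or None when
# the tag is absent; e.g. on a text-message fixture, _getData("Content")
# yields the CDATA payload of the <Content> element.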
def getFromUserName(self):
return self._getData("FromUserName")
def getToUserName(self):
return self._getData("ToUserName")
def getCreateTime(self):
return self._getData("CreateTime")
def getMsgType(self):
return self._getData("MsgType")
def getContent(self):
return self._getData("Content")
def getMsgId(self):
return self._getData("MsgId")
def getPicUrl(self):
return self._getData("PicUrl")
def getMediaId(self):
return self._getData("MediaId")
def getFormat(self):
return self._getData("Format")
def getThumbMediaId(self):
return self._getData("ThumbMediaId")
def getLocation_X(self):
return self._getData("Location_X")
def getLocation_Y(self):
return self._getData("Location_Y")
def getScale(self):
return self._getData("Scale")
def getLabel(self):
return self._getData("Label")
def getTitle(self):
return self._getData("Title")
def getDescription(self):
return self._getData("Description")
def getUrl(self):
return self._getData("Url")
def getEvent(self):
return self._getData("Event")
def getEventKey(self):
return self._getData("EventKey")
def getTicket(self):
return self._getData("Ticket")
def getLatitude(self):
return self._getData("Latitude")
def getLongitude(self):
return self._getData("Longitude")
def getPrecision(self):
return self._getData("Precision")
if __name__ == "__main__":
# Text message
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[ffdfdromUser]]></FromUserName>
<CreateTime>1348831860</CreateTime>
<MsgType><![CDATA[text]]></MsgType>
<Content><![CDATA[this is a test]]></Content>
<MsgId>1234567890123456</MsgId>
</xml>"""
abc=msg_parse(res)
print abc.getFromUserName()
print abc.getToUserName()
print abc.getCreateTime()
print abc.getMsgType()
print abc.getContent()
print abc.getMsgId()
# Image message
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>1348831860</CreateTime>
<MsgType><![CDATA[image]]></MsgType>
<PicUrl><![CDATA[this is a url]]></PicUrl>
<MediaId><![CDATA[media_id]]></MediaId>
<MsgId>1234567890123456</MsgId>
</xml>"""
abc=msg_parse(res)
print abc.getPicUrl()
print abc.getMediaId()
# Voice message
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>1357290913</CreateTime>
<MsgType><![CDATA[voice]]></MsgType>
<MediaId><![CDATA[media_id]]></MediaId>
<Format><![CDATA[Format]]></Format>
<MsgId>1234567890123456</MsgId>
</xml>"""
abc=msg_parse(res)
print abc.getFormat()
# Video message
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>1357290913</CreateTime>
<MsgType><![CDATA[video]]></MsgType>
<MediaId><![CDATA[media_id]]></MediaId>
<ThumbMediaId><![CDATA[thumb_media_id]]></ThumbMediaId>
<MsgId>1234567890123456</MsgId>
</xml>"""
abc=msg_parse(res)
print abc.getThumbMediaId()
# Location message
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>1351776360</CreateTime>
<MsgType><![CDATA[location]]></MsgType>
<Location_X>23.134521</Location_X>
<Location_Y>113.358803</Location_Y>
<Scale>20</Scale>
<Label><![CDATA[位置信息]]></Label>
<MsgId>1234567890123456</MsgId>
</xml> """
abc=msg_parse(res)
print abc.getLocation_X()
print abc.getLocation_Y()
print abc.getScale()
print abc.getLabel()
# Link message
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>1351776360</CreateTime>
<MsgType><![CDATA[link]]></MsgType>
<Title><![CDATA[公众平台官网链接]]></Title>
<Description><![CDATA[公众平台官网链接]]></Description>
<Url><![CDATA[url]]></Url>
<MsgId>1234567890123456</MsgId>
</xml> """
abc=msg_parse(res)
print abc.getTitle()
print abc.getDescription()
print abc.getUrl()
# Subscribe / unsubscribe event
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[subscribe]]></Event>
</xml>"""
abc=msg_parse(res)
print abc.getEvent()
# Scanning a QR code carrying a scene parameter
# Event pushed when a not-yet-subscribed user subscribes after scanning
res="""<xml><ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[subscribe]]></Event>
<EventKey><![CDATA[qrscene_123123]]></EventKey>
<Ticket><![CDATA[TICKET]]></Ticket>
</xml>"""
abc=msg_parse(res)
print abc.getEventKey()
print abc.getTicket()
# Event pushed when the user is already subscribed
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[SCAN]]></Event>
<EventKey><![CDATA[SCENE_VALUE]]></EventKey>
<Ticket><![CDATA[TICKET]]></Ticket>
</xml>"""
abc=msg_parse(res)
print abc.getEventKey()
print abc.getTicket()
# Location report event
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[LOCATION]]></Event>
<Latitude>23.137466</Latitude>
<Longitude>113.352425</Longitude>
<Precision>119.385040</Precision>
</xml>"""
abc=msg_parse(res)
print abc.getLatitude()
print abc.getLongitude()
print abc.getPrecision()
# Event pushed when clicking a menu item that pulls a message
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[CLICK]]></Event>
<EventKey><![CDATA[EVENTKEY]]></EventKey>
</xml>"""
abc=msg_parse(res)
print abc.getMsgType()
print abc.getEvent()
print abc.getEventKey()
# Event pushed when clicking a menu item that opens a link
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[VIEW]]></Event>
<EventKey><![CDATA[www.qq.com]]></EventKey>
</xml>"""
abc=msg_parse(res)
print abc.getMsgType()
print abc.getEvent()
print abc.getEventKey()
| gpl-2.0 |
arifsetiawan/edx-platform | common/lib/xmodule/xmodule/modulestore/draft_and_published.py | 116 | 4797 | """
This module provides an abstraction for Module Stores that support Draft and Published branches.
"""
import threading
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from . import ModuleStoreEnum
# Things w/ these categories should never be marked as version=DRAFT
DIRECT_ONLY_CATEGORIES = ['course', 'chapter', 'sequential', 'about', 'static_tab', 'course_info']
class BranchSettingMixin(object):
"""
A mixin to manage a module store's branch setting.
The order of override is (from higher precedence to lower):
1. thread-specific setting temporarily set using the branch_setting contextmanager
2. the return value of the branch_setting_func passed into this mixin's init method
3. the default branch setting being ModuleStoreEnum.Branch.published_only
"""
def __init__(self, *args, **kwargs):
"""
:param branch_setting_func: a function that returns the default branch setting for this object.
If not specified, ModuleStoreEnum.Branch.published_only is used as the default setting.
"""
self.default_branch_setting_func = kwargs.pop(
'branch_setting_func',
lambda: ModuleStoreEnum.Branch.published_only
)
super(BranchSettingMixin, self).__init__(*args, **kwargs)
# cache the branch setting on a local thread to support a multi-threaded environment
self.thread_cache = threading.local()
@contextmanager
def branch_setting(self, branch_setting, course_id=None): # pylint: disable=unused-argument
"""
A context manager for temporarily setting a store's branch value on the current thread.
"""
previous_thread_branch_setting = getattr(self.thread_cache, 'branch_setting', None)
try:
self.thread_cache.branch_setting = branch_setting
yield
finally:
self.thread_cache.branch_setting = previous_thread_branch_setting
def get_branch_setting(self, course_id=None): # pylint: disable=unused-argument
"""
Returns the current branch_setting on the store.
Returns the thread-local setting, if set.
Otherwise, returns the default value of the setting function set during the store's initialization.
"""
# first check the thread-local cache
thread_local_branch_setting = getattr(self.thread_cache, 'branch_setting', None)
if thread_local_branch_setting:
return thread_local_branch_setting
else:
# return the default value
return self.default_branch_setting_func()
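# Minimal usage sketch with a hypothetical store class, showing the
# precedence documented above (thread-local override beats the default
# setting function):
#   store = SomeStore(branch_setting_func=lambda: ModuleStoreEnum.Branch.draft_preferred)
#   store.get_branch_setting()  # -> draft_preferred (default function)
#   with store.branch_setting(ModuleStoreEnum.Branch.published_only):
#       store.get_branch_setting()  # -> published_only (thread-local)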
class ModuleStoreDraftAndPublished(BranchSettingMixin):
"""
A mixin for a read-write database backend that supports two branches, Draft and Published, with
options to prefer Draft and fall back to Published.
"""
__metaclass__ = ABCMeta
@abstractmethod
def delete_item(self, location, user_id, revision=None, **kwargs):
raise NotImplementedError
@abstractmethod
def get_parent_location(self, location, revision=None, **kwargs):
raise NotImplementedError
@abstractmethod
def has_changes(self, xblock):
raise NotImplementedError
@abstractmethod
def publish(self, location, user_id):
raise NotImplementedError
@abstractmethod
def unpublish(self, location, user_id):
raise NotImplementedError
@abstractmethod
def revert_to_published(self, location, user_id):
raise NotImplementedError
@abstractmethod
def has_published_version(self, xblock):
raise NotImplementedError
@abstractmethod
def convert_to_draft(self, location, user_id):
raise NotImplementedError
@abstractmethod
def import_xblock(self, user_id, course_key, block_type, block_id, fields=None, runtime=None, **kwargs):
"""
Import the given xblock into the current branch setting: import completely overwrites any
existing block of the same id.
In ModuleStoreDraftAndPublished, importing a published block ensures that access from the draft
will get a block (either the one imported or a preexisting one). See xml_importer
"""
raise NotImplementedError
class UnsupportedRevisionError(ValueError):
"""
This error is raised if a method is called with an unsupported revision parameter.
"""
def __init__(self, allowed_revisions=None):
if not allowed_revisions:
allowed_revisions = [
None,
ModuleStoreEnum.RevisionOption.published_only,
ModuleStoreEnum.RevisionOption.draft_only
]
super(UnsupportedRevisionError, self).__init__('revision not one of {}'.format(allowed_revisions))
| agpl-3.0 |
xfguo/pysnmp | pysnmp/entity/rfc3413/mibvar.py | 7 | 2726 | #
# THESE FUNCTIONS ARE OBSOLETE AND MUST NOT BE USED!
# USE pysnmp.entity.rfc3413.oneliner.mibvar INSTEAD
#
# MIB variable pretty printers/parsers
from pyasn1.type import univ
from pysnmp.smi.error import NoSuchObjectError
# Name
def mibNameToOid(mibView, name):
if isinstance(name[0], tuple):
f = lambda x='',y='': (x,y)
modName, symName = f(*name[0])
if modName: # load module if needed
mibView.mibBuilder.loadModules(modName)
else:
mibView.mibBuilder.loadModules() # load all (slow)
if symName:
oid, label, suffix = mibView.getNodeNameByDesc(symName, modName)
else:
oid, label, suffix = mibView.getFirstNodeName(modName)
suffix = name[1:]
modName, symName, _s = mibView.getNodeLocation(oid)
mibNode, = mibView.mibBuilder.importSymbols(
modName, symName
)
if hasattr(mibNode, 'createTest'): # table column XXX
modName, symName, _s = mibView.getNodeLocation(oid[:-1])
rowNode, = mibView.mibBuilder.importSymbols(modName, symName)
return oid, rowNode.getInstIdFromIndices(*suffix)
else: # scalar or incomplete spec
return oid, suffix
elif not isinstance(name, tuple):
name = tuple(univ.ObjectIdentifier(name))
oid, label, suffix = mibView.getNodeNameByOid(name)
return oid, suffix
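# Illustrative call (assuming pysnmp's bundled SNMPv2-MIB):
#   oid, suffix = mibNameToOid(mibView, (('SNMPv2-MIB', 'sysDescr'), 0))
# would yield oid == (1, 3, 6, 1, 2, 1, 1, 1) and suffix == (0,),
# since sysDescr resolves to a scalar and 0 is its instance identifier.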
__scalarSuffix = (univ.Integer(0),)
def oidToMibName(mibView, oid):
if not isinstance(oid, tuple):
oid = tuple(univ.ObjectIdentifier(oid))
_oid, label, suffix = mibView.getNodeNameByOid(oid)
modName, symName, __suffix = mibView.getNodeLocation(_oid)
mibNode, = mibView.mibBuilder.importSymbols(
modName, symName
)
if hasattr(mibNode, 'createTest'): # table column
__modName, __symName, __s = mibView.getNodeLocation(_oid[:-1])
rowNode, = mibView.mibBuilder.importSymbols(__modName, __symName)
return (symName, modName), rowNode.getIndicesFromInstId(suffix)
elif not suffix: # scalar
return (symName, modName), suffix
elif suffix == (0,): # scalar
return (symName, modName), __scalarSuffix
else:
raise NoSuchObjectError(
str='No MIB registered that defines %s object, closest known parent is %s (%s::%s)' % (univ.ObjectIdentifier(oid), univ.ObjectIdentifier(mibNode.name), modName, symName)
)
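# The reverse mapping, illustratively: feeding the scalar instance OID
# back in,
#   name, suffix = oidToMibName(mibView, (1, 3, 6, 1, 2, 1, 1, 1, 0))
# would return ('sysDescr', 'SNMPv2-MIB') and the scalar suffix
# (Integer(0),).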
# Value
def cloneFromMibValue(mibView, modName, symName, value):
mibNode, = mibView.mibBuilder.importSymbols(
modName, symName
)
if hasattr(mibNode, 'syntax'): # scalar
return mibNode.syntax.clone(value)
else:
return # identifier
| bsd-3-clause |
nelmiux/CarnotKE | jyhton/Lib/test/test_dict.py | 10 | 22663 | import unittest
from test import test_support
from java.util import Map
from java.util.concurrent import ConcurrentMap
import UserDict, random, string
import gc, weakref
class DictTest(unittest.TestCase):
_class = None
def _make_dict(self, pydict):
return pydict if not self._class else self._class(pydict)
def test_constructor(self):
# calling built-in types without argument must return empty
self.assertEqual(self._make_dict(dict()), {})
self.assertIsNot(self._make_dict(dict()), {})
def test_literal_constructor(self):
# check literal constructor for different sized dicts
# (to exercise the BUILD_MAP oparg).
for n in (0, 1, 6, 256, 400):
items = [(''.join(random.sample(string.letters, 8)), i)
for i in range(n)]
random.shuffle(items)
formatted_items = ('{!r}: {:d}'.format(k, v) for k, v in items)
dictliteral = '{' + ', '.join(formatted_items) + '}'
self.assertEqual(self._make_dict(eval(dictliteral)), dict(items))
def test_bool(self):
self.assertIs(not self._make_dict({}), True)
self.assertTrue(self._make_dict({1: 2}))
self.assertIs(bool(self._make_dict({})), False)
self.assertIs(bool(self._make_dict({1: 2})), True)
def test_keys(self):
d = self._make_dict({})
self.assertEqual(d.keys(), [])
d = {'a': 1, 'b': 2}
k = d.keys()
self.assertTrue(d.has_key('a'))
self.assertTrue(d.has_key('b'))
self.assertRaises(TypeError, d.keys, None)
def test_values(self):
d = self._make_dict({})
self.assertEqual(d.values(), [])
d = self._make_dict({1:2})
self.assertEqual(d.values(), [2])
self.assertRaises(TypeError, d.values, None)
def test_items(self):
d = self._make_dict({})
self.assertEqual(d.items(), [])
d = self._make_dict({1:2})
self.assertEqual(d.items(), [(1, 2)])
self.assertRaises(TypeError, d.items, None)
def test_has_key(self):
d = self._make_dict({})
self.assertFalse(d.has_key('a'))
d = self._make_dict({'a': 1, 'b': 2})
k = d.keys()
k.sort()
self.assertEqual(k, ['a', 'b'])
self.assertRaises(TypeError, d.has_key)
def test_contains(self):
d = self._make_dict({})
self.assertNotIn('a', d)
self.assertFalse('a' in d)
self.assertTrue('a' not in d)
d = self._make_dict({'a': 1, 'b': 2})
self.assertIn('a', d)
self.assertIn('b', d)
self.assertNotIn('c', d)
self.assertRaises(TypeError, d.__contains__)
def test_len(self):
d = self._make_dict({})
self.assertEqual(len(d), 0)
d = self._make_dict({'a': 1, 'b': 2})
self.assertEqual(len(d), 2)
def test_getitem(self):
d = self._make_dict({'a': 1, 'b': 2})
self.assertEqual(d['a'], 1)
self.assertEqual(d['b'], 2)
d['c'] = 3
d['a'] = 4
self.assertEqual(d['c'], 3)
self.assertEqual(d['a'], 4)
del d['b']
self.assertEqual(d, {'a': 4, 'c': 3})
self.assertRaises(TypeError, d.__getitem__)
class BadEq(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 24
d = self._make_dict({})
d[BadEq()] = 42
self.assertRaises(KeyError, d.__getitem__, 23)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.__getitem__, x)
def test_clear(self):
d = self._make_dict({1:1, 2:2, 3:3})
d.clear()
self.assertEqual(d, {})
self.assertRaises(TypeError, d.clear, None)
def test_update(self):
d = self._make_dict({})
d.update({1:100})
d.update({2:20})
d.update({1:1, 2:2, 3:3})
self.assertEqual(d, {1:1, 2:2, 3:3})
d.update()
self.assertEqual(d, {1:1, 2:2, 3:3})
self.assertRaises((TypeError, AttributeError), d.update, None)
class SimpleUserDict:
def __init__(self):
self.d = {1:1, 2:2, 3:3}
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
self.assertEqual(d, {1:1, 2:2, 3:3})
class Exc(Exception): pass
d.clear()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def next(self):
if self.i:
self.i = 0
return 'a'
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord('a')
def __iter__(self):
return self
def next(self):
if self.i <= ord('z'):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class badseq(object):
def __iter__(self):
return self
def next(self):
raise Exc()
d = self._make_dict({})
self.assertRaises(Exc, d.update, badseq())
self.assertRaises(ValueError, d.update, [(1, 2, 3)])
def test_fromkeys(self):
self.assertEqual(dict.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
d = self._make_dict({})
self.assertIsNot(d.fromkeys('abc'), d)
self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
self.assertEqual(d.fromkeys([]), {})
def g():
yield 1
self.assertEqual(d.fromkeys(g()), {1:None})
self.assertRaises(TypeError, {}.fromkeys, 3)
class dictlike(dict): pass
self.assertEqual(dictlike.fromkeys('a'), {'a':None})
self.assertEqual(dictlike().fromkeys('a'), {'a':None})
self.assertIsInstance(dictlike.fromkeys('a'), dictlike)
self.assertIsInstance(dictlike().fromkeys('a'), dictlike)
class mydict(dict):
def __new__(cls):
return UserDict.UserDict()
ud = mydict.fromkeys('ab')
self.assertEqual(ud, {'a':None, 'b':None})
self.assertIsInstance(ud, UserDict.UserDict)
self.assertRaises(TypeError, dict.fromkeys)
class Exc(Exception): pass
class baddict1(dict):
def __init__(self):
raise Exc()
self.assertRaises(Exc, baddict1.fromkeys, [1])
class BadSeq(object):
def __iter__(self):
return self
def next(self):
raise Exc()
self.assertRaises(Exc, dict.fromkeys, BadSeq())
class baddict2(dict):
def __setitem__(self, key, value):
raise Exc()
self.assertRaises(Exc, baddict2.fromkeys, [1])
# test fast path for dictionary inputs
d = dict(zip(range(6), range(6)))
self.assertEqual(dict.fromkeys(d, 0), dict(zip(range(6), [0]*6)))
def test_copy(self):
d = self._make_dict({1:1, 2:2, 3:3})
self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
self.assertEqual(self._make_dict({}).copy(), {})
self.assertRaises(TypeError, d.copy, None)
def test_get(self):
d = self._make_dict({})
self.assertIs(d.get('c'), None)
self.assertEqual(d.get('c', 3), 3)
d = self._make_dict({'a': 1, 'b': 2})
self.assertIs(d.get('c'), None)
self.assertEqual(d.get('c', 3), 3)
self.assertEqual(d.get('a'), 1)
self.assertEqual(d.get('a', 3), 1)
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
def test_setdefault(self):
# dict.setdefault()
d = self._make_dict({})
self.assertIs(d.setdefault('key0'), None)
d.setdefault('key0', [])
self.assertIs(d.setdefault('key0'), None)
d.setdefault('key', []).append(3)
self.assertEqual(d['key'][0], 3)
d.setdefault('key', []).append(4)
self.assertEqual(len(d['key']), 2)
self.assertRaises(TypeError, d.setdefault)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.setdefault, x, [])
def test_setdefault_atomic(self):
# Issue #13521: setdefault() calls __hash__ and __eq__ only once.
class Hashed(object):
def __init__(self):
self.hash_count = 0
self.eq_count = 0
def __hash__(self):
self.hash_count += 1
return 42
def __eq__(self, other):
self.eq_count += 1
return id(self) == id(other)
hashed1 = Hashed()
y = self._make_dict({hashed1: 5})
if isinstance(y, Map) and not isinstance(y, ConcurrentMap):
raise unittest.SkipTest("java.util.Map objects that do not implement ConcurrentMap have no concurrency guarantees")
# given that there are potentially multiple copies of the
# above dict in self._make_dict, record the hash_count so it
# can be subtracted out
setup_hash_count = hashed1.hash_count
hashed2 = Hashed()
y.setdefault(hashed2, [])
self.assertEqual(hashed1.hash_count, setup_hash_count)
self.assertEqual(hashed2.hash_count, 1)
self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
def test_popitem(self):
# dict.popitem()
for copymode in -1, +1:
# -1: b has same structure as a
# +1: b is a.copy()
for log2size in range(12):
size = 2**log2size
a = {}
b = {}
for i in range(size):
a[repr(i)] = i
if copymode < 0:
b[repr(i)] = i
if copymode > 0:
b = a.copy()
for i in range(size):
ka, va = ta = a.popitem()
self.assertEqual(va, int(ka))
kb, vb = tb = b.popitem()
self.assertEqual(vb, int(kb))
self.assertFalse(copymode < 0 and ta != tb)
self.assertFalse(a)
self.assertFalse(b)
d = self._make_dict({})
self.assertRaises(KeyError, d.popitem)
def test_pop(self):
# Tests for pop with specified key
d = self._make_dict({})
k, v = 'abc', 'def'
d[k] = v
self.assertRaises(KeyError, d.pop, 'ghi')
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
# verify longs/ints get same value when key > 32 bits
# (for 64-bit archs). See SF bug #689659.
x = 4503599627370496L
y = 4503599627370496
h = self._make_dict({x: 'anything', y: 'something else'})
self.assertEqual(h[x], h[y])
self.assertEqual(d.pop(k, v), v)
d[k] = v
self.assertEqual(d.pop(k, 1), v)
self.assertRaises(TypeError, d.pop)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.pop, x)
@unittest.skipIf(test_support.is_jython, "Weakly consistent iteration is compatible with mutation")
def test_mutatingiteration(self):
# changing dict size during iteration
d = self._make_dict({})
d[1] = 1
with self.assertRaises(RuntimeError):
for i in d:
d[i+1] = 1
def test_repr(self):
d = self._make_dict({})
self.assertEqual(repr(d), '{}')
d[1] = 2
self.assertEqual(repr(d), '{1: 2}')
d = self._make_dict({})
d[1] = d
self.assertEqual(repr(d), '{1: {...}}')
class Exc(Exception): pass
class BadRepr(object):
def __repr__(self):
raise Exc()
d = self._make_dict({1: BadRepr()})
self.assertRaises(Exc, repr, d)
def test_le(self):
self.assertFalse(self._make_dict({}) < {})
self.assertFalse(self._make_dict({1: 2}) < {1L: 2L})
class Exc(Exception): pass
class BadCmp(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 42
d1 = self._make_dict({BadCmp(): 1})
d2 = self._make_dict({1: 1})
with self.assertRaises(Exc):
d1 < d2
def test_missing(self):
# Make sure dict doesn't have a __missing__ method
self.assertFalse(hasattr(dict, "__missing__"))
self.assertFalse(hasattr(self._make_dict({}), "__missing__"))
# Test several cases:
# (D) subclass defines __missing__ method returning a value
# (E) subclass defines __missing__ method raising RuntimeError
# (F) subclass sets __missing__ instance variable (no effect)
# (G) subclass doesn't define __missing__ at a all
class D(dict):
def __missing__(self, key):
return 42
d = D({1: 2, 3: 4})
self.assertEqual(d[1], 2)
self.assertEqual(d[3], 4)
self.assertNotIn(2, d)
self.assertNotIn(2, d.keys())
self.assertEqual(d[2], 42)
class E(dict):
def __missing__(self, key):
raise RuntimeError(key)
e = E()
with self.assertRaises(RuntimeError) as c:
e[42]
self.assertEqual(c.exception.args, (42,))
class F(dict):
def __init__(self):
# An instance variable __missing__ should have no effect
self.__missing__ = lambda key: None
f = F()
with self.assertRaises(KeyError) as c:
f[42]
self.assertEqual(c.exception.args, (42,))
class G(dict):
pass
g = G()
with self.assertRaises(KeyError) as c:
g[42]
self.assertEqual(c.exception.args, (42,))
def test_tuple_keyerror(self):
# SF #1576657
d = self._make_dict({})
with self.assertRaises(KeyError) as c:
d[(1,)]
self.assertEqual(c.exception.args, ((1,),))
def test_bad_key(self):
# Dictionary lookups should fail if __cmp__() raises an exception.
class CustomException(Exception):
pass
class BadDictKey:
def __hash__(self):
return hash(self.__class__)
def __cmp__(self, other):
if isinstance(other, self.__class__):
raise CustomException
return other
d = self._make_dict({})
x1 = BadDictKey()
x2 = BadDictKey()
d[x1] = 1
for stmt in ['d[x2] = 2',
'z = d[x2]',
'x2 in d',
'd.has_key(x2)',
'd.get(x2)',
'd.setdefault(x2, 42)',
'd.pop(x2)',
'd.update({x2: 2})']:
with self.assertRaises(CustomException):
exec stmt in locals()
def test_resize1(self):
# Dict resizing bug, found by Jack Jansen in 2.2 CVS development.
# This version got an assert failure in debug build, infinite loop in
# release build. Unfortunately, provoking this kind of stuff requires
# a mix of inserts and deletes hitting exactly the right hash codes in
# exactly the right order, and I can't think of a randomized approach
# that would be *likely* to hit a failing case in reasonable time.
d = self._make_dict({})
for i in range(5):
d[i] = i
for i in range(5):
del d[i]
for i in range(5, 9): # i==8 was the problem
d[i] = i
def test_resize2(self):
# Another dict resizing bug (SF bug #1456209).
# This caused Segmentation faults or Illegal instructions.
class X(object):
def __hash__(self):
return 5
def __eq__(self, other):
if resizing:
d.clear()
return False
d = self._make_dict({})
resizing = False
d[X()] = 1
d[X()] = 2
d[X()] = 3
d[X()] = 4
d[X()] = 5
# now trigger a resize
resizing = True
d[9] = 6
def test_empty_presized_dict_in_freelist(self):
# Bug #3537: if an empty but presized dict with a size larger
# than 7 was in the freelist, it triggered an assertion failure
with self.assertRaises(ZeroDivisionError):
d = self._make_dict(
{'a': 1 // 0, 'b': None, 'c': None, 'd': None, 'e': None,
'f': None, 'g': None, 'h': None})
d = {}
def test_container_iterator(self):
# Bug #3680: tp_traverse was not implemented for dictiter objects
class C(object):
pass
iterators = (dict.iteritems, dict.itervalues, dict.iterkeys)
for i in iterators:
obj = C()
ref = weakref.ref(obj)
container = {obj: 1}
obj.x = i(container)
del obj, container
gc.collect()
self.assertIs(ref(), None, "Cycle was not collected")
def _not_tracked(self, t):
# Nested containers can take several collections to untrack
gc.collect()
gc.collect()
self.assertFalse(gc.is_tracked(t), t)
def _tracked(self, t):
self.assertTrue(gc.is_tracked(t), t)
gc.collect()
gc.collect()
self.assertTrue(gc.is_tracked(t), t)
@test_support.cpython_only
def test_track_literals(self):
# Test GC-optimization of dict literals
x, y, z, w = 1.5, "a", (1, None), []
self._not_tracked({})
self._not_tracked({x:(), y:x, z:1})
self._not_tracked({1: "a", "b": 2})
self._not_tracked({1: 2, (None, True, False, ()): int})
self._not_tracked({1: object()})
# Dicts with mutable elements are always tracked, even if those
# elements are not tracked right now.
self._tracked({1: []})
self._tracked({1: ([],)})
self._tracked({1: {}})
self._tracked({1: set()})
@test_support.cpython_only
def test_track_dynamic(self):
# Test GC-optimization of dynamically-created dicts
class MyObject(object):
pass
x, y, z, w, o = 1.5, "a", (1, object()), [], MyObject()
d = dict()
self._not_tracked(d)
d[1] = "a"
self._not_tracked(d)
d[y] = 2
self._not_tracked(d)
d[z] = 3
self._not_tracked(d)
self._not_tracked(d.copy())
d[4] = w
self._tracked(d)
self._tracked(d.copy())
d[4] = None
self._not_tracked(d)
self._not_tracked(d.copy())
# dd isn't tracked right now, but it may mutate and therefore d
# which contains it must be tracked.
d = dict()
dd = dict()
d[1] = dd
self._not_tracked(dd)
self._tracked(d)
dd[1] = d
self._tracked(dd)
d = dict.fromkeys([x, y, z])
self._not_tracked(d)
dd = dict()
dd.update(d)
self._not_tracked(dd)
d = dict.fromkeys([x, y, z, o])
self._tracked(d)
dd = dict()
dd.update(d)
self._tracked(dd)
d = dict(x=x, y=y, z=z)
self._not_tracked(d)
d = dict(x=x, y=y, z=z, w=w)
self._tracked(d)
d = dict()
d.update(x=x, y=y, z=z)
self._not_tracked(d)
d.update(w=w)
self._tracked(d)
d = dict([(x, y), (z, 1)])
self._not_tracked(d)
d = dict([(x, y), (z, w)])
self._tracked(d)
d = dict()
d.update([(x, y), (z, 1)])
self._not_tracked(d)
d.update([(x, y), (z, w)])
self._tracked(d)
@test_support.cpython_only
def test_track_subtypes(self):
# Dict subtypes are always tracked
class MyDict(dict):
pass
self._tracked(MyDict())
def test_list_equality(self):
class A(dict): pass
for dtype in (A, dict):
self.assertEquals([dtype()], [dict()])
from test import mapping_tests
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
type2test = dict
class Dict(dict):
pass
class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
type2test = Dict
def test_main():
with test_support.check_py3k_warnings(
('dict(.has_key..| inequality comparisons) not supported in 3.x',
DeprecationWarning)):
test_support.run_unittest(
DictTest,
GeneralMappingTests,
SubclassMappingTests,
)
if __name__ == "__main__":
test_main()
| apache-2.0 |
russell/fairy-slipper | config.py | 2 | 2149 | # Copyright (c) 2015 Russell Sim <russell.sim@gmail.com>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Server Specific Configurations
server = {
'port': '8080',
'host': '0.0.0.0'
}
# Pecan Application Configurations
app = {
'root': 'fairy_slipper.controllers.root.RootController',
'modules': ['fairy_slipper'],
'static_root': '%(confdir)s/public',
'template_path': '%(confdir)s/fairy_slipper/templates',
'api_doc': '%(confdir)s/api_doc',
'debug': True,
'errors': {
404: '/error/404',
'__force_dict__': True
}
}
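# Note: pecan substitutes '%(confdir)s' in the paths above with the
# directory containing this configuration file when the app is loaded
# (behaviour assumed from pecan's configuration handling).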
logging = {
'root': {'level': 'INFO', 'handlers': ['console']},
'loggers': {
'fairy_slipper': {'level': 'DEBUG', 'handlers': ['console']},
'pecan.commands.serve': {'level': 'DEBUG', 'handlers': ['console']},
'py.warnings': {'handlers': ['console']},
'__force_dict__': True
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'color'
}
},
'formatters': {
'simple': {
'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]'
'[%(threadName)s] %(message)s')
},
'color': {
'()': 'pecan.log.ColorFormatter',
'format': ('%(asctime)s [%(padded_color_levelname)s] [%(name)s]'
'[%(threadName)s] %(message)s'),
'__force_dict__': True
}
}
}
# Custom Configurations must be in Python dictionary format::
#
# foo = {'bar':'baz'}
#
# All configurations are accessible at::
# pecan.conf
| apache-2.0 |
slevenhagen/odoo | addons/l10n_si/account_wizard.py | 255 | 1120 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) conexus.at
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class AccountWizard_cd(osv.osv_memory):
_inherit='wizard.multi.charts.accounts'
_defaults = {
'code_digits' : 6,
}
| agpl-3.0 |
rduivenvoorde/QGIS | python/plugins/processing/gui/MultipleFileInputDialog.py | 45 | 4865 | # -*- coding: utf-8 -*-
"""
***************************************************************************
MultipleExternalInputDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
(C) 2013 by CS Systemes d'information (CS SI)
Email : volayaf at gmail dot com
otb at c-s dot fr (CS SI)
Contributors : Victor Olaya - basis from MultipleInputDialog
Alexia Mondot (CS SI) - new parameter
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
import warnings
from qgis.core import QgsSettings
from qgis.PyQt import uic
from qgis.PyQt.QtCore import QByteArray
from qgis.PyQt.QtWidgets import QDialog, QAbstractItemView, QPushButton, QDialogButtonBox, QFileDialog
from qgis.PyQt.QtGui import QStandardItemModel, QStandardItem
pluginPath = os.path.split(os.path.dirname(__file__))[0]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgMultipleSelection.ui'))
class MultipleFileInputDialog(BASE, WIDGET):
def __init__(self, options):
super(MultipleFileInputDialog, self).__init__(None)
self.setupUi(self)
self.lstLayers.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.selectedoptions = options
# Additional buttons
self.btnAdd = QPushButton(self.tr('Add file'))
self.buttonBox.addButton(self.btnAdd,
QDialogButtonBox.ActionRole)
self.btnRemove = QPushButton(self.tr('Remove file(s)'))
self.buttonBox.addButton(self.btnRemove,
QDialogButtonBox.ActionRole)
self.btnRemoveAll = QPushButton(self.tr('Remove all'))
self.buttonBox.addButton(self.btnRemoveAll,
QDialogButtonBox.ActionRole)
self.btnAdd.clicked.connect(self.addFile)
self.btnRemove.clicked.connect(lambda: self.removeRows())
self.btnRemoveAll.clicked.connect(lambda: self.removeRows(True))
self.settings = QgsSettings()
self.restoreGeometry(self.settings.value("/Processing/multipleFileInputDialogGeometry", QByteArray()))
self.populateList()
self.finished.connect(self.saveWindowGeometry)
def saveWindowGeometry(self):
        self.settings.setValue("/Processing/multipleFileInputDialogGeometry", self.saveGeometry())
def populateList(self):
model = QStandardItemModel()
for option in self.selectedoptions:
item = QStandardItem(option)
model.appendRow(item)
self.lstLayers.setModel(model)
def accept(self):
self.selectedoptions = []
model = self.lstLayers.model()
for i in range(model.rowCount()):
item = model.item(i)
self.selectedoptions.append(item.text())
QDialog.accept(self)
def reject(self):
QDialog.reject(self)
def addFile(self):
settings = QgsSettings()
if settings.contains('/Processing/LastInputPath'):
path = settings.value('/Processing/LastInputPath')
else:
path = ''
files, selected_filter = QFileDialog.getOpenFileNames(self,
self.tr('Select File(s)'), path, self.tr('All files (*.*)'))
if len(files) == 0:
return
model = self.lstLayers.model()
for filePath in files:
item = QStandardItem(filePath)
model.appendRow(item)
settings.setValue('/Processing/LastInputPath',
os.path.dirname(files[0]))
def removeRows(self, removeAll=False):
if removeAll:
self.lstLayers.model().clear()
else:
self.lstLayers.setUpdatesEnabled(False)
indexes = sorted(self.lstLayers.selectionModel().selectedIndexes())
for i in reversed(indexes):
self.lstLayers.model().removeRow(i.row())
self.lstLayers.setUpdatesEnabled(True)
| gpl-2.0 |
Athrun29/horizon | openstack_dashboard/test/test_data/trove_data.py | 6 | 9541 | # Copyright 2013 Rackspace Hosting.
# Copyright 2015 HP Software, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from troveclient.v1 import backups
from troveclient.v1 import clusters
from troveclient.v1 import databases
from troveclient.v1 import datastores
from troveclient.v1 import flavors
from troveclient.v1 import instances
from troveclient.v1 import users
from openstack_dashboard.test.test_data import utils
CLUSTER_DATA_ONE = {
"status": "ACTIVE",
"id": "dfbbd9ca-b5e1-4028-adb7-f78643e17998",
"name": "Test Cluster",
"created": "2014-04-25T20:19:23",
"updated": "2014-04-25T20:19:23",
"links": [],
"datastore": {
"type": "mongodb",
"version": "2.6"
},
"ip": ["10.0.0.1"],
"instances": [
{
"id": "416b0b16-ba55-4302-bbd3-ff566032e1c1",
"shard_id": "5415b62f-f301-4e84-ba90-8ab0734d15a7",
"flavor": {
"id": "7",
"links": []
},
"volume": {
"size": 100
}
},
{
"id": "965ef811-7c1d-47fc-89f2-a89dfdd23ef2",
"shard_id": "5415b62f-f301-4e84-ba90-8ab0734d15a7",
"flavor": {
"id": "7",
"links": []
},
"volume": {
"size": 100
}
},
{
"id": "3642f41c-e8ad-4164-a089-3891bf7f2d2b",
"shard_id": "5415b62f-f301-4e84-ba90-8ab0734d15a7",
"flavor": {
"id": "7",
"links": []
},
"volume": {
"size": 100
}
}
],
"task": {
"name": "test_task"
}
}
CLUSTER_DATA_TWO = {
"status": "ACTIVE",
"id": "dfbbd9ca-b5e1-4028-adb7-f78643e17998",
"name": "Test Cluster",
"created": "2014-04-25T20:19:23",
"updated": "2014-04-25T20:19:23",
"links": [],
"datastore": {
"type": "vertica",
"version": "7.1"
},
"ip": ["10.0.0.1"],
"instances": [
{
"id": "416b0b16-ba55-4302-bbd3-ff566032e1c1",
"flavor": {
"id": "7",
"links": []
},
"volume": {
"size": 100
}
},
{
"id": "965ef811-7c1d-47fc-89f2-a89dfdd23ef2",
"flavor": {
"id": "7",
"links": []
},
"volume": {
"size": 100
}
},
{
"id": "3642f41c-e8ad-4164-a089-3891bf7f2d2b",
"flavor": {
"id": "7",
"links": []
},
"volume": {
"size": 100
}
}
]
}
DATABASE_DATA_ONE = {
"status": "ACTIVE",
"updated": "2013-08-12T22:00:09",
"name": "Test Database",
"links": [],
"created": "2013-08-12T22:00:03",
"ip": [
"10.0.0.3",
],
"volume": {
"used": 0.13,
"size": 1,
},
"flavor": {
"id": "1",
"links": [],
},
"datastore": {
"type": "mysql",
"version": "5.5"
},
"id": "6ddc36d9-73db-4e23-b52e-368937d72719",
}
DATABASE_DATA_TWO = {
"status": "ACTIVE",
"updated": "2013-08-12T22:00:09",
"name": "Test Database With DNS",
"links": [],
"created": "2013-08-12T22:00:03",
"hostname": "trove.instance-2.com",
"volume": {
"used": 0.13,
"size": 1,
},
"flavor": {
"id": "1",
"links": [],
},
"datastore": {
"type": "mysql",
"version": "5.6"
},
"id": "4d7b3f57-44f5-41d2-8e86-36b88cad572a",
}
BACKUP_ONE = {
"instance_id": "6ddc36d9-73db-4e23-b52e-368937d72719",
"status": "COMPLETED",
"updated": "2013-08-13T19:39:38",
"locationRef": "http://swift/v1/AUTH/database_backups/0edb.tar.gz",
"name": "backup1",
"created": "2013-08-15T18:10:14",
"size": 0.13,
"id": "0edb3c14-8919-4583-9add-00df9e524081",
"description": "Long description of backup",
}
BACKUP_TWO = {
"instance_id": "4d7b3f57-44f5-41d2-8e86-36b88cad572a",
"status": "COMPLETED",
"updated": "2013-08-10T20:20:44",
"locationRef": "http://swift/v1/AUTH/database_backups/e460.tar.gz",
"name": "backup2",
"created": "2013-08-10T20:20:37",
"size": 0.13,
"id": "e4602a3c-2bca-478f-b059-b6c215510fb4",
"description": "Longer description of backup",
}
BACKUP_TWO_INC = {
"instance_id": "4d7b3f57-44f5-41d2-8e86-36b88cad572a",
"status": "COMPLETED",
"updated": "2013-08-10T20:20:55",
"locationRef": "http://swift/v1/AUTH/database_backups/f145.tar.gz",
"name": "backup2-Incr",
"created": "2013-08-10T20:20:37",
"size": 0.13,
"id": "e4602a3c-2bca-478f-b059-b6c215510fb5",
"description": "Longer description of backup",
"parent_id": "e4602a3c-2bca-478f-b059-b6c215510fb4",
}
USER_ONE = {
"name": "Test_User",
"host": "%",
"databases": [DATABASE_DATA_ONE["name"]],
}
USER_DB_ONE = {
"name": "db1",
}
DATASTORE_ONE = {
"id": "537fb940-b5eb-40d9-bdbd-91a3dcb9c17d",
"links": [],
"name": "mysql"
}
DATASTORE_TWO = {
"id": "ccb31517-c472-409d-89b4-1a13db6bdd36",
"links": [],
"name": "mysql"
}
DATASTORE_MONGODB = {
"id": "ccb31517-c472-409d-89b4-1a13db6bdd37",
"links": [],
"name": "mongodb"
}
VERSION_ONE = {
"name": "5.5",
"links": [],
"image": "b7956bb5-920e-4299-b68e-2347d830d939",
"active": 1,
"datastore": "537fb940-b5eb-40d9-bdbd-91a3dcb9c17d",
"packages": "5.5",
"id": "390a6d52-8347-4e00-8e4c-f4fa9cf96ae9"
}
VERSION_TWO = {
"name": "5.6",
"links": [],
"image": "c7956bb5-920e-4299-b68e-2347d830d938",
"active": 1,
"datastore": "537fb940-b5eb-40d9-bdbd-91a3dcb9c17d",
"packages": "5.6",
"id": "500a6d52-8347-4e00-8e4c-f4fa9cf96ae9"
}
FLAVOR_ONE = {
"ram": 512,
"id": "1",
"links": [],
"name": "m1.tiny"
}
FLAVOR_TWO = {
"ram": 768,
"id": "10",
"links": [],
"name": "eph.rd-smaller"
}
FLAVOR_THREE = {
"ram": 800,
"id": "100",
"links": [],
"name": "test.1"
}
VERSION_MONGODB_2_6 = {
"name": "2.6",
"links": [],
"image": "c7956bb5-920e-4299-b68e-2347d830d937",
"active": 1,
"datastore": "ccb31517-c472-409d-89b4-1a13db6bdd37",
"packages": "2.6",
"id": "600a6d52-8347-4e00-8e4c-f4fa9cf96ae9"
}
def data(TEST):
cluster1 = clusters.Cluster(clusters.Clusters(None),
CLUSTER_DATA_ONE)
cluster2 = clusters.Cluster(clusters.Clusters(None),
CLUSTER_DATA_TWO)
database1 = instances.Instance(instances.Instances(None),
DATABASE_DATA_ONE)
database2 = instances.Instance(instances.Instances(None),
DATABASE_DATA_TWO)
bkup1 = backups.Backup(backups.Backups(None), BACKUP_ONE)
bkup2 = backups.Backup(backups.Backups(None), BACKUP_TWO)
bkup3 = backups.Backup(backups.Backups(None), BACKUP_TWO_INC)
user1 = users.User(users.Users(None), USER_ONE)
user_db1 = databases.Database(databases.Databases(None),
USER_DB_ONE)
datastore1 = datastores.Datastore(datastores.Datastores(None),
DATASTORE_ONE)
version1 = datastores.\
DatastoreVersion(datastores.DatastoreVersions(None),
VERSION_ONE)
flavor1 = flavors.Flavor(flavors.Flavors(None), FLAVOR_ONE)
flavor2 = flavors.Flavor(flavors.Flavors(None), FLAVOR_TWO)
flavor3 = flavors.Flavor(flavors.Flavors(None), FLAVOR_THREE)
datastore_mongodb = datastores.Datastore(datastores.Datastores(None),
DATASTORE_MONGODB)
version_mongodb_2_6 = datastores.\
DatastoreVersion(datastores.DatastoreVersions(None),
VERSION_MONGODB_2_6)
TEST.trove_clusters = utils.TestDataContainer()
TEST.trove_clusters.add(cluster1)
TEST.trove_clusters.add(cluster2)
TEST.databases = utils.TestDataContainer()
TEST.database_backups = utils.TestDataContainer()
TEST.database_users = utils.TestDataContainer()
TEST.database_user_dbs = utils.TestDataContainer()
TEST.database_flavors = utils.TestDataContainer()
TEST.databases.add(database1)
TEST.databases.add(database2)
TEST.database_backups.add(bkup1)
TEST.database_backups.add(bkup2)
TEST.database_backups.add(bkup3)
TEST.database_users.add(user1)
TEST.database_user_dbs.add(user_db1)
TEST.datastores = utils.TestDataContainer()
TEST.datastores.add(datastore1)
TEST.datastores.add(datastore_mongodb)
TEST.database_flavors.add(flavor1, flavor2, flavor3)
TEST.datastore_versions = utils.TestDataContainer()
TEST.datastore_versions.add(version_mongodb_2_6)
TEST.datastore_versions.add(version1)
| apache-2.0 |
ttanner/kryptomime | docs/conf.py | 1 | 8229 | # -*- coding: utf-8 -*-
#
# kryptomime documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 20 12:30:16 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join('..')))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'kryptomime'
copyright = u'2013, Thomas Tanner'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'kryptomimedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'kryptomime.tex', u'kryptomime Documentation',
u'Thomas Tanner', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'kryptomime', u'kryptomime Documentation',
[u'Thomas Tanner'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'kryptomime', u'kryptomime Documentation',
u'Thomas Tanner', 'kryptomime', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Autodoc settings ----------------------------------------------------------
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members', 'show-inheritance', 'undoc-members', 'show-hidden']
autoclass_content = 'both'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| lgpl-3.0 |
hamiltont/CouchPotatoServer | libs/git/exceptions.py | 122 | 2527 | # Copyright (c) 2009, Rotem Yaari <vmalloc@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
class GitException(Exception):
def __init__(self, msg):
super(GitException, self).__init__()
self.msg = msg
def __repr__(self):
return "%s: %s" % (type(self).__name__, self.msg)
__str__ = __repr__
class CannotFindRepository(GitException):
pass
class MergeConflict(GitException):
def __init__(self, msg='Merge Conflict'):
super(MergeConflict, self).__init__(msg=msg)
class GitCommandFailedException(GitException):
def __init__(self, directory, command, popen):
super(GitCommandFailedException, self).__init__(None)
self.command = command
self.directory = os.path.abspath(directory)
self.stderr = popen.stderr.read()
self.stdout = popen.stdout.read()
self.popen = popen
self.msg = "Command %r failed in %s (%s):\n%s\n%s" % (command, self.directory, popen.returncode,
self.stderr, self.stdout)
class NonexistentRefException(GitException):
pass
| gpl-3.0 |
p4datasystems/CarnotKEdist | dist/Lib/test/test_mhlib.py | 11 | 11192 | """
Tests for the mhlib module
Nick Mathewson
"""
### BUG: This suite doesn't currently test the mime functionality of
### mhlib. It should.
import unittest
from test.test_support import is_jython, run_unittest, TESTFN, import_module
import os, StringIO
import sys
mhlib = import_module('mhlib', deprecated=True)
if (sys.platform.startswith("win") or sys.platform=="riscos" or
sys.platform.startswith("atheos") or (is_jython and os._name != 'posix')):
# mhlib.updateline() renames a file to the name of a file that already
# exists. That causes a reasonable OS <wink> to complain in test_sequence
# here, like the "OSError: [Errno 17] File exists" raised on Windows.
# mhlib's listsubfolders() and listallfolders() do something with
# link counts, and that causes test_listfolders() here to get back
# an empty list from its call of listallfolders().
# The other tests here pass on Windows.
raise unittest.SkipTest("skipped on %s -- " % sys.platform +
"too many Unix assumptions")
_mhroot = TESTFN+"_MH"
_mhpath = os.path.join(_mhroot, "MH")
_mhprofile = os.path.join(_mhroot, ".mh_profile")
def normF(f):
return os.path.join(*f.split('/'))
def writeFile(fname, contents):
dir = os.path.split(fname)[0]
if dir and not os.path.exists(dir):
mkdirs(dir)
f = open(fname, 'w')
f.write(contents)
f.close()
def readFile(fname):
f = open(fname)
r = f.read()
f.close()
return r
def writeProfile(dict):
contents = [ "%s: %s\n" % (k, v) for k, v in dict.iteritems() ]
writeFile(_mhprofile, "".join(contents))
def writeContext(folder):
folder = normF(folder)
writeFile(os.path.join(_mhpath, "context"),
"Current-Folder: %s\n" % folder)
def writeCurMessage(folder, cur):
folder = normF(folder)
writeFile(os.path.join(_mhpath, folder, ".mh_sequences"),
"cur: %s\n"%cur)
def writeMessage(folder, n, headers, body):
folder = normF(folder)
headers = "".join([ "%s: %s\n" % (k, v) for k, v in headers.iteritems() ])
contents = "%s\n%s\n" % (headers,body)
mkdirs(os.path.join(_mhpath, folder))
writeFile(os.path.join(_mhpath, folder, str(n)), contents)
def getMH():
return mhlib.MH(os.path.abspath(_mhpath), _mhprofile)
def sortLines(s):
lines = s.split("\n")
lines = [ line.strip() for line in lines if len(line) >= 2 ]
lines.sort()
return lines
# These next 2 functions are copied from test_glob.py.
def mkdirs(fname):
if os.path.exists(fname) or fname == '':
return
base, file = os.path.split(fname)
mkdirs(base)
os.mkdir(fname)
def deltree(fname):
if not os.path.exists(fname):
return
for f in os.listdir(fname):
fullname = os.path.join(fname, f)
if os.path.isdir(fullname):
deltree(fullname)
else:
try:
os.unlink(fullname)
except:
pass
try:
os.rmdir(fname)
except:
pass
class MhlibTests(unittest.TestCase):
def setUp(self):
deltree(_mhroot)
mkdirs(_mhpath)
writeProfile({'Path' : os.path.abspath(_mhpath),
'Editor': 'emacs',
'ignored-attribute': 'camping holiday'})
# Note: These headers aren't really conformant to RFC822, but
# mhlib shouldn't care about that.
# An inbox with a couple of messages.
writeMessage('inbox', 1,
{'From': 'Mrs. Premise',
'To': 'Mrs. Conclusion',
'Date': '18 July 2001'}, "Hullo, Mrs. Conclusion!\n")
writeMessage('inbox', 2,
{'From': 'Mrs. Conclusion',
'To': 'Mrs. Premise',
'Date': '29 July 2001'}, "Hullo, Mrs. Premise!\n")
# A folder with many messages
for i in range(5, 101)+range(101, 201, 2):
writeMessage('wide', i,
{'From': 'nowhere', 'Subject': 'message #%s' % i},
"This is message number %s\n" % i)
# A deeply nested folder
def deep(folder, n):
writeMessage(folder, n,
{'Subject': 'Message %s/%s' % (folder, n) },
"This is message number %s in %s\n" % (n, folder) )
deep('deep/f1', 1)
deep('deep/f1', 2)
deep('deep/f1', 3)
deep('deep/f2', 4)
deep('deep/f2', 6)
deep('deep', 3)
deep('deep/f2/f3', 1)
deep('deep/f2/f3', 2)
def tearDown(self):
deltree(_mhroot)
def test_basic(self):
writeContext('inbox')
writeCurMessage('inbox', 2)
mh = getMH()
eq = self.assertEqual
eq(mh.getprofile('Editor'), 'emacs')
eq(mh.getprofile('not-set'), None)
eq(mh.getpath(), os.path.abspath(_mhpath))
eq(mh.getcontext(), 'inbox')
mh.setcontext('wide')
eq(mh.getcontext(), 'wide')
eq(readFile(os.path.join(_mhpath, 'context')),
"Current-Folder: wide\n")
mh.setcontext('inbox')
inbox = mh.openfolder('inbox')
eq(inbox.getfullname(),
os.path.join(os.path.abspath(_mhpath), 'inbox'))
eq(inbox.getsequencesfilename(),
os.path.join(os.path.abspath(_mhpath), 'inbox', '.mh_sequences'))
eq(inbox.getmessagefilename(1),
os.path.join(os.path.abspath(_mhpath), 'inbox', '1'))
def test_listfolders(self):
mh = getMH()
eq = self.assertEqual
folders = mh.listfolders()
folders.sort()
eq(folders, ['deep', 'inbox', 'wide'])
folders = mh.listallfolders()
folders.sort()
tfolders = map(normF, ['deep', 'deep/f1', 'deep/f2', 'deep/f2/f3',
'inbox', 'wide'])
tfolders.sort()
eq(folders, tfolders)
folders = mh.listsubfolders('deep')
folders.sort()
eq(folders, map(normF, ['deep/f1', 'deep/f2']))
folders = mh.listallsubfolders('deep')
folders.sort()
eq(folders, map(normF, ['deep/f1', 'deep/f2', 'deep/f2/f3']))
eq(mh.listsubfolders(normF('deep/f2')), [normF('deep/f2/f3')])
eq(mh.listsubfolders('inbox'), [])
eq(mh.listallsubfolders('inbox'), [])
def test_sequence(self):
mh = getMH()
eq = self.assertEqual
writeCurMessage('wide', 55)
f = mh.openfolder('wide')
all = f.listmessages()
eq(all, range(5, 101)+range(101, 201, 2))
eq(f.getcurrent(), 55)
f.setcurrent(99)
eq(readFile(os.path.join(_mhpath, 'wide', '.mh_sequences')),
'cur: 99\n')
def seqeq(seq, val):
eq(f.parsesequence(seq), val)
seqeq('5-55', range(5, 56))
seqeq('90-108', range(90, 101)+range(101, 109, 2))
seqeq('90-108', range(90, 101)+range(101, 109, 2))
seqeq('10:10', range(10, 20))
seqeq('10:+10', range(10, 20))
seqeq('101:10', range(101, 121, 2))
seqeq('cur', [99])
seqeq('.', [99])
seqeq('prev', [98])
seqeq('next', [100])
seqeq('cur:-3', [97, 98, 99])
seqeq('first-cur', range(5, 100))
seqeq('150-last', range(151, 201, 2))
seqeq('prev-next', [98, 99, 100])
lowprimes = [5, 7, 11, 13, 17, 19, 23, 29]
lowcompos = [x for x in range(5, 31) if not x in lowprimes ]
f.putsequences({'cur': [5],
'lowprime': lowprimes,
'lowcompos': lowcompos})
seqs = readFile(os.path.join(_mhpath, 'wide', '.mh_sequences'))
seqs = sortLines(seqs)
eq(seqs, ["cur: 5",
"lowcompos: 6 8-10 12 14-16 18 20-22 24-28 30",
"lowprime: 5 7 11 13 17 19 23 29"])
seqeq('lowprime', lowprimes)
seqeq('lowprime:1', [5])
seqeq('lowprime:2', [5, 7])
seqeq('lowprime:-2', [23, 29])
## Not supported
#seqeq('lowprime:first', [5])
#seqeq('lowprime:last', [29])
#seqeq('lowprime:prev', [29])
#seqeq('lowprime:next', [29])
def test_modify(self):
mh = getMH()
eq = self.assertEqual
mh.makefolder("dummy1")
self.assertIn("dummy1", mh.listfolders())
path = os.path.join(_mhpath, "dummy1")
self.assertTrue(os.path.exists(path))
f = mh.openfolder('dummy1')
def create(n):
msg = "From: foo\nSubject: %s\n\nDummy Message %s\n" % (n,n)
f.createmessage(n, StringIO.StringIO(msg))
create(7)
create(8)
create(9)
eq(readFile(f.getmessagefilename(9)),
"From: foo\nSubject: 9\n\nDummy Message 9\n")
eq(f.listmessages(), [7, 8, 9])
files = os.listdir(path)
files.sort()
eq(files, ['7', '8', '9'])
f.removemessages(['7', '8'])
files = os.listdir(path)
files.sort()
eq(files, [',7', ',8', '9'])
eq(f.listmessages(), [9])
create(10)
create(11)
create(12)
mh.makefolder("dummy2")
f2 = mh.openfolder("dummy2")
eq(f2.listmessages(), [])
f.movemessage(10, f2, 3)
f.movemessage(11, f2, 5)
eq(f.listmessages(), [9, 12])
eq(f2.listmessages(), [3, 5])
eq(readFile(f2.getmessagefilename(3)),
"From: foo\nSubject: 10\n\nDummy Message 10\n")
f.copymessage(9, f2, 4)
eq(f.listmessages(), [9, 12])
eq(readFile(f2.getmessagefilename(4)),
"From: foo\nSubject: 9\n\nDummy Message 9\n")
f.refilemessages([9, 12], f2)
eq(f.listmessages(), [])
eq(f2.listmessages(), [3, 4, 5, 6, 7])
eq(readFile(f2.getmessagefilename(7)),
"From: foo\nSubject: 12\n\nDummy Message 12\n")
# XXX This should check that _copysequences does the right thing.
mh.deletefolder('dummy1')
mh.deletefolder('dummy2')
self.assertNotIn('dummy1', mh.listfolders())
self.assertTrue(not os.path.exists(path))
def test_read(self):
mh = getMH()
eq = self.assertEqual
f = mh.openfolder('inbox')
msg = f.openmessage(1)
# Check some basic stuff from rfc822
eq(msg.getheader('From'), "Mrs. Premise")
eq(msg.getheader('To'), "Mrs. Conclusion")
# Okay, we have the right message. Let's check the stuff from
# mhlib.
lines = sortLines(msg.getheadertext())
eq(lines, ["Date: 18 July 2001",
"From: Mrs. Premise",
"To: Mrs. Conclusion"])
lines = sortLines(msg.getheadertext(lambda h: len(h)==4))
eq(lines, ["Date: 18 July 2001",
"From: Mrs. Premise"])
eq(msg.getbodytext(), "Hullo, Mrs. Conclusion!\n\n")
eq(msg.getbodytext(0), "Hullo, Mrs. Conclusion!\n\n")
# XXXX there should be a better way to reclaim the file handle
msg.fp.close()
del msg
def test_main():
run_unittest(MhlibTests)
if __name__ == "__main__":
test_main()
| apache-2.0 |
XiliangSong/freebase-python | freebase/fcl/fbutil.py | 5 | 3619 | # ==================================================================
# Copyright (c) 2007,2008,2009 Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
import re
import optparse
class FbException(Exception):
pass
media_types = {
'html': ['text/html'],
'txt': ['text/plain'],
'xml': ['text/xml',
'application/xml'],
'atom': ['application/atom+xml'],
'js': ['text/javascript',
'application/javascript',
'application/x-javascript'],
'json': ['application/json'],
'jpg': ['image/jpeg',
'image/pjpeg'],
'gif': ['image/gif'],
'png': ['image/png'],
}
extension_to_media_type = dict([(k,vs[0]) for k,vs in media_types.items()])
media_type_to_extension = {}
for k,vs in media_types.items():
for v in vs:
media_type_to_extension[v] = k
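# For illustration, given the media_types table above:
#   extension_to_media_type['js'] == 'text/javascript' (the first variant)
#   media_type_to_extension['application/x-javascript'] == 'js'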
def dirsplit_unsafe(id):
"""
Split a freebase id into a prefix and a tail, dealing with
trailing slashes, etc.
'/abc' -> '/', 'abc'
'/foo/bar' -> '/foo', 'bar'
'foo/bar' -> 'foo', 'bar'
'foo/bar/' -> 'foo', 'bar'
"""
id = id.rstrip('/')
parts = id.rsplit("/", 1)
# if no "/" like "foo", then this comes back as ["foo"]
if len(parts) == 1:
return None, parts[0]
if not parts[0]:
return '/', parts[1]
return parts
def dirsplit(id):
dir,file = dirsplit_unsafe(id)
if dir == '/guid':
raise FbException('%r is not a freebase keypath' % (id,))
return (dir,file)
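# A quick sketch of the difference between the two helpers (hypothetical
# ids): dirsplit('/people/person') returns ('/people', 'person'), while
# dirsplit('/guid/9202a8c0') raises FbException because '/guid' paths are
# not freebase keypaths; dirsplit_unsafe performs the same split without
# that check.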
value_types = [
'/type/text',
'/type/key',
'/type/rawstring',
'/type/float',
'/type/int',
'/type/boolean',
'/type/uri',
'/type/datetime',
'/type/id',
'/type/enumeration',
]
default_propkeys = {
'value': '/type/value/value',
'id': '/type/object/id',
'guid': '/type/object/guid',
'type': '/type/object/type',
'name': '/type/object/name',
'key': '/type/object/key',
'timestamp': '/type/object/timestamp',
'permission': '/type/object/permission',
'creator': '/type/object/creator',
'attribution': '/type/object/attribution'
};
| bsd-2-clause |
churchlab/vdj | bin/explode_imgt.py | 2 | 1040 | #! /usr/bin/env python
# Copyright 2014 Uri Laserson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import vdj
from pyutils import cleanup_id
argparser = argparse.ArgumentParser(description=None)
argparser.add_argument('input_file')
argparser.add_argument('output_dir',default=os.getcwd())
args = argparser.parse_args()
for chain in vdj.parse_imgt(args.input_file):
output_file = os.path.join(args.output_dir,'%s.imgt' % cleanup_id(chain.id))
with open(output_file,'w') as op:
print >>op, chain
| apache-2.0 |
h2oai/h2o-2 | py/testdir_single_jvm/test_big_sum_fail.py | 8 | 1981 | import unittest, random, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_util, h2o_import as h2i
import h2o_exec as h2e
UNNECESSARY = False
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(java_heap_GB=24)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_big_sum_fail(self):
node = h2o.nodes[0]
SYNDATASETS_DIR = h2o.make_syn_dir()
csvPathname = SYNDATASETS_DIR + '/temp.csv'
hex_key = 'temp.hex'
for trial in range(5):
# what about seed?
cfResult = h2o.nodes[0].create_frame(key=hex_key,
binary_ones_fraction=0.02, binary_fraction=0, randomize=1,
missing_fraction=0, integer_fraction=1, real_range=100,
has_response=0, response_factors=2, factors=100, cols=1,
integer_range=100, value=0, categorical_fraction=0, rows=2.5e+08,
timeoutSecs=300)
inspect = h2o_cmd.runInspect(key=hex_key)
h2o_cmd.infoFromInspect(inspect, hex_key)
if UNNECESSARY:
# this is just doing a head to R. not critical
h2e.exec_expr(execExpr="%s = %s" % (hex_key, hex_key))
h2e.exec_expr(execExpr="Last.value.0 = %s[c(1,2,3,4,5,6),]" % hex_key)
h2e.exec_expr(execExpr="Last.value.0 = Last.value.0")
node.csv_download(src_key="Last.value.0", csvPathname=csvPathname)
node.remove_key("Last.value.0")
# not sure why this happened
h2o_cmd.runStoreView(view=10000, offset=0)
# Fails on this
h2e.exec_expr(execExpr='Last.value.1 = %s[,1]' % hex_key)
print "Trial #", trial, "completed"
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
gstoner/gpudb | SQL2XML/antlr3/dfa.py | 117 | 7631 | """ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from antlr3.constants import EOF
from antlr3.exceptions import NoViableAltException, BacktrackingFailed
class DFA(object):
"""@brief A DFA implemented as a set of transition tables.
Any state that has a semantic predicate edge is special; those states
are generated with if-then-else structures in a specialStateTransition()
which is generated by cyclicDFA template.
"""
def __init__(
self,
recognizer, decisionNumber,
eot, eof, min, max, accept, special, transition
):
## Which recognizer encloses this DFA? Needed to check backtracking
self.recognizer = recognizer
self.decisionNumber = decisionNumber
self.eot = eot
self.eof = eof
self.min = min
self.max = max
self.accept = accept
self.special = special
self.transition = transition
def predict(self, input):
"""
From the input stream, predict what alternative will succeed
using this DFA (representing the covering regular approximation
to the underlying CFL). Return an alternative number 1..n. Throw
an exception upon error.
"""
mark = input.mark()
s = 0 # we always start at s0
try:
for _ in xrange(50000):
#print "***Current state = %d" % s
specialState = self.special[s]
if specialState >= 0:
#print "is special"
s = self.specialStateTransition(specialState, input)
if s == -1:
self.noViableAlt(s, input)
return 0
input.consume()
continue
if self.accept[s] >= 1:
#print "accept state for alt %d" % self.accept[s]
return self.accept[s]
# look for a normal char transition
c = input.LA(1)
#print "LA = %d (%r)" % (c, unichr(c) if c >= 0 else 'EOF')
#print "range = %d..%d" % (self.min[s], self.max[s])
if c >= self.min[s] and c <= self.max[s]:
# move to next state
snext = self.transition[s][c-self.min[s]]
#print "in range, next state = %d" % snext
if snext < 0:
#print "not a normal transition"
# was in range but not a normal transition
# must check EOT, which is like the else clause.
# eot[s]>=0 indicates that an EOT edge goes to another
# state.
if self.eot[s] >= 0: # EOT Transition to accept state?
#print "EOT trans to accept state %d" % self.eot[s]
s = self.eot[s]
input.consume()
# TODO: I had this as return accept[eot[s]]
# which assumed here that the EOT edge always
# went to an accept...faster to do this, but
# what about predicated edges coming from EOT
# target?
continue
#print "no viable alt"
self.noViableAlt(s, input)
return 0
s = snext
input.consume()
continue
if self.eot[s] >= 0:
#print "EOT to %d" % self.eot[s]
s = self.eot[s]
input.consume()
continue
# EOF Transition to accept state?
if c == EOF and self.eof[s] >= 0:
#print "EOF Transition to accept state %d" \
# % self.accept[self.eof[s]]
return self.accept[self.eof[s]]
# not in range and not EOF/EOT, must be invalid symbol
self.noViableAlt(s, input)
return 0
else:
raise RuntimeError("DFA bang!")
finally:
input.rewind(mark)
def noViableAlt(self, s, input):
if self.recognizer._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException(
self.getDescription(),
self.decisionNumber,
s,
input
)
self.error(nvae)
raise nvae
def error(self, nvae):
"""A hook for debugging interface"""
pass
def specialStateTransition(self, s, input):
return -1
def getDescription(self):
return "n/a"
## def specialTransition(self, state, symbol):
## return 0
def unpack(cls, string):
"""@brief Unpack the runlength encoded table data.
Terence implemented packed table initializers, because Java has a
size restriction on .class files and the lookup tables can grow
pretty large. The generated JavaLexer.java of the Java.g example
would be about 15MB with uncompressed array initializers.
Python does not have any size restrictions, but the compilation of
such large source files seems to be pretty memory hungry. The memory
consumption of the python process grew to >1.5GB when importing a
15MB lexer, eating all my swap space and I was to impacient to see,
if it could finish at all. With packed initializers that are unpacked
at import time of the lexer module, everything works like a charm.
"""
ret = []
for i in range(len(string) / 2):
(n, v) = ord(string[i*2]), ord(string[i*2+1])
# Is there a bitwise operation to do this?
if v == 0xFFFF:
v = -1
ret += [v] * n
return ret
unpack = classmethod(unpack)
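    # A small worked example of the packed format (hypothetical input, not
    # generator output): each (count, value) character pair expands to
    # `count` copies of `value`, with 0xFFFF decoding to -1, so
    #   DFA.unpack(u"\x03\x01\x02\uffff") == [1, 1, 1, -1, -1]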
| apache-2.0 |
saurvs/servo | tests/wpt/web-platform-tests/tools/wptserve/wptserve/request.py | 136 | 16506 | import base64
import cgi
import Cookie
import os
import StringIO
import tempfile
import urlparse
import stash
from utils import HTTPException
missing = object()
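# Module-level sentinel: RequestHeaders.get_list uses it to tell "no
# default supplied" apart from an explicit default value of None.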
class Server(object):
"""Data about the server environment
.. attribute:: config
Environment configuration information with information about the
various servers running, their hostnames and ports.
.. attribute:: stash
Stash object holding state stored on the server between requests.
"""
config = None
def __init__(self, request):
self._stash = None
self._request = request
@property
def stash(self):
if self._stash is None:
address, authkey = stash.load_env_config()
self._stash = stash.Stash(self._request.url_parts.path, address, authkey)
return self._stash
class InputFile(object):
max_buffer_size = 1024*1024
def __init__(self, rfile, length):
"""File-like object used to provide a seekable view of request body data"""
self._file = rfile
self.length = length
self._file_position = 0
if length > self.max_buffer_size:
self._buf = tempfile.TemporaryFile(mode="rw+b")
else:
self._buf = StringIO.StringIO()
@property
def _buf_position(self):
rv = self._buf.tell()
assert rv <= self._file_position
return rv
def read(self, bytes=-1):
assert self._buf_position <= self._file_position
if bytes < 0:
bytes = self.length - self._buf_position
bytes_remaining = min(bytes, self.length - self._buf_position)
if bytes_remaining == 0:
return ""
if self._buf_position != self._file_position:
buf_bytes = min(bytes_remaining, self._file_position - self._buf_position)
old_data = self._buf.read(buf_bytes)
bytes_remaining -= buf_bytes
else:
old_data = ""
assert self._buf_position == self._file_position, (
"Before reading buffer position (%i) didn't match file position (%i)" %
(self._buf_position, self._file_position))
new_data = self._file.read(bytes_remaining)
self._buf.write(new_data)
self._file_position += bytes_remaining
assert self._buf_position == self._file_position, (
"After reading buffer position (%i) didn't match file position (%i)" %
(self._buf_position, self._file_position))
return old_data + new_data
def tell(self):
return self._buf_position
def seek(self, offset):
if offset > self.length or offset < 0:
raise ValueError
if offset <= self._file_position:
self._buf.seek(offset)
else:
self.read(offset - self._file_position)
def readline(self, max_bytes=None):
if max_bytes is None:
max_bytes = self.length - self._buf_position
if self._buf_position < self._file_position:
data = self._buf.readline(max_bytes)
if data.endswith("\n") or len(data) == max_bytes:
return data
else:
data = ""
assert self._buf_position == self._file_position
initial_position = self._file_position
found = False
buf = []
max_bytes -= len(data)
while not found:
readahead = self.read(min(2, max_bytes))
max_bytes -= len(readahead)
for i, c in enumerate(readahead):
if c == "\n":
buf.append(readahead[:i+1])
found = True
break
if not found:
buf.append(readahead)
if not readahead or not max_bytes:
break
new_data = "".join(buf)
data += new_data
self.seek(initial_position + len(new_data))
return data
def readlines(self):
rv = []
while True:
data = self.readline()
if data:
rv.append(data)
else:
break
return rv
def next(self):
data = self.readline()
if data:
return data
else:
raise StopIteration
def __iter__(self):
return self
class Request(object):
"""Object representing a HTTP request.
.. attribute:: doc_root
The local directory to use as a base when resolving paths
.. attribute:: route_match
Regexp match object from matching the request path to the route
selected for the request.
.. attribute:: protocol_version
HTTP version specified in the request.
.. attribute:: method
HTTP method in the request.
.. attribute:: request_path
Request path as it appears in the HTTP request.
.. attribute:: url_base
The prefix part of the path; typically / unless the handler has a url_base set
.. attribute:: url
Absolute URL for the request.
.. attribute:: raw_input
File-like object representing the body of the request.
.. attribute:: url_parts
Parts of the requested URL as obtained by urlparse.urlsplit(path)
.. attribute:: request_line
Raw request line
.. attribute:: headers
RequestHeaders object providing a dictionary-like representation of
the request headers.
.. attribute:: body
Request body as a string
.. attribute:: GET
MultiDict representing the parameters supplied with the request.
Note that these may be present on non-GET requests; the name is
chosen to be familiar to users of other systems such as PHP.
.. attribute:: POST
MultiDict representing the request body parameters. Most parameters
are present as string values, but file uploads have file-like
values.
.. attribute:: cookies
Cookies object representing cookies sent with the request with a
dictionary-like interface.
.. attribute:: auth
Object with username and password properties representing any
credentials supplied using HTTP authentication.
.. attribute:: server
Server object containing information about the server environment.
"""
def __init__(self, request_handler):
self.doc_root = request_handler.server.router.doc_root
self.route_match = None # Set by the router
self.protocol_version = request_handler.protocol_version
self.method = request_handler.command
scheme = request_handler.server.scheme
host = request_handler.headers.get("Host")
port = request_handler.server.server_address[1]
if host is None:
host = request_handler.server.server_address[0]
else:
if ":" in host:
host, port = host.split(":", 1)
self.request_path = request_handler.path
self.url_base = "/"
if self.request_path.startswith(scheme + "://"):
self.url = request_handler.path
else:
self.url = "%s://%s:%s%s" % (scheme,
host,
port,
self.request_path)
self.url_parts = urlparse.urlsplit(self.url)
self._raw_headers = request_handler.headers
self.request_line = request_handler.raw_requestline
self._headers = None
self.raw_input = InputFile(request_handler.rfile,
int(self.headers.get("Content-Length", 0)))
self._body = None
self._GET = None
self._POST = None
self._cookies = None
self._auth = None
self.server = Server(self)
def __repr__(self):
return "<Request %s %s>" % (self.method, self.url)
@property
def GET(self):
if self._GET is None:
params = urlparse.parse_qsl(self.url_parts.query, keep_blank_values=True)
self._GET = MultiDict()
for key, value in params:
self._GET.add(key, value)
return self._GET
@property
def POST(self):
if self._POST is None:
#Work out the post parameters
pos = self.raw_input.tell()
self.raw_input.seek(0)
fs = cgi.FieldStorage(fp=self.raw_input,
environ={"REQUEST_METHOD": self.method},
headers=self.headers,
keep_blank_values=True)
self._POST = MultiDict.from_field_storage(fs)
self.raw_input.seek(pos)
return self._POST
@property
def cookies(self):
if self._cookies is None:
parser = Cookie.BaseCookie()
cookie_headers = self.headers.get("cookie", "")
parser.load(cookie_headers)
cookies = Cookies()
for key, value in parser.iteritems():
cookies[key] = CookieValue(value)
self._cookies = cookies
return self._cookies
@property
def headers(self):
if self._headers is None:
self._headers = RequestHeaders(self._raw_headers)
return self._headers
@property
def body(self):
if self._body is None:
pos = self.raw_input.tell()
self.raw_input.seek(0)
self._body = self.raw_input.read()
self.raw_input.seek(pos)
return self._body
@property
def auth(self):
if self._auth is None:
self._auth = Authentication(self.headers)
return self._auth
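# Hedged usage sketch: handlers receive a Request built by the server, and the
# lazy properties above give parsed views of it (names below are illustrative):
#   req.GET.first("id")               # first ?id=... query parameter
#   req.POST.first("upload")          # file-like value for an uploaded field
#   req.headers.get("content-type")   # case-insensitive header lookup
#   req.cookies.last("session")       # most recent cookie with that name
# Each property caches its result (self._GET, self._POST, ...), and POST/body
# restore raw_input's read position so later consumers are unaffected.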
class RequestHeaders(dict):
"""Dictionary-like API for accessing request headers."""
def __init__(self, items):
        for key, value in items.items():
key = key.lower()
if key in self:
self[key].append(value)
else:
dict.__setitem__(self, key, [value])
def __getitem__(self, key):
"""Get all headers of a certain (case-insensitive) name. If there is
more than one, the values are returned comma separated"""
values = dict.__getitem__(self, key.lower())
if len(values) == 1:
return values[0]
else:
return ", ".join(values)
def __setitem__(self, name, value):
        raise Exception("RequestHeaders instances are read-only")
def get(self, key, default=None):
"""Get a string representing all headers with a particular value,
with multiple headers separated by a comma. If no header is found
return a default value
:param key: The header name to look up (case-insensitive)
:param default: The value to return in the case of no match
"""
try:
return self[key]
except KeyError:
return default
def get_list(self, key, default=missing):
"""Get all the header values for a particular field name as
a list"""
try:
return dict.__getitem__(self, key.lower())
except KeyError:
if default is not missing:
return default
else:
raise
def __contains__(self, key):
return dict.__contains__(self, key.lower())
def iteritems(self):
for item in self:
yield item, self[item]
def itervalues(self):
for item in self:
yield self[item]
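# Hedged sketch of the multi-value semantics above (the plain dict argument is
# a stand-in for the parsed message object the server normally passes in):
#   headers = RequestHeaders({"Accept": "text/html", "X-Thing": "a"})
#   headers["ACCEPT"]            # "text/html" -- lookups are case-insensitive
#   headers.get_list("x-thing")  # ["a"] -- always a list
# When a field name occurs more than once, __getitem__ joins the values with
# ", " while get_list preserves them individually.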
class CookieValue(object):
"""Representation of cookies.
Note that cookies are considered read-only and the string value
of the cookie will not change if you update the field values.
However this is not enforced.
.. attribute:: key
The name of the cookie.
.. attribute:: value
The value of the cookie
.. attribute:: expires
The expiry date of the cookie
.. attribute:: path
The path of the cookie
.. attribute:: comment
The comment of the cookie.
.. attribute:: domain
The domain with which the cookie is associated
.. attribute:: max_age
The max-age value of the cookie.
.. attribute:: secure
Whether the cookie is marked as secure
.. attribute:: httponly
Whether the cookie is marked as httponly
"""
def __init__(self, morsel):
self.key = morsel.key
self.value = morsel.value
for attr in ["expires", "path",
"comment", "domain", "max-age",
"secure", "version", "httponly"]:
setattr(self, attr.replace("-", "_"), morsel[attr])
self._str = morsel.OutputString()
def __str__(self):
return self._str
def __repr__(self):
return self._str
def __eq__(self, other):
"""Equality comparison for cookies. Compares to other cookies
based on value alone and on non-cookies based on the equality
of self.value with the other object so that a cookie with value
"ham" compares equal to the string "ham"
"""
if hasattr(other, "value"):
return self.value == other.value
return self.value == other
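# Hedged example of the equality rule above, built with the stdlib Cookie
# module this file already relies on:
#   parser = Cookie.BaseCookie()
#   parser.load("flavour=ham")
#   cookie = CookieValue(parser["flavour"])
#   cookie == "ham"    # True: non-cookies compare against .value alone
#   str(cookie)        # the morsel's full OutputString() representation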
class MultiDict(dict):
"""Dictionary type that holds multiple values for each
key"""
#TODO: this should perhaps also order the keys
def __init__(self):
pass
def __setitem__(self, name, value):
dict.__setitem__(self, name, [value])
def add(self, name, value):
if name in self:
dict.__getitem__(self, name).append(value)
else:
dict.__setitem__(self, name, [value])
def __getitem__(self, key):
"""Get the first value with a given key"""
#TODO: should this instead be the last value?
return self.first(key)
def first(self, key, default=missing):
"""Get the first value with a given key
:param key: The key to lookup
:param default: The default to return if key is
not found (throws if nothing is
specified)
"""
if key in self and dict.__getitem__(self, key):
return dict.__getitem__(self, key)[0]
elif default is not missing:
return default
raise KeyError
def last(self, key, default=missing):
"""Get the last value with a given key
:param key: The key to lookup
:param default: The default to return if key is
not found (throws if nothing is
specified)
"""
if key in self and dict.__getitem__(self, key):
return dict.__getitem__(self, key)[-1]
elif default is not missing:
return default
raise KeyError
def get_list(self, key):
"""Get all values with a given key as a list
:param key: The key to lookup
"""
return dict.__getitem__(self, key)
@classmethod
def from_field_storage(cls, fs):
self = cls()
if fs.list is None:
return self
for key in fs:
values = fs[key]
if not isinstance(values, list):
values = [values]
for value in values:
                if not value.filename:
                    value = value.value
self.add(key, value)
return self
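# Hedged usage sketch of the ordering semantics above:
#   d = MultiDict()
#   d.add("tag", "a")
#   d.add("tag", "b")
#   d["tag"]                          # "a" (plain [] defers to first())
#   d.last("tag")                     # "b"
#   d.get_list("tag")                 # ["a", "b"]
#   d.first("missing", default=None)  # default avoids the KeyError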
class Cookies(MultiDict):
"""MultiDict specialised for Cookie values"""
def __init__(self):
pass
def __getitem__(self, key):
return self.last(key)
class Authentication(object):
"""Object for dealing with HTTP Authentication
.. attribute:: username
The username supplied in the HTTP Authorization
header, or None
.. attribute:: password
The password supplied in the HTTP Authorization
header, or None
"""
def __init__(self, headers):
self.username = None
self.password = None
auth_schemes = {"Basic": self.decode_basic}
if "authorization" in headers:
header = headers.get("authorization")
auth_type, data = header.split(" ", 1)
if auth_type in auth_schemes:
self.username, self.password = auth_schemes[auth_type](data)
else:
raise HTTPException(400, "Unsupported authentication scheme %s" % auth_type)
def decode_basic(self, data):
decoded_data = base64.decodestring(data)
return decoded_data.split(":", 1)
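# Hedged sketch of the Basic scheme handling above (credentials are made up):
#   import base64
#   token = base64.b64encode("alice:secret")   # "YWxpY2U6c2VjcmV0"
#   auth = Authentication({"authorization": "Basic " + token})
#   auth.username, auth.password               # ("alice", "secret")
# Any scheme other than Basic raises HTTPException(400), so callers should
# treat credential parsing as part of request validation.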
| mpl-2.0 |
fangxingli/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/tests/geoapp/models.py | 109 | 1890 | from django.contrib.gis.db import models
from django.contrib.gis.tests.utils import mysql, spatialite
from django.utils.encoding import python_2_unicode_compatible
# MySQL spatial indices can't handle NULL geometries.
null_flag = not mysql
@python_2_unicode_compatible
class Country(models.Model):
name = models.CharField(max_length=30)
mpoly = models.MultiPolygonField() # SRID, by default, is 4326
objects = models.GeoManager()
def __str__(self): return self.name
@python_2_unicode_compatible
class City(models.Model):
name = models.CharField(max_length=30)
point = models.PointField()
objects = models.GeoManager()
def __str__(self): return self.name
# This is an inherited model from City
class PennsylvaniaCity(City):
county = models.CharField(max_length=30)
founded = models.DateTimeField(null=True)
objects = models.GeoManager() # TODO: This should be implicitly inherited.
@python_2_unicode_compatible
class State(models.Model):
name = models.CharField(max_length=30)
poly = models.PolygonField(null=null_flag) # Allowing NULL geometries here.
objects = models.GeoManager()
def __str__(self): return self.name
@python_2_unicode_compatible
class Track(models.Model):
name = models.CharField(max_length=30)
line = models.LineStringField()
objects = models.GeoManager()
def __str__(self): return self.name
class Truth(models.Model):
val = models.BooleanField(default=False)
objects = models.GeoManager()
if not spatialite:
@python_2_unicode_compatible
class Feature(models.Model):
name = models.CharField(max_length=20)
geom = models.GeometryField()
objects = models.GeoManager()
def __str__(self): return self.name
class MinusOneSRID(models.Model):
geom = models.PointField(srid=-1) # Minus one SRID.
objects = models.GeoManager()
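# Hedged query sketch against the models above (standard GeoDjango lookups;
# the country name and coordinates are made up):
#   from django.contrib.gis.geos import Point
#   from django.contrib.gis.measure import D
#   nz = Country.objects.get(name="New Zealand")
#   City.objects.filter(point__within=nz.mpoly)
#   City.objects.filter(point__distance_lte=(Point(174.76, -36.85), D(km=100)))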
| apache-2.0 |
shakamunyi/nova | nova/api/openstack/compute/plugins/v3/migrations.py | 10 | 2207 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.objects import base as obj_base
ALIAS = "os-migrations"
def authorize(context, action_name):
action = 'v3:%s:%s' % (ALIAS, action_name)
extensions.extension_authorizer('compute', action)(context)
def output(migrations_obj):
"""Returns the desired output of the API from an object.
    From a MigrationsList object, this method returns a list of
    primitive objects containing only the necessary fields.
"""
objects = obj_base.obj_to_primitive(migrations_obj)
for obj in objects:
del obj['deleted']
del obj['deleted_at']
return objects
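# Hedged sketch of what output() does to each migration primitive:
#   {"id": 1, "status": "migrating", "deleted": False, "deleted_at": None}
#   -> {"id": 1, "status": "migrating"}
# i.e. soft-delete bookkeeping fields are stripped before the API response.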
class MigrationsController(wsgi.Controller):
"""Controller for accessing migrations in OpenStack API."""
def __init__(self):
self.compute_api = compute.API()
@extensions.expected_errors(())
def index(self, req):
"""Return all migrations in progress."""
context = req.environ['nova.context']
authorize(context, "index")
migrations = self.compute_api.get_migrations(context, req.GET)
return {'migrations': output(migrations)}
class Migrations(extensions.V3APIExtensionBase):
"""Provide data on migrations."""
name = "Migrations"
alias = ALIAS
version = 1
def get_resources(self):
resources = []
resource = extensions.ResourceExtension(ALIAS,
MigrationsController())
resources.append(resource)
return resources
def get_controller_extensions(self):
return []
| apache-2.0 |
IvanGavran/scrapy | tests/test_dupefilters.py | 15 | 1661 | import hashlib
import unittest
from scrapy.dupefilters import RFPDupeFilter
from scrapy.http import Request
from scrapy.utils.python import to_bytes
class RFPDupeFilterTest(unittest.TestCase):
def test_filter(self):
dupefilter = RFPDupeFilter()
dupefilter.open()
r1 = Request('http://scrapytest.org/1')
r2 = Request('http://scrapytest.org/2')
r3 = Request('http://scrapytest.org/2')
assert not dupefilter.request_seen(r1)
assert dupefilter.request_seen(r1)
assert not dupefilter.request_seen(r2)
assert dupefilter.request_seen(r3)
dupefilter.close('finished')
def test_request_fingerprint(self):
"""Test if customization of request_fingerprint method will change
output of request_seen.
"""
r1 = Request('http://scrapytest.org/index.html')
r2 = Request('http://scrapytest.org/INDEX.html')
dupefilter = RFPDupeFilter()
dupefilter.open()
assert not dupefilter.request_seen(r1)
assert not dupefilter.request_seen(r2)
dupefilter.close('finished')
class CaseInsensitiveRFPDupeFilter(RFPDupeFilter):
def request_fingerprint(self, request):
fp = hashlib.sha1()
fp.update(to_bytes(request.url.lower()))
return fp.hexdigest()
case_insensitive_dupefilter = CaseInsensitiveRFPDupeFilter()
case_insensitive_dupefilter.open()
assert not case_insensitive_dupefilter.request_seen(r1)
assert case_insensitive_dupefilter.request_seen(r2)
case_insensitive_dupefilter.close('finished')
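# Hedged note: outside of tests, a custom filter like the one above is usually
# wired in through project settings rather than instantiated by hand, e.g.
#   # settings.py (hypothetical project)
#   DUPEFILTER_CLASS = "myproject.dupefilters.CaseInsensitiveRFPDupeFilter"
# Scrapy then constructs and open()s the filter itself for each crawl.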
| bsd-3-clause |
amenonsen/ansible | test/units/modules/network/fortios/test_fortios_vpn_ssl_web_host_check_software.py | 21 | 9924 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_vpn_ssl_web_host_check_software
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_vpn_ssl_web_host_check_software.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_vpn_ssl_web_host_check_software_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_ssl_web_host_check_software': {'guid': 'test_value_3',
'name': 'default_name_4',
'os_type': 'windows',
'type': 'av',
'version': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_host_check_software.fortios_vpn_ssl_web(input_data, fos_instance)
expected_data = {'guid': 'test_value_3',
'name': 'default_name_4',
'os-type': 'windows',
'type': 'av',
'version': 'test_value_7'
}
set_method_mock.assert_called_with('vpn.ssl.web', 'host-check-software', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_vpn_ssl_web_host_check_software_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_ssl_web_host_check_software': {'guid': 'test_value_3',
'name': 'default_name_4',
'os_type': 'windows',
'type': 'av',
'version': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_host_check_software.fortios_vpn_ssl_web(input_data, fos_instance)
expected_data = {'guid': 'test_value_3',
'name': 'default_name_4',
'os-type': 'windows',
'type': 'av',
'version': 'test_value_7'
}
set_method_mock.assert_called_with('vpn.ssl.web', 'host-check-software', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_vpn_ssl_web_host_check_software_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'vpn_ssl_web_host_check_software': {'guid': 'test_value_3',
'name': 'default_name_4',
'os_type': 'windows',
'type': 'av',
'version': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_host_check_software.fortios_vpn_ssl_web(input_data, fos_instance)
delete_method_mock.assert_called_with('vpn.ssl.web', 'host-check-software', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_vpn_ssl_web_host_check_software_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'vpn_ssl_web_host_check_software': {'guid': 'test_value_3',
'name': 'default_name_4',
'os_type': 'windows',
'type': 'av',
'version': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_host_check_software.fortios_vpn_ssl_web(input_data, fos_instance)
delete_method_mock.assert_called_with('vpn.ssl.web', 'host-check-software', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_vpn_ssl_web_host_check_software_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_ssl_web_host_check_software': {'guid': 'test_value_3',
'name': 'default_name_4',
'os_type': 'windows',
'type': 'av',
'version': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_host_check_software.fortios_vpn_ssl_web(input_data, fos_instance)
expected_data = {'guid': 'test_value_3',
'name': 'default_name_4',
'os-type': 'windows',
'type': 'av',
'version': 'test_value_7'
}
set_method_mock.assert_called_with('vpn.ssl.web', 'host-check-software', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_vpn_ssl_web_host_check_software_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_ssl_web_host_check_software': {
'random_attribute_not_valid': 'tag', 'guid': 'test_value_3',
'name': 'default_name_4',
'os_type': 'windows',
'type': 'av',
'version': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_host_check_software.fortios_vpn_ssl_web(input_data, fos_instance)
expected_data = {'guid': 'test_value_3',
'name': 'default_name_4',
'os-type': 'windows',
'type': 'av',
'version': 'test_value_7'
}
set_method_mock.assert_called_with('vpn.ssl.web', 'host-check-software', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 |
merwok-forks/facepy | docs/source/conf.py | 6 | 7058 | # -*- coding: utf-8 -*-
#
# Facepy documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 27 20:26:42 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
from facepy import __version__
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Facepy'
copyright = u'2011, Johannes Gorset'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
html_style = 'rtd.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Facepydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Facepy.tex', u'Facepy Documentation',
u'Johannes Gorset', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'facepy', u'Facepy Documentation',
[u'Johannes Gorset'], 1)
]
| mit |
mjordan/pkppln | server.py | 1 | 1777 | #!/usr/bin/env python
import sys
import bottle
from bottle import Bottle, request, error, response, Response
from os.path import abspath, dirname
import logging
sys.path.append(dirname(abspath(__file__)))
import pkppln
from webapp.admin.terms_server import TermsApp
from webapp.sword.sword_server import SwordServer
from webapp.static.static_server import StaticApp
from webapp.feeds.feed_server import FeedsApp
from webapp.admin.journal_server import JournalsApp
def after_request():
if request.path.startswith('/static'):
return
try:
route_name = request.route.callback.__name__
except:
route_name = '(unknown)'
try:
pkppln.log_message(" - ".join([
'finished', request.get('REMOTE_ADDR'),
request.method, request.path,
type(request.app).__name__ + "#" + route_name
]), logging.INFO)
except:
pass
def before_request():
# pkppln.log_message(" - ".join([
# 'starting', request.get('REMOTE_ADDR'),
# request.method, request.path]))
pkppln.initialize()
static_path = dirname(abspath(__file__)) + '/static'
application = bottle.default_app()
application.add_hook('before_request', before_request)
application.add_hook('after_request', after_request)
application.mount('/static/', StaticApp('Static', static_path))
application.mount('/admin/terms/', TermsApp('Terms'))
application.mount('/admin/journals/', JournalsApp('JournalsApp'))
application.mount('/feeds/', FeedsApp('Feeds'))
application.mount('/api/sword/2.0/', SwordServer('SWORD'))
if __name__ == '__main__':
if len(sys.argv) == 2:
pkppln.config_file_name = sys.argv[1]
bottle.debug(True)
application.run(host='127.0.0.1', port=9999, reloader=True)
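# Hedged run example (the config filename is illustrative):
#   python server.py pkppln.cfg
# which serves the mounted apps at /static/, /admin/terms/, /admin/journals/,
# /feeds/ and /api/sword/2.0/ on 127.0.0.1:9999 with the auto-reloader on.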
| gpl-3.0 |
2uller/LotF | App/Lib/site-packages/numpy/oldnumeric/fix_default_axis.py | 101 | 8035 | """
This module adds the default axis argument to code which did not specify it
for the functions where the default was changed in NumPy.
The functions changed are
add -1 ( all second argument)
======
nansum
nanmax
nanmin
nanargmax
nanargmin
argmax
argmin
compress 3
add 0
======
take 3
repeat 3
sum # might cause problems with builtin.
product
sometrue
alltrue
cumsum
cumproduct
average
ptp
cumprod
prod
std
mean
"""
__all__ = ['convertfile', 'convertall', 'converttree']
import sys
import os
import re
import glob
_args3 = ['compress', 'take', 'repeat']
_funcm1 = ['nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin',
'argmax', 'argmin', 'compress']
_func0 = ['take', 'repeat', 'sum', 'product', 'sometrue', 'alltrue',
'cumsum', 'cumproduct', 'average', 'ptp', 'cumprod', 'prod',
'std', 'mean']
_all = _func0 + _funcm1
func_re = {}
for name in _all:
_astr = r"""%s\s*[(]"""%name
func_re[name] = re.compile(_astr)
import string
disallowed = '_' + string.uppercase + string.lowercase + string.digits
def _add_axis(fstr, name, repl):
alter = 0
if name in _args3:
allowed_comma = 1
else:
allowed_comma = 0
newcode = ""
last = 0
for obj in func_re[name].finditer(fstr):
nochange = 0
start, end = obj.span()
if fstr[start-1] in disallowed:
continue
if fstr[start-1] == '.' \
and fstr[start-6:start-1] != 'numpy' \
and fstr[start-2:start-1] != 'N' \
and fstr[start-9:start-1] != 'numarray' \
and fstr[start-8:start-1] != 'numerix' \
and fstr[start-8:start-1] != 'Numeric':
continue
if fstr[start-1] in ['\t',' ']:
k = start-2
while fstr[k] in ['\t',' ']:
k -= 1
if fstr[k-2:k+1] == 'def' or \
fstr[k-4:k+1] == 'class':
continue
k = end
stack = 1
ncommas = 0
N = len(fstr)
while stack:
if k>=N:
nochange =1
break
if fstr[k] == ')':
stack -= 1
elif fstr[k] == '(':
stack += 1
elif stack == 1 and fstr[k] == ',':
ncommas += 1
if ncommas > allowed_comma:
nochange = 1
break
k += 1
if nochange:
continue
alter += 1
newcode = "%s%s,%s)" % (newcode, fstr[last:k-1], repl)
last = k
if not alter:
newcode = fstr
else:
newcode = "%s%s" % (newcode, fstr[last:])
return newcode, alter
def _import_change(fstr, names):
# Four possibilities
# 1.) import numpy with subsequent use of numpy.<name>
# change this to import numpy.oldnumeric as numpy
# 2.) import numpy as XXXX with subsequent use of
# XXXX.<name> ==> import numpy.oldnumeric as XXXX
# 3.) from numpy import *
# with subsequent use of one of the names
# 4.) from numpy import ..., <name>, ... (could span multiple
# lines. ==> remove all names from list and
# add from numpy.oldnumeric import <name>
num = 0
# case 1
importstr = "import numpy"
ind = fstr.find(importstr)
if (ind > 0):
found = 0
for name in names:
ind2 = fstr.find("numpy.%s" % name, ind)
if (ind2 > 0):
found = 1
break
if found:
fstr = "%s%s%s" % (fstr[:ind], "import numpy.oldnumeric as numpy",
fstr[ind+len(importstr):])
num += 1
# case 2
importre = re.compile("""import numpy as ([A-Za-z0-9_]+)""")
modules = importre.findall(fstr)
if len(modules) > 0:
for module in modules:
found = 0
for name in names:
ind2 = fstr.find("%s.%s" % (module, name))
if (ind2 > 0):
found = 1
break
if found:
importstr = "import numpy as %s" % module
ind = fstr.find(importstr)
fstr = "%s%s%s" % (fstr[:ind],
"import numpy.oldnumeric as %s" % module,
fstr[ind+len(importstr):])
num += 1
# case 3
importstr = "from numpy import *"
ind = fstr.find(importstr)
if (ind > 0):
found = 0
for name in names:
ind2 = fstr.find(name, ind)
if (ind2 > 0) and fstr[ind2-1] not in disallowed:
found = 1
break
if found:
fstr = "%s%s%s" % (fstr[:ind],
"from numpy.oldnumeric import *",
fstr[ind+len(importstr):])
num += 1
# case 4
ind = 0
importstr = "from numpy import"
N = len(importstr)
while 1:
ind = fstr.find(importstr, ind)
if (ind < 0):
break
ind += N
ptr = ind+1
stack = 1
while stack:
if fstr[ptr] == '\\':
stack += 1
elif fstr[ptr] == '\n':
stack -= 1
ptr += 1
substr = fstr[ind:ptr]
found = 0
substr = substr.replace('\n',' ')
substr = substr.replace('\\','')
importnames = [x.strip() for x in substr.split(',')]
# determine if any of names are in importnames
addnames = []
for name in names:
if name in importnames:
importnames.remove(name)
addnames.append(name)
if len(addnames) > 0:
fstr = "%s%s\n%s\n%s" % \
(fstr[:ind],
"from numpy import %s" % \
", ".join(importnames),
"from numpy.oldnumeric import %s" % \
", ".join(addnames),
fstr[ptr:])
num += 1
return fstr, num
def add_axis(fstr, import_change=False):
total = 0
if not import_change:
for name in _funcm1:
fstr, num = _add_axis(fstr, name, 'axis=-1')
total += num
for name in _func0:
fstr, num = _add_axis(fstr, name, 'axis=0')
total += num
return fstr, total
else:
fstr, num = _import_change(fstr, _funcm1+_func0)
return fstr, num
def makenewfile(name, filestr):
fid = file(name, 'w')
fid.write(filestr)
fid.close()
def getfile(name):
fid = file(name)
filestr = fid.read()
fid.close()
return filestr
def copyfile(name, fstr):
base, ext = os.path.splitext(name)
makenewfile(base+'.orig', fstr)
return
def convertfile(filename, import_change=False):
"""Convert the filename given from using Numeric to using NumPy
    Backs the file up (the .py extension is replaced with .orig) and then
    over-writes the file with the updated code
"""
filestr = getfile(filename)
newstr, total = add_axis(filestr, import_change)
if total > 0:
print "Changing ", filename
copyfile(filename, filestr)
makenewfile(filename, newstr)
sys.stdout.flush()
def fromargs(args):
filename = args[1]
convertfile(filename)
def convertall(direc=os.path.curdir, import_change=False):
"""Convert all .py files in the directory given
    For each file, a backup of <usesnumeric>.py is made as
    <usesnumeric>.orig. A new file named <usesnumeric>.py
is then written with the updated code.
"""
files = glob.glob(os.path.join(direc,'*.py'))
for afile in files:
convertfile(afile, import_change)
def _func(arg, dirname, fnames):
convertall(dirname, import_change=arg)
def converttree(direc=os.path.curdir, import_change=False):
"""Convert all .py files in the tree given
"""
os.path.walk(direc, _func, import_change)
if __name__ == '__main__':
fromargs(sys.argv)
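# Hedged usage: run directly on a single file, or call the helpers from Python
# for whole trees (the paths below are examples):
#   python fix_default_axis.py legacy_script.py
#   >>> from numpy.oldnumeric.fix_default_axis import converttree
#   >>> converttree("src/oldproject", import_change=True)
# Each changed file is first backed up with its extension replaced by .orig.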
| gpl-2.0 |
nzjrs/conduit | conduit/Module.py | 2 | 12388 | """
Classes associated with dynamic module loading
Copyright: John Stowers, 2006
License: GPLv2
"""
import gobject
import os, os.path
import traceback
import pydoc
import logging
log = logging.getLogger("Module")
import conduit.dataproviders
import conduit.ModuleWrapper as ModuleWrapper
import conduit.Knowledge as Knowledge
import conduit.vfs as Vfs
from gettext import gettext as _
class ModuleManager(gobject.GObject):
"""
Generic dynamic module loader for conduit. Given a path
it loads all modules in that directory, keeping them in an
    internal array which may be returned via get_modules
Also manages dataprovider factories which make dataproviders available
at runtime
"""
__gsignals__ = {
#Fired when a new instantiatable DP becomes available. It is described via
#a wrapper because we do not actually instantiate it till later - to save memory
"dataprovider-available" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, [
gobject.TYPE_PYOBJECT]), #The DPW describing the new DP class
"dataprovider-unavailable" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, [
gobject.TYPE_PYOBJECT]), #The DPW describing the DP class which is now unavailable
# Fired when load_all has loaded every available modules
"all-modules-loaded" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, []),
# Fired when a syncset is created
"syncset-added" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, [
gobject.TYPE_PYOBJECT]), #The syncset that was added
}
def __init__(self, dirs=None):
"""
@param dirs: A list of directories to search. Relative pathnames and paths
containing ~ will be expanded. If dirs is None the
ModuleLoader will not search for modules.
@type dirs: C{string[]}
"""
gobject.GObject.__init__(self)
#Dict of loaded modulewrappers. key is wrapper.get_key()
#Stored seperate to the classes because dynamic dataproviders may
#use the same class but with different initargs (diff keys)
self.moduleWrappers = {}
#Files that could not be loaded properly
self.invalidFiles = []
#Keep a ref to dataprovider factories so they are not collected
self.dataproviderFactories = []
#scan all dirs for files in the right format (*Module/*Module.py)
self.filelist = self._build_filelist_from_directories(dirs)
def _on_dynamic_dataprovider_added(self, monitor, dpw, klass):
"""
Store the ipod so it can be retrieved later by the treeview/model
emit a signal so it is added to the GUI
"""
log.info("Dynamic dataprovider (%s) available by %s" % (dpw, monitor))
self._append_module(dpw, klass)
def _on_dynamic_dataprovider_removed(self, monitor, key):
log.info("Dynamic dataprovider (%s) unavailable by %s" % (key, monitor))
self._remove_module(key)
def _emit_available(self, dataproviderWrapper):
if dataproviderWrapper.module_type in ["source", "sink", "twoway"]:
self.emit("dataprovider-available", dataproviderWrapper)
def _emit_unavailable(self, dataproviderWrapper):
if dataproviderWrapper.module_type in ["source", "sink", "twoway"]:
self.emit("dataprovider-unavailable", dataproviderWrapper)
def _build_filelist_from_directories(self, directories=None):
"""
Converts a given array of directories into a list
containing the filenames of all qualified modules. Recurses into
directories and adds files if they have the same name as the
directory in which they reside.
This method is automatically invoked by the constructor.
"""
res = []
if not directories:
return res
#convert to abs path
directories = [os.path.abspath(os.path.expanduser(s)) for s in directories]
while len(directories) > 0:
d = directories.pop(0)
try:
if not os.path.exists(d):
continue
for i in os.listdir(d):
f = os.path.join(d,i)
if os.path.isfile(f) and self._is_module(f):
if os.path.basename(f) not in [os.path.basename(j) for j in res]:
res.append(f)
elif os.path.isdir(f) and self._is_module_dir(f):
directories.append(f)
except OSError, err:
log.warn("Error reading directory %s, skipping." % (d))
return res
def _is_module(self, filename):
return filename.endswith("Module.py")
def _is_module_dir(self, dirname):
return dirname.endswith("Module")
def _append_module(self, wrapper, klass):
#Check if the wrapper is unique
key = wrapper.get_dnd_key()
if key not in self.moduleWrappers:
self.moduleWrappers[key] = wrapper
#Emit a signal because this wrapper is new
self._emit_available(wrapper)
else:
log.warn("Wrapper with key %s allready loaded" % key)
def _remove_module(self, key):
"""
Looks for a given key in the class registry and attempts to remove it
@param key: The key of the class to remove
"""
if not key in self.moduleWrappers:
log.warn("Unable to remove class - it isn't available! (%s)" % key)
return
#keep a ref for the signal emission
dpw = self.moduleWrappers[key]
# remove from moduleWrappers...
del self.moduleWrappers[key]
# notify everything that dp is no longer available
self._emit_unavailable(dpw)
def _import_file(self, filename):
"""
        Tries to import the specified file. Returns the python module on success.
Primarily for internal use. Note that the python module returned may actually
contain several more loadable modules.
"""
mods = pydoc.importfile (filename)
try:
if (mods.MODULES): pass
except AttributeError:
log.warn("The file %s is not a valid module. Skipping." % (filename))
log.warn("A module must have the variable MODULES defined as a dictionary.")
raise
for modules, infos in mods.MODULES.items():
for i in ModuleWrapper.COMPULSORY_ATTRIBUTES:
if i not in infos:
log.warn("Class %s in file %s does define a %s attribute. Skipping." % (modules, filename, i))
raise Exception
return mods
def _load_modules_in_file(self, filename):
"""
Loads all modules in the given file
"""
try:
mod = self._import_file(filename)
for modules, infos in mod.MODULES.items():
try:
klass = getattr(mod, modules)
if infos["type"] == "dataprovider" or infos["type"] == "converter":
mod_wrapper = ModuleWrapper.ModuleWrapper(
klass=klass,
initargs=(),
category=getattr(klass, "_category_", conduit.dataproviders.CATEGORY_TEST)
)
#Save the module (signal is emitted in _append_module)
self._append_module(
mod_wrapper,
klass
)
elif infos["type"] == "dataprovider-factory":
# build a dict of kwargs to pass to factories
kwargs = {
"moduleManager": self,
}
#instantiate and store the factory
instance = klass(**kwargs)
self.dataproviderFactories.append(instance)
else:
log.warn("Class is an unknown type: %s" % klass)
except AttributeError:
log.warn("Could not find module %s in %s\n%s" % (modules,filename,traceback.format_exc()))
except pydoc.ErrorDuringImport, e:
log.warn("Error loading the file: %s\n%s" % (filename, "".join(traceback.format_exception(e.exc,e.value,e.tb))))
self.invalidFiles.append(os.path.basename(filename))
except Exception, e:
log.error("Error loading the file: %s\n%s" % (filename, traceback.format_exc()))
self.invalidFiles.append(os.path.basename(filename))
def load_all(self, whitelist, blacklist):
"""
Loads all classes in the configured paths.
If whitelist and blacklist are supplied then the name of the file
is tested against them. Default policy is to load all modules unless
"""
for f in self.filelist:
name, ext = Vfs.uri_get_filename_and_extension(f)
if whitelist:
if name in whitelist:
self._load_modules_in_file(f)
elif blacklist:
if name not in blacklist:
self._load_modules_in_file(f)
else:
self._load_modules_in_file(f)
for i in self.dataproviderFactories:
i.connect("dataprovider-removed", self._on_dynamic_dataprovider_removed)
i.connect("dataprovider-added", self._on_dynamic_dataprovider_added)
i.probe()
self.emit('all-modules-loaded')
def get_all_modules(self):
"""
@returns: All loaded modules
@rtype: L{conduit.ModuleManager.ModuleWrapper}[]
"""
return self.moduleWrappers.values()
def get_modules_by_type(self, *type_filter):
"""
Returns all loaded modules of type specified by type_filter
or all if the filter is set to None.
"""
if len(type_filter) == 0:
return self.moduleWrappers.values()
return [i for i in self.moduleWrappers.values() if i.module_type in type_filter]
def get_module_wrapper_with_instance(self, wrapperKey):
"""
Returns a new ModuleWrapper with a dp instace described by wrapperKey
"""
mod_wrapper = None
if wrapperKey in self.moduleWrappers:
#Get the existing wrapper
m = self.moduleWrappers[wrapperKey]
#Make a copy of it, containing an instantiated module
mod_wrapper = ModuleWrapper.ModuleWrapper(
klass=m.klass,
initargs=m.initargs,
category=m.category
)
mod_wrapper.instantiate_module()
else:
log.warn("Could not find module wrapper: %s" % (wrapperKey))
mod_wrapper = ModuleWrapper.PendingDataproviderWrapper(wrapperKey)
return mod_wrapper
def make_modules_callable(self, type_filter):
"""
        If it is necessary to call the modules directly, this
        function creates those instances in wrappers of the specified type.
"""
for i in self.moduleWrappers.values():
if i.module_type == type_filter:
i.instantiate_module()
def list_preconfigured_conduits(self):
#strip the keys back to the classnames, because the preconfigured dps
#are described in terms of classes, not instances (keys)
names = {}
for key in self.moduleWrappers:
names[key.split(":")[0]] = key
#for a preconfigured conduit to be available, both the
#source and sink must be loaded
found = []
for (source,sink),(comment,twoway) in Knowledge.PRECONFIGIRED_CONDUITS.items():
if source in names and sink in names:
#return key,key,desc,two-way
found.append( (names[source],names[sink],_(comment),twoway) )
return found
def quit(self):
for dpf in self.dataproviderFactories:
dpf.quit()
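# Hedged usage sketch of ModuleManager (the directory and signal handler are
# illustrative, not real conduit configuration):
#   manager = ModuleManager(["~/.conduit/modules"])
#   manager.connect("dataprovider-available", lambda mgr, dpw: log.info(dpw))
#   manager.load_all(whitelist=None, blacklist=None)
# Signals fire while wrappers are appended, so connect handlers before calling
# load_all; dynamic factories keep emitting as devices appear later.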
| gpl-2.0 |
switchboardOp/ansible | lib/ansible/modules/cloud/azure/azure.py | 8 | 24636 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure
short_description: create or terminate a virtual machine in azure
description:
    - Creates or terminates azure instances. When creating, optionally waits for the instance to reach the 'running' state.
version_added: "1.7"
options:
name:
description:
- name of the virtual machine and associated cloud service.
required: true
default: null
location:
description:
- the azure location to use (e.g. 'East US')
required: true
default: null
subscription_id:
description:
- azure subscription id. Overrides the AZURE_SUBSCRIPTION_ID environment variable.
required: false
default: null
management_cert_path:
description:
- path to an azure management certificate associated with the subscription id. Overrides the AZURE_CERT_PATH environment variable.
required: false
default: null
storage_account:
description:
- the azure storage account in which to store the data disks.
required: true
image:
description:
- system image for creating the virtual machine
(e.g., b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB)
required: true
default: null
role_size:
description:
- azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6). You have to pay attention to the fact that instances of
type G and DS are not available in all regions (locations). Make sure if you selected the size and type of instance available in your chosen location.
required: false
default: Small
endpoints:
description:
- a comma-separated list of TCP ports to expose on the virtual machine (e.g., "22,80")
required: false
default: 22
user:
description:
- the unix username for the new virtual machine.
required: false
default: null
password:
description:
- the unix password for the new virtual machine.
required: false
default: null
ssh_cert_path:
description:
- path to an X509 certificate containing the public ssh key to install in the virtual machine.
See http://www.windowsazure.com/en-us/manage/linux/tutorials/intro-to-linux/ for more details.
- if this option is specified, password-based ssh authentication will be disabled.
required: false
default: null
virtual_network_name:
description:
- Name of virtual network.
required: false
default: null
hostname:
description:
- hostname to write /etc/hostname. Defaults to <name>.cloudapp.net.
required: false
default: null
wait:
description:
- wait for the instance to be in state 'running' before returning
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
aliases: []
wait_timeout_redirects:
description:
- how long before wait gives up for redirects, in seconds
default: 300
aliases: []
state:
description:
- create or terminate instances
required: false
default: 'present'
aliases: []
auto_updates:
description:
- Enable Auto Updates on Windows Machines
required: false
version_added: "2.0"
default: "no"
choices: [ "yes", "no" ]
enable_winrm:
description:
- Enable winrm on Windows Machines
required: false
version_added: "2.0"
default: "yes"
choices: [ "yes", "no" ]
os_type:
description:
      - The type of the OS that is getting provisioned
required: false
version_added: "2.0"
default: "linux"
choices: [ "windows", "linux" ]
requirements:
- "python >= 2.6"
- "azure >= 0.7.1"
author: "John Whitbeck (@jwhitbeck)"
'''
EXAMPLES = '''
# Note: None of these examples set subscription_id or management_cert_path
# It is assumed that their matching environment variables are set.
- name: Provision virtual machine example
azure:
name: my-virtual-machine
role_size: Small
image: b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB
location: East US
user: ubuntu
ssh_cert_path: /path/to/azure_x509_cert.pem
storage_account: my-storage-account
wait: True
state: present
delegate_to: localhost
- name: Terminate virtual machine example
azure:
name: my-virtual-machine
state: absent
delegate_to: localhost
- name: Create windows machine
azure:
name: ben-Winows-23
hostname: win123
os_type: windows
enable_winrm: True
subscription_id: '{{ azure_sub_id }}'
management_cert_path: '{{ azure_cert_path }}'
role_size: Small
image: bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-v13.5
location: East Asia
password: xxx
storage_account: benooytes
user: admin
wait: True
state: present
virtual_network_name: '{{ vnet_name }}'
delegate_to: localhost
'''
import base64
import datetime
import os
import signal
import time
from urlparse import urlparse
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.facts.timeout import TimeoutError
AZURE_LOCATIONS = ['South Central US',
'Central US',
'East US 2',
'East US',
'West US',
'North Central US',
'North Europe',
'West Europe',
'East Asia',
'Southeast Asia',
'Japan West',
'Japan East',
'Brazil South']
AZURE_ROLE_SIZES = ['ExtraSmall',
'Small',
'Medium',
'Large',
'ExtraLarge',
'A5',
'A6',
'A7',
'A8',
'A9',
'Basic_A0',
'Basic_A1',
'Basic_A2',
'Basic_A3',
'Basic_A4',
'Standard_D1',
'Standard_D2',
'Standard_D3',
'Standard_D4',
'Standard_D11',
'Standard_D12',
'Standard_D13',
'Standard_D14',
'Standard_D1_v2',
'Standard_D2_v2',
'Standard_D3_v2',
'Standard_D4_v2',
'Standard_D5_v2',
'Standard_D11_v2',
'Standard_D12_v2',
'Standard_D13_v2',
'Standard_D14_v2',
'Standard_DS1',
'Standard_DS2',
'Standard_DS3',
'Standard_DS4',
'Standard_DS11',
'Standard_DS12',
'Standard_DS13',
'Standard_DS14',
'Standard_G1',
'Standard_G2',
'Standard_G3',
'Standard_G4',
'Standard_G5']
from distutils.version import LooseVersion
try:
import azure as windows_azure
if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.11.1":
from azure import WindowsAzureError as AzureException
from azure import WindowsAzureMissingResourceError as AzureMissingException
else:
from azure.common import AzureException as AzureException
from azure.common import AzureMissingResourceHttpError as AzureMissingException
from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys,
PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints,
ConfigurationSetInputEndpoint, Listener, WindowsConfigurationSet)
HAS_AZURE = True
except ImportError:
HAS_AZURE = False
from types import MethodType
import json
def _wait_for_completion(azure, promise, wait_timeout, msg):
if not promise:
return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
operation_result = azure.get_operation_status(promise.request_id)
time.sleep(5)
if operation_result.status == "Succeeded":
return
raise AzureException('Timed out waiting for async operation ' + msg + ' "' + str(promise.request_id) + '" to complete.')
def _delete_disks_when_detached(azure, wait_timeout, disk_names):
def _handle_timeout(signum, frame):
raise TimeoutError("Timeout reached while waiting for disks to become detached.")
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(wait_timeout)
try:
while len(disk_names) > 0:
for disk_name in disk_names:
disk = azure.get_disk(disk_name)
if disk.attached_to is None:
azure.delete_disk(disk.name, True)
disk_names.remove(disk_name)
except AzureException as e:
raise AzureException("failed to get or delete disk %s, error was: %s" % (disk_name, str(e)))
finally:
signal.alarm(0)
def get_ssh_certificate_tokens(module, ssh_cert_path):
"""
Returns the sha1 fingerprint and a base64-encoded PKCS12 version of the certificate.
"""
# This returns a string such as SHA1 Fingerprint=88:60:0B:13:A9:14:47:DA:4E:19:10:7D:34:92:2B:DF:A1:7D:CA:FF
rc, stdout, stderr = module.run_command(['openssl', 'x509', '-in', ssh_cert_path, '-fingerprint', '-noout'])
if rc != 0:
module.fail_json(msg="failed to generate the key fingerprint, error was: %s" % stderr)
fingerprint = stdout.strip()[17:].replace(':', '')
rc, stdout, stderr = module.run_command(['openssl', 'pkcs12', '-export', '-in', ssh_cert_path, '-nokeys', '-password', 'pass:'])
if rc != 0:
module.fail_json(msg="failed to generate the pkcs12 signature from the certificate, error was: %s" % stderr)
pkcs12_base64 = base64.b64encode(stdout.strip())
return (fingerprint, pkcs12_base64)
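# Hedged illustration of the slicing above: openssl prints a line such as
#   SHA1 Fingerprint=88:60:0B:13:A9:14:47:DA:4E:19:10:7D:34:92:2B:DF:A1:7D:CA:FF
# and "SHA1 Fingerprint=" is exactly 17 characters, so stdout.strip()[17:]
# keeps the hex pairs and .replace(':', '') yields the bare digest the Azure
# API expects (e.g. "88600B13...").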
def create_virtual_machine(module, azure):
"""
Create new virtual machine
module : AnsibleModule object
azure: authenticated azure ServiceManagementService object
Returns:
True if a new virtual machine and/or cloud service was created, false otherwise
"""
name = module.params.get('name')
os_type = module.params.get('os_type')
hostname = module.params.get('hostname') or name + ".cloudapp.net"
endpoints = module.params.get('endpoints').split(',')
ssh_cert_path = module.params.get('ssh_cert_path')
user = module.params.get('user')
password = module.params.get('password')
location = module.params.get('location')
role_size = module.params.get('role_size')
storage_account = module.params.get('storage_account')
image = module.params.get('image')
virtual_network_name = module.params.get('virtual_network_name')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
# Check if a deployment with the same name already exists
cloud_service_name_available = azure.check_hosted_service_name_availability(name)
if cloud_service_name_available.result:
# cloud service does not exist; create it
try:
result = azure.create_hosted_service(service_name=name, label=name, location=location)
_wait_for_completion(azure, result, wait_timeout, "create_hosted_service")
changed = True
except AzureException as e:
module.fail_json(msg="failed to create the new service, error was: %s" % str(e))
try:
# check to see if a vm with this name exists; if so, do nothing
azure.get_role(name, name, name)
except AzureMissingException:
# vm does not exist; create it
if os_type == 'linux':
# Create linux configuration
disable_ssh_password_authentication = not password
vm_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication)
else:
# Create Windows Config
vm_config = WindowsConfigurationSet(hostname, password, None, module.params.get('auto_updates'), None, user)
vm_config.domain_join = None
if module.params.get('enable_winrm'):
listener = Listener('Http')
vm_config.win_rm.listeners.listeners.append(listener)
else:
vm_config.win_rm = None
# Add ssh certificates if specified
if ssh_cert_path:
fingerprint, pkcs12_base64 = get_ssh_certificate_tokens(module, ssh_cert_path)
# Add certificate to cloud service
result = azure.add_service_certificate(name, pkcs12_base64, 'pfx', '')
_wait_for_completion(azure, result, wait_timeout, "add_service_certificate")
# Create ssh config
ssh_config = SSH()
ssh_config.public_keys = PublicKeys()
authorized_keys_path = u'/home/%s/.ssh/authorized_keys' % user
ssh_config.public_keys.public_keys.append(PublicKey(path=authorized_keys_path, fingerprint=fingerprint))
# Append ssh config to linux machine config
vm_config.ssh = ssh_config
# Create network configuration
network_config = ConfigurationSetInputEndpoints()
network_config.configuration_set_type = 'NetworkConfiguration'
network_config.subnet_names = []
network_config.public_ips = None
for port in endpoints:
network_config.input_endpoints.append(ConfigurationSetInputEndpoint(name='TCP-%s' % port,
protocol='TCP',
port=port,
local_port=port))
# First determine where to store disk
today = datetime.date.today().strftime('%Y-%m-%d')
disk_prefix = u'%s-%s' % (name, name)
media_link = u'http://%s.blob.core.windows.net/vhds/%s-%s.vhd' % (storage_account, disk_prefix, today)
# Create system hard disk
os_hd = OSVirtualHardDisk(image, media_link)
# Spin up virtual machine
try:
result = azure.create_virtual_machine_deployment(service_name=name,
deployment_name=name,
deployment_slot='production',
label=name,
role_name=name,
system_config=vm_config,
network_config=network_config,
os_virtual_hard_disk=os_hd,
role_size=role_size,
role_type='PersistentVMRole',
virtual_network_name=virtual_network_name)
_wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment")
changed = True
except AzureException as e:
module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e))
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
return (changed, urlparse(deployment.url).hostname, deployment)
except AzureException as e:
module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e)))
def terminate_virtual_machine(module, azure):
"""
Terminates a virtual machine
module : AnsibleModule object
azure: authenticated azure ServiceManagementService object
Returns:
    True if a virtual machine was deleted, false otherwise
"""
# Whether to wait for termination to complete before returning
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
name = module.params.get('name')
delete_empty_services = module.params.get('delete_empty_services')
changed = False
deployment = None
public_dns_name = None
disk_names = []
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
except AzureMissingException as e:
pass # no such deployment or service
except AzureException as e:
module.fail_json(msg="failed to find the deployment, error was: %s" % str(e))
# Delete deployment
if deployment:
changed = True
try:
# gather disk info
results = []
for role in deployment.role_list:
role_props = azure.get_role(name, deployment.name, role.role_name)
if role_props.os_virtual_hard_disk.disk_name not in disk_names:
disk_names.append(role_props.os_virtual_hard_disk.disk_name)
except AzureException as e:
module.fail_json(msg="failed to get the role %s, error was: %s" % (role.role_name, str(e)))
try:
result = azure.delete_deployment(name, deployment.name)
_wait_for_completion(azure, result, wait_timeout, "delete_deployment")
except AzureException as e:
module.fail_json(msg="failed to delete the deployment %s, error was: %s" % (deployment.name, str(e)))
        # It's unclear when disks associated with a terminated deployment get detached.
# Thus, until the wait_timeout is reached, we continue to delete disks as they
# become detached by polling the list of remaining disks and examining the state.
try:
_delete_disks_when_detached(azure, wait_timeout, disk_names)
except (AzureException, TimeoutError) as e:
module.fail_json(msg=str(e))
try:
# Now that the vm is deleted, remove the cloud service
result = azure.delete_hosted_service(service_name=name)
_wait_for_completion(azure, result, wait_timeout, "delete_hosted_service")
except AzureException as e:
module.fail_json(msg="failed to delete the service %s, error was: %s" % (name, str(e)))
public_dns_name = urlparse(deployment.url).hostname
return changed, public_dns_name, deployment
def get_azure_creds(module):
# Check module args for credentials, then check environment vars
subscription_id = module.params.get('subscription_id')
if not subscription_id:
subscription_id = os.environ.get('AZURE_SUBSCRIPTION_ID', None)
if not subscription_id:
module.fail_json(msg="No subscription_id provided. Please set 'AZURE_SUBSCRIPTION_ID' or use the 'subscription_id' parameter")
management_cert_path = module.params.get('management_cert_path')
if not management_cert_path:
management_cert_path = os.environ.get('AZURE_CERT_PATH', None)
if not management_cert_path:
module.fail_json(msg="No management_cert_path provided. Please set 'AZURE_CERT_PATH' or use the 'management_cert_path' parameter")
return subscription_id, management_cert_path
def main():
module = AnsibleModule(
argument_spec=dict(
ssh_cert_path=dict(),
name=dict(),
hostname=dict(),
os_type=dict(default='linux', choices=['linux', 'windows']),
location=dict(choices=AZURE_LOCATIONS),
role_size=dict(choices=AZURE_ROLE_SIZES),
subscription_id=dict(no_log=True),
storage_account=dict(),
management_cert_path=dict(),
endpoints=dict(default='22'),
user=dict(),
password=dict(no_log=True),
image=dict(),
virtual_network_name=dict(default=None),
state=dict(default='present'),
wait=dict(type='bool', default=False),
wait_timeout=dict(default=600),
wait_timeout_redirects=dict(default=300),
auto_updates=dict(type='bool', default=False),
enable_winrm=dict(type='bool', default=True),
)
)
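    # Example task using this module (an illustrative playbook snippet; all
    # values below are placeholders, not defaults):
    #
    #   - azure:
    #       name: my-vm
    #       image: my-os-image-name
    #       location: 'East US'
    #       role_size: Small
    #       storage_account: mystorageaccount
    #       user: admin
    #       password: "{{ vm_password }}"
    #       state: present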
if not HAS_AZURE:
module.fail_json(msg='azure python module required for this module')
# create azure ServiceManagementService object
subscription_id, management_cert_path = get_azure_creds(module)
wait_timeout_redirects = int(module.params.get('wait_timeout_redirects'))
if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.8.0":
# wrapper for handling redirects which the sdk <= 0.8.0 is not following
azure = Wrapper(ServiceManagementService(subscription_id, management_cert_path), wait_timeout_redirects)
else:
azure = ServiceManagementService(subscription_id, management_cert_path)
cloud_service_raw = None
if module.params.get('state') == 'absent':
(changed, public_dns_name, deployment) = terminate_virtual_machine(module, azure)
elif module.params.get('state') == 'present':
# Changed is always set to true when provisioning new instances
if not module.params.get('name'):
module.fail_json(msg='name parameter is required for new instance')
if not module.params.get('image'):
module.fail_json(msg='image parameter is required for new instance')
if not module.params.get('user'):
module.fail_json(msg='user parameter is required for new instance')
if not module.params.get('location'):
module.fail_json(msg='location parameter is required for new instance')
if not module.params.get('storage_account'):
module.fail_json(msg='storage_account parameter is required for new instance')
if not (module.params.get('password') or module.params.get('ssh_cert_path')):
module.fail_json(msg='password or ssh_cert_path parameter is required for new instance')
(changed, public_dns_name, deployment) = create_virtual_machine(module, azure)
module.exit_json(changed=changed, public_dns_name=public_dns_name, deployment=json.loads(json.dumps(deployment, default=lambda o: o.__dict__)))
class Wrapper(object):
def __init__(self, obj, wait_timeout):
self.other = obj
self.wait_timeout = wait_timeout
def __getattr__(self, name):
if hasattr(self.other, name):
func = getattr(self.other, name)
return lambda *args, **kwargs: self._wrap(func, args, kwargs)
raise AttributeError(name)
def _wrap(self, func, args, kwargs):
if isinstance(func, MethodType):
result = self._handle_temporary_redirects(lambda: func(*args, **kwargs))
else:
result = self._handle_temporary_redirects(lambda: func(self.other, *args, **kwargs))
return result
def _handle_temporary_redirects(self, f):
wait_timeout = time.time() + self.wait_timeout
while wait_timeout > time.time():
try:
return f()
except AzureException as e:
                if "temporary redirect" in str(e).lower():
                    # The SDK <= 0.8.0 does not follow the redirect itself;
                    # back off briefly and retry until the timeout expires.
                    time.sleep(5)
                else:
                    raise e
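# A minimal usage sketch of Wrapper (mirrors main() above; illustrative):
#
#   sms = ServiceManagementService(subscription_id, management_cert_path)
#   azure = Wrapper(sms, wait_timeout_redirects)
#   azure.list_hosted_services()  # any SDK call is retried on "temporary redirect"
#
# Attribute access falls through __getattr__ to the wrapped service object, so
# callers use the Wrapper exactly as they would the raw ServiceManagementService.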
# import module snippets
if __name__ == '__main__':
main()
| gpl-3.0 |
egoid/baytree | lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/genshi.py | 508 | 2309 | from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from . import base
from ..constants import voidElements, namespaces
class TreeWalker(base.TreeWalker):
def __iter__(self):
# Buffer the events so we can pass in the following one
previous = None
for event in self.tree:
if previous is not None:
for token in self.tokens(previous, event):
yield token
previous = event
# Don't forget the final event!
if previous is not None:
for token in self.tokens(previous, None):
yield token
def tokens(self, event, next):
kind, data, _ = event
if kind == START:
tag, attribs = data
name = tag.localname
namespace = tag.namespace
converted_attribs = {}
for k, v in attribs:
if isinstance(k, QName):
converted_attribs[(k.namespace, k.localname)] = v
else:
converted_attribs[(None, k)] = v
if namespace == namespaces["html"] and name in voidElements:
for token in self.emptyTag(namespace, name, converted_attribs,
not next or next[0] != END or
next[1] != tag):
yield token
else:
yield self.startTag(namespace, name, converted_attribs)
elif kind == END:
name = data.localname
namespace = data.namespace
if namespace != namespaces["html"] or name not in voidElements:
yield self.endTag(namespace, name)
elif kind == COMMENT:
yield self.comment(data)
elif kind == TEXT:
for token in self.text(data):
yield token
elif kind == DOCTYPE:
yield self.doctype(*data)
elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
START_CDATA, END_CDATA, PI):
pass
else:
yield self.unknown(kind)
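    # Event -> token mapping sketch (illustrative): a genshi stream such as
    #   START(div), TEXT(u"hi"), END(div)
    # is walked into startTag/text/endTag tokens, while void HTML elements
    # (e.g. <br>) are emitted through emptyTag() instead of a start/end pair.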
| mit |
marshallmcdonnell/interactive_plotting | matplotlib/draggable_legend_code.py | 1 | 3140 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as _plt
class DraggableLegend:
def __init__(self, legend):
self.legend = legend
self.gotLegend = False
legend.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)
legend.figure.canvas.mpl_connect('pick_event', self.on_picker)
legend.figure.canvas.mpl_connect('button_release_event', self.on_release)
legend.set_picker(self.my_legend_picker)
#----------------------------------------------------#
# Connected event handlers
def on_motion(self, event):
if self.gotLegend:
dx = event.x - self.mouse_x
dy = event.y - self.mouse_y
loc_in_canvas = self.legend_x + dx, self.legend_y + dy
loc_in_norm_axes = self.legend.parent.transAxes.inverted().transform_point(loc_in_canvas)
self.legend._loc = tuple(loc_in_norm_axes)
self.legend.figure.canvas.draw()
def my_legend_picker(self, legend, event):
return self.legend.legendPatch.contains(event)
def on_picker(self, event):
if event.artist == self.legend:
# left-click
if event.mouseevent.button == 1:
self._move_legend(event)
# mouse button pressed
if event.mouseevent.button == 2:
pass
# right-click
if event.mouseevent.button == 3:
self._hideLegend()
# mouse up
if event.mouseevent.button == 'up':
self._scaleUpLegendFont()
# mouse down
if event.mouseevent.button == 'down':
self._scaleDownLegendFont()
def on_release(self, event):
if self.gotLegend:
self.gotLegend = False
#----------------------------------------------------#
# Utility functions
def _move_legend(self,event):
bbox = self.legend.get_window_extent()
self.mouse_x = event.mouseevent.x
self.mouse_y = event.mouseevent.y
self.legend_x = bbox.xmin
self.legend_y = bbox.ymin
        self.gotLegend = True
def _scaleUpLegendFont(self,size_step=4):
size = self.legend.get_texts()[0].get_fontsize()
size += size_step
_plt.setp(self.legend.get_texts(), fontsize=size) #legend 'list' fontsize
self.legend.figure.canvas.draw()
def _scaleDownLegendFont(self,size_step=4):
size = self.legend.get_texts()[0].get_fontsize()
size -= size_step
_plt.setp(self.legend.get_texts(), fontsize=size) #legend 'list' fontsize
self.legend.figure.canvas.draw()
def _hideLegend(self):
if self.legend.get_visible():
self.legend.set_visible(False)
else:
self.legend.set_visible(True)
self.legend.figure.canvas.draw()
figure = _plt.figure()
ax = figure.add_subplot(111)
scatter = ax.scatter(np.random.randn(100), np.random.randn(100), label='hi')
legend = ax.legend()
legend = DraggableLegend(legend)
_plt.show()
| mit |
gromitsun/sim-xrf | python/snr/pysnip.py | 1 | 2468 | import numpy as np
from scipy.optimize import curve_fit
def FWHM(x, noise=100, fano=0.114):
sigma = np.sqrt((noise / 2.3548) ** 2 + 3.58 * fano * x)
return 2.3548 * sigma
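# Sanity check (illustrative; energies assumed to be in eV): at the Mn K-alpha
# line, FWHM(5898) = 2.3548*sqrt((100/2.3548)**2 + 3.58*0.114*5898) ~= 153 eV,
# the familiar energy resolution of a typical silicon detector.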
def fit_FWHM(x, F):
def _FWHM(x, noise, fano):
return (noise / 2.3548) ** 2 + 3.58 * fano * x
popt, pcov = curve_fit(_FWHM, x, (F / 2.3548) ** 2, p0=[100, 0.114])
return popt
def energy_to_channel(energy, offset=2.97, gain=12.26952):
return 1. * (energy - offset) / gain
# # # Low statistics digital filter
def lsdf(E, y, FWHM=FWHM,
f=1.5,
A=75,
M=10,
r=1.3):
def _reduce(x, length_start):
for i in range(length_start):
length = length_start - i
if x < length:
raise IndexError
L = y[x - length:x].sum()
R = y[x + 1:x + length + 1].sum()
S = y[x] + L + R
slope = (R + 1.) / (L + 1.)
if S < M or S < A * np.sqrt(y[x]) or (1. / r <= slope <= r):
return S / (2. * length + 1)
print 'Not found for x = %d!' % x
return y[x]
y_out = y.copy()
for x in range(len(E)):
try:
len_0 = int(energy_to_channel(f * FWHM(E[x]), E[0], E[1] - E[0]))
y_out[x] = _reduce(x, len_0)
except IndexError:
pass
return y_out
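# Usage sketch for lsdf() (illustrative; E in eV, y in counts):
#
#   y_smooth = lsdf(E, y)   # adaptive smoothing of low-statistics regions
#
# At each channel the window shrinks from f*FWHM(E[x]) until the local test
# (S < M, S < A*sqrt(y[x]), or slope within [1/r, r]) accepts the average.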
# # # Peak-clipping
def snip(E, y, FWHM=FWHM, offset=0., gain=10., **kwargs):
det = kwargs.get('detector')
loops = kwargs.get('loops', 24)
end_loops = kwargs.get('end_loops', 8)
reduce_factor = kwargs.get('reduce_factor', np.sqrt(2))
factor = kwargs.get('factor', 2)
if det is not None:
FWHM = det.response.FWHM
offset = det.channel.offset
gain = det.channel.gain
def G(y):
return np.log(np.log(y + 1) + 1)
def w(x, factor=2):
return energy_to_channel(factor * FWHM(E[x]), offset=offset, gain=gain)
def G_inv(z):
return np.exp(np.exp(z) - 1) - 1
z_out = G(y)
for i in range(loops):
if i >= loops - end_loops:
factor /= 1. * reduce_factor
z = z_out.copy()
for x in range(len(E)):
try:
                _w = int(w(x, factor=factor))  # index offsets must be integers
if _w > x:
raise IndexError
z_bar = (z[x + _w] + z[x - _w]) / 2.
z_out[x] = min(z[x], z_bar)
except IndexError:
pass
return G_inv(z_out)
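# A minimal end-to-end sketch of the SNIP estimator above; the spectrum is
# synthetic and the offset/gain values are illustrative assumptions only.
if __name__ == '__main__':
    E_demo = np.linspace(1000., 20000., 2048)   # energy axis, eV
    y_demo = 1e3 * np.exp(-(E_demo - 8000.) ** 2 / (2 * 150. ** 2)) + 50.
    bg = snip(E_demo, y_demo, offset=0., gain=E_demo[1] - E_demo[0])
    net = y_demo - bg                           # background-subtracted counts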
| mit |
lz1988/company-site | django/db/models/sql/aggregates.py | 195 | 3977 | """
Classes to represent the default SQL aggregate functions
"""
from django.db.models.fields import IntegerField, FloatField
# Fake fields used to identify aggregate types in data-conversion operations.
ordinal_aggregate_field = IntegerField()
computed_aggregate_field = FloatField()
class Aggregate(object):
"""
Default SQL Aggregate.
"""
is_ordinal = False
is_computed = False
sql_template = '%(function)s(%(field)s)'
def __init__(self, col, source=None, is_summary=False, **extra):
"""Instantiate an SQL aggregate
* col is a column reference describing the subject field
of the aggregate. It can be an alias, or a tuple describing
a table and column name.
* source is the underlying field or aggregate definition for
the column reference. If the aggregate is not an ordinal or
computed type, this reference is used to determine the coerced
output type of the aggregate.
* extra is a dictionary of additional data to provide for the
aggregate definition
Also utilizes the class variables:
* sql_function, the name of the SQL function that implements the
aggregate.
* sql_template, a template string that is used to render the
aggregate into SQL.
* is_ordinal, a boolean indicating if the output of this aggregate
is an integer (e.g., a count)
        * is_computed, a boolean indicating if the output of this aggregate
is a computed float (e.g., an average), regardless of the input
type.
"""
self.col = col
self.source = source
self.is_summary = is_summary
self.extra = extra
# Follow the chain of aggregate sources back until you find an
# actual field, or an aggregate that forces a particular output
# type. This type of this field will be used to coerce values
# retrieved from the database.
tmp = self
while tmp and isinstance(tmp, Aggregate):
if getattr(tmp, 'is_ordinal', False):
tmp = ordinal_aggregate_field
elif getattr(tmp, 'is_computed', False):
tmp = computed_aggregate_field
else:
tmp = tmp.source
self.field = tmp
def relabel_aliases(self, change_map):
if isinstance(self.col, (list, tuple)):
self.col = (change_map.get(self.col[0], self.col[0]), self.col[1])
def as_sql(self, qn, connection):
"Return the aggregate, rendered as SQL."
if hasattr(self.col, 'as_sql'):
field_name = self.col.as_sql(qn, connection)
elif isinstance(self.col, (list, tuple)):
field_name = '.'.join([qn(c) for c in self.col])
else:
field_name = self.col
params = {
'function': self.sql_function,
'field': field_name
}
params.update(self.extra)
return self.sql_template % params
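    # Rendering example (illustrative): Count(('books', 'id'), distinct=True)
    # fills '%(function)s(%(distinct)s%(field)s)' to produce
    # COUNT(DISTINCT "books"."id") once the column parts are quoted and joined.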
class Avg(Aggregate):
is_computed = True
sql_function = 'AVG'
class Count(Aggregate):
is_ordinal = True
sql_function = 'COUNT'
sql_template = '%(function)s(%(distinct)s%(field)s)'
def __init__(self, col, distinct=False, **extra):
super(Count, self).__init__(col, distinct=distinct and 'DISTINCT ' or '', **extra)
class Max(Aggregate):
sql_function = 'MAX'
class Min(Aggregate):
sql_function = 'MIN'
class StdDev(Aggregate):
is_computed = True
def __init__(self, col, sample=False, **extra):
super(StdDev, self).__init__(col, **extra)
self.sql_function = sample and 'STDDEV_SAMP' or 'STDDEV_POP'
class Sum(Aggregate):
sql_function = 'SUM'
class Variance(Aggregate):
is_computed = True
def __init__(self, col, sample=False, **extra):
super(Variance, self).__init__(col, **extra)
self.sql_function = sample and 'VAR_SAMP' or 'VAR_POP'
| bsd-3-clause |
dhanunjaya/neutron | neutron/db/migration/alembic_migrations/versions/4119216b7365_add_tenant_id_idx.py | 47 | 1466 | # Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add index on tenant_id column
Revision ID: 4119216b7365
Revises: 28c0ffb8ebbd
Create Date: 2014-12-19 12:21:54.439723
"""
# revision identifiers, used by Alembic.
revision = '4119216b7365'
down_revision = 'bebba223288'
from alembic import op
TABLES = ['floatingips', 'networkconnections', 'networkgatewaydevices',
'networks', 'packetfilters', 'ports', 'qosqueues', 'routers',
'securitygrouprules', 'securitygroups', 'subnets', 'meteringlabels',
'arista_provisioned_nets', 'arista_provisioned_tenants',
'arista_provisioned_vms', 'cisco_hosting_devices',
'cisco_ml2_apic_contracts', 'ml2_brocadenetworks',
'ml2_brocadeports']
def upgrade():
for table in TABLES:
op.create_index(op.f('ix_%s_tenant_id' % table),
table, ['tenant_id'], unique=False)
| apache-2.0 |
2013Commons/HUE-SHARK | desktop/core/ext-py/Django-1.2.3/tests/modeltests/model_forms/tests.py | 7 | 9017 | import datetime
from django.test import TestCase
from django import forms
from models import Category, Writer, Book, DerivedBook, Post
from mforms import (ProductForm, PriceForm, BookForm, DerivedBookForm,
ExplicitPKForm, PostForm, DerivedPostForm, CustomWriterForm)
class IncompleteCategoryFormWithFields(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
fields = ('name', 'slug')
model = Category
class IncompleteCategoryFormWithExclude(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
exclude = ['url']
model = Category
class ValidationTest(TestCase):
def test_validates_with_replaced_field_not_specified(self):
form = IncompleteCategoryFormWithFields(data={'name': 'some name', 'slug': 'some-slug'})
assert form.is_valid()
def test_validates_with_replaced_field_excluded(self):
form = IncompleteCategoryFormWithExclude(data={'name': 'some name', 'slug': 'some-slug'})
assert form.is_valid()
def test_notrequired_overrides_notblank(self):
form = CustomWriterForm({})
assert form.is_valid()
# unique/unique_together validation
class UniqueTest(TestCase):
def setUp(self):
self.writer = Writer.objects.create(name='Mike Royko')
def test_simple_unique(self):
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertTrue(form.is_valid())
obj = form.save()
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], [u'Product with this Slug already exists.'])
form = ProductForm({'slug': 'teddy-bear-blue'}, instance=obj)
self.assertTrue(form.is_valid())
def test_unique_together(self):
"""ModelForm test of unique_together constraint"""
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertTrue(form.is_valid())
form.save()
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], [u'Price with this Price and Quantity already exists.'])
def test_unique_null(self):
title = 'I May Be Wrong But I Doubt It'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], [u'Book with this Title and Author already exists.'])
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
def test_inherited_unique(self):
title = 'Boss'
Book.objects.create(title=title, author=self.writer, special_id=1)
form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'special_id': u'1', 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['special_id'], [u'Book with this Special id already exists.'])
def test_inherited_unique_together(self):
title = 'Boss'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = DerivedBookForm({'title': title, 'author': self.writer.pk, 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], [u'Book with this Title and Author already exists.'])
def test_abstract_inherited_unique(self):
title = 'Boss'
isbn = '12345'
dbook = DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'isbn': isbn})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['isbn'], [u'Derived book with this Isbn already exists.'])
def test_abstract_inherited_unique_together(self):
title = 'Boss'
isbn = '12345'
dbook = DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'isbn': '9876', 'suffix1': u'0', 'suffix2': u'0'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], [u'Derived book with this Suffix1 and Suffix2 already exists.'])
def test_explicitpk_unspecified(self):
"""Test for primary_key being in the form and failing validation."""
form = ExplicitPKForm({'key': u'', 'desc': u'' })
self.assertFalse(form.is_valid())
def test_explicitpk_unique(self):
"""Ensure keys and blank character strings are tested for uniqueness."""
form = ExplicitPKForm({'key': u'key1', 'desc': u''})
self.assertTrue(form.is_valid())
form.save()
form = ExplicitPKForm({'key': u'key1', 'desc': u''})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 3)
self.assertEqual(form.errors['__all__'], [u'Explicit pk with this Key and Desc already exists.'])
self.assertEqual(form.errors['desc'], [u'Explicit pk with this Desc already exists.'])
self.assertEqual(form.errors['key'], [u'Explicit pk with this Key already exists.'])
def test_unique_for_date(self):
p = Post.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], [u'Title must be unique for Posted date.'])
form = PostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = PostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], [u'Slug must be unique for Posted year.'])
form = PostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], [u'Subtitle must be unique for Posted month.'])
form = PostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released"})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['posted'], [u'This field is required.'])
def test_inherited_unique_for_date(self):
p = Post.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], [u'Title must be unique for Posted date.'])
form = DerivedPostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], [u'Slug must be unique for Posted year.'])
form = DerivedPostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], [u'Subtitle must be unique for Posted month.'])
form = DerivedPostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
"slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
self.assertTrue(form.is_valid())
| apache-2.0 |
dmsurti/mayavi | mayavi/sources/metadata.py | 5 | 10449 | """
Metadata for all sources.
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2008, Prabhu Ramachandran Enthought, Inc.
# License: BSD Style.
# Local imports.
from mayavi.core.metadata import SourceMetadata
from mayavi.core.pipeline_info import PipelineInfo
BASE = 'mayavi.sources'
open_3ds = SourceMetadata(
id = "3DSFile",
class_name = BASE + ".three_ds_importer.ThreeDSImporter",
tooltip = "Import a 3D Studio file",
desc = "Import a 3D Studio file",
help = "Import a 3D Studio file",
menu_name = "&3D Studio file",
extensions = ['3ds'],
wildcard = '3D Studio files (*.3ds)|*.3ds',
output_info = PipelineInfo(datasets=['none'],
attribute_types=['any'],
attributes=['any'])
)
open_image = SourceMetadata(
id = "ImageFile",
class_name = BASE + ".image_reader.ImageReader",
menu_name = "&Image file (PNG/JPG/BMP/PNM/TIFF/DEM/DCM/XIMG/MHA/MHD/MINC)",
tooltip = "Import a PNG/JPG/BMP/PNM/TIFF/DCM/DEM/XIMG/MHA/MHD/MINC image",
desc = "Import a PNG/JPG/BMP/PNM/TIFF/DCM/DEM/XIMG/MHA/MHD/MINC image",
extensions = ['png', 'jpg', 'jpeg', 'bmp', 'pnm', 'tiff', 'dcm', 'dem',
'ximg', 'mha', 'mhd', 'mnc'],
wildcard = 'PNG files (*.png)|*.png|'\
'JPEG files (*.jpg)|*.jpg|'\
'JPEG files (*.jpeg)|*.jpeg|'\
'BMP files (*.bmp)|*.bmp|'\
'PNM files (*.pnm)|*.pnm|'\
'DCM files (*.dcm)|*.dcm|'\
'DEM files (*.dem)|*.dem|'\
'Meta mha files (*.mha)|*.mha|'\
'Meta mhd files (*.mhd)|*.mhd|'\
'MINC files (*.mnc)|*.mnc|'\
'XIMG files (*.ximg)|*.ximg|'\
'TIFF files (*.tiff)|*.tiff',
output_info = PipelineInfo(datasets=['image_data'],
attribute_types=['any'],
attributes=['any'])
)
open_poly_data = SourceMetadata(
id = "PolyDataFile",
class_name = BASE + ".poly_data_reader.PolyDataReader",
menu_name = "&PolyData file (STL/STLA/STLB/TXT/RAW/PLY/PDB/SLC/FACET\
/OBJ/BYU/XYZ/CUBE)",
tooltip = "Import a STL/STLA/STLB/TXT/RAW/PLY/PDB/SLC/FACET/OBJ/\
BYU/XYZ/CUBE Poly Data",
desc = "Import a STL/STLA/STLB/TXT/RAWPLY/PDB/SLC/FACET/OBJ/BYU/XYZ/\
CUBE Poly Data",
extensions = ['stl', 'stla', 'stlb', 'txt', 'raw', 'ply', 'pdb', 'slc',
'facet', 'xyz', 'cube', 'obj', 'g'],
wildcard = 'STL files (*.stl)|*.stl|'\
'STLA files (*.stla)|*.stla|'\
'STLB files (*.stlb)|*.stlb|'\
'BYU files (*.g)|*.g|'\
'TXT files (*.txt)|*.txt|'\
'RAW files (*.raw)|*.raw|'\
'PLY files (*.ply)|*.ply|'\
'PDB files (*.pdb)|*.pdb|'\
'SLC files (*.slc)|*.slc|'\
'XYZ files (*.xyz)|*.xyz|'\
'CUBE files (*.cube)|*.cube|'\
'FACET files (*.facet)|*.facet|'\
'OBJ files (*.obj)|*.obj',
can_read_test = 'mayavi.sources.poly_data_reader:PolyDataReader.can_read',
output_info = PipelineInfo(datasets=['poly_data'],
attribute_types=['any'],
attributes=['any'])
)
open_ugrid_data = SourceMetadata(
id = "VTKUnstructuredFile",
class_name = BASE + ".unstructured_grid_reader.UnstructuredGridReader",
menu_name = "&Unstrucured Grid fil (INP/NEU/EXII)",
tooltip = "Open a Unstrucured Grid file",
desc = "Open a Unstrucured Grid file",
help = "Open a Unstrucured Grid file",
extensions = ['inp', 'neu', 'exii'],
wildcard = 'AVSUCD INP files (*.inp)|*.inp|'\
'GAMBIT NEU (*.neu)|*.neu|'\
'EXODUS EXII (*.exii)|*.exii',
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
)
open_plot3d = SourceMetadata(
id = "PLOT3DFile",
class_name = BASE + ".plot3d_reader.PLOT3DReader",
menu_name = "&PLOT3D file",
tooltip = "Open a PLOT3D data data",
desc = "Open a PLOT3D data data",
help = "Open a PLOT3D data data",
extensions = ['xyz'],
wildcard = 'PLOT3D files (*.xyz)|*.xyz',
output_info = PipelineInfo(datasets=['structured_grid'],
attribute_types=['any'],
attributes=['any'])
)
open_vrml = SourceMetadata(
id = "VRMLFile",
class_name = BASE + ".vrml_importer.VRMLImporter",
menu_name = "V&RML2 file",
tooltip = "Import a VRML2 data file",
desc = "Import a VRML2 data file",
help = "Import a VRML2 data file",
extensions = ['wrl'],
wildcard = 'VRML2 files (*.wrl)|*.wrl',
output_info = PipelineInfo(datasets=['none'],
attribute_types=['any'],
attributes=['any'])
)
open_vtk = SourceMetadata(
id = "VTKFile",
class_name = BASE + ".vtk_file_reader.VTKFileReader",
menu_name = "&VTK file",
tooltip = "Open a VTK data file",
desc = "Open a VTK data file",
help = "Open a VTK data file",
extensions = ['vtk'],
wildcard = 'VTK files (*.vtk)|*.vtk',
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
)
open_vtk_xml = SourceMetadata(
id = "VTKXMLFile",
class_name = BASE + ".vtk_xml_file_reader.VTKXMLFileReader",
menu_name = "VTK &XML file",
tooltip = "Open a VTK XML data file",
desc = "Open a VTK XML data file",
help = "Open a VTK XML data file",
extensions = ['xml', 'vti', 'vtp', 'vtr', 'vts', 'vtu',
'pvti', 'pvtp', 'pvtr', 'pvts', 'pvtu'],
wildcard = 'VTK XML files (*.xml)|*.xml|'\
'Image Data (*.vti)|*.vti|'\
'Poly Data (*.vtp)|*.vtp|'\
'Rectilinear Grid (*.vtr)|*.vtr|'\
'Structured Grid (*.vts)|*.vts|'\
'Unstructured Grid (*.vtu)|*.vtu|'\
'Parallel Image Data (*.pvti)|*.pvti|'\
'Parallel Poly Data (*.pvtp)|*.pvtp|'\
'Parallel Rectilinear Grid (*.pvtr)|*.pvtr|'\
'Parallel Structured Grid (*.pvts)|*.pvts|'\
'Parallel Unstructured Grid (*.pvtu)|*.pvtu',
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
)
parametric_surface = SourceMetadata(
id = "ParametricSurfaceSource",
class_name = BASE + ".parametric_surface.ParametricSurface",
menu_name = "&Create Parametric surface source",
tooltip = "Create a parametric surface source",
desc = "Create a parametric surface source",
help = "Create a parametric surface source",
extensions = [],
wildcard = '',
output_info = PipelineInfo(datasets=['poly_data'],
attribute_types=['any'],
attributes=['any'])
)
point_load = SourceMetadata(
id = "PointLoadSource",
class_name = BASE + ".point_load.PointLoad",
menu_name = "Create Point &load source",
tooltip = "Simulates a point load on a cube of data (for tensors)",
desc = "Simulates a point load on a cube of data (for tensors)",
help = "Simulates a point load on a cube of data (for tensors)",
extensions = [],
wildcard = '',
output_info = PipelineInfo(datasets=['image_data'],
attribute_types=['any'],
attributes=['any'])
)
builtin_surface = SourceMetadata(
id = "BuiltinSurfaceSource",
class_name = BASE + ".builtin_surface.BuiltinSurface",
menu_name = "Create built-in &surface",
tooltip = "Create a vtk poly data source",
desc = "Create a vtk poly data source",
help = "Create a vtk poly data source",
extensions = [],
wildcard = '',
output_info = PipelineInfo(datasets=['poly_data'],
attribute_types=['any'],
attributes=['any'])
)
builtin_image = SourceMetadata(
id = "BuiltinImageSource",
class_name = BASE + ".builtin_image.BuiltinImage",
menu_name = "Create built-in &image",
tooltip = "Create a vtk image data source",
desc = "Create a vtk image data source",
help = "Create a vtk image data source",
extensions = [],
wildcard = '',
output_info = PipelineInfo(datasets=['image_data'],
attribute_types=['any'],
attributes=['any'])
)
open_volume = SourceMetadata(
id = "VolumeFile",
class_name = BASE + ".volume_reader.VolumeReader",
menu_name = "&Volume file",
tooltip = "Open a Volume file",
desc = "Open a Volume file",
help = "Open a Volume file",
extensions = [],
wildcard = '',
output_info = PipelineInfo(datasets=['image_data'],
attribute_types=['any'],
attributes=['any'])
)
open_chaco = SourceMetadata(
id = "ChacoFile",
class_name = BASE + ".chaco_reader.ChacoReader",
menu_name = "&Chaco file",
tooltip = "Open a Chaco file",
desc = "Open a Chaco file",
help = "Open a Chaco file",
extensions = [],
wildcard = '',
output_info = PipelineInfo(datasets=['unstructured_grid'],
attribute_types=['any'],
attributes=['any'])
)
# Now collect all the sources for the mayavi registry.
sources = [open_3ds,
open_image,
open_plot3d,
open_vrml,
open_vtk,
open_vtk_xml,
parametric_surface,
point_load,
builtin_surface,
builtin_image,
open_poly_data,
open_ugrid_data,
open_volume,
open_chaco,
]
| bsd-3-clause |
le9i0nx/ansible | lib/ansible/plugins/action/ios_config.py | 13 | 4169 | #
# (c) 2017, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.ios import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=to_text(exc))
result = super(ActionModule, self).run(tmp, task_vars)
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
for key in list(result.keys()):
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
if os.path.isabs(src) or urlsplit('src').scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
        if hasattr(self._task, "_block"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 |
mrquim/mrquimrepo | script.module.nanscrapers/lib/nanscrapers/scraperplugins/flenix.py | 6 | 1943 | import re
import requests
import xbmc
import urllib
from ..scraper import Scraper
from ..common import clean_title, random_agent, clean_search
requests.packages.urllib3.disable_warnings()
session = requests.Session()
User_Agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
class flenix(Scraper):
domains = ['https://flenix.net']
name = "Flenix"
sources = []
def __init__(self):
self.base_link = 'https://flenix.net/'
self.goog = 'https://www.google.co.uk'
self.sources = []
def scrape_movie(self, title, year, imdb, debrid=False):
try:
scrape = clean_search(title.lower()).replace(' ','+')
start_url = '%s/search?q=flenix.net+%s+%s' %(self.goog,scrape,year)
headers = {'User-Agent':random_agent()}
html = requests.get(start_url,headers=headers).content
results = re.compile('href="(.+?)"',re.DOTALL).findall(html)
for url in results:
if self.base_link in url:
if scrape.replace('+','-') in url:
ID = url.split('movies/')[1].split('-')[0]
print ':::::::::::::'+ID
headers = {'User-Agent': random_agent()}
page_url= 'https://flenix.net/movies/%s/watch/'%ID
page = session.get(page_url,headers=headers)
req_url = 'https://flenix.net/?do=player_ajax&id=%s&xfn=player2' %ID
end_url = session.get(req_url, headers=headers).content
link = end_url
self.sources.append({'source': 'DirectLink','quality': '720P','scraper': self.name,'url': link,'direct': True})
return self.sources
except Exception, argument:
return self.sources
| gpl-2.0 |
philanthropy-u/edx-platform | common/lib/xmodule/xmodule/studio_editable.py | 17 | 2291 | """
Mixin to support editing in Studio.
"""
from xmodule.x_module import AUTHOR_VIEW, STUDENT_VIEW, module_attr
class StudioEditableBlock(object):
"""
Helper methods for supporting Studio editing of XBlocks.
This class is only intended to be used with an XBlock!
"""
has_author_view = True
def render_children(self, context, fragment, can_reorder=False, can_add=False):
"""
Renders the children of the module with HTML appropriate for Studio. If can_reorder is True,
then the children will be rendered to support drag and drop.
"""
contents = []
for child in self.get_children(): # pylint: disable=no-member
if can_reorder:
context['reorderable_items'].add(child.location)
context['can_add'] = can_add
rendered_child = child.render(StudioEditableModule.get_preview_view_name(child), context)
fragment.add_fragment_resources(rendered_child)
contents.append({
'id': unicode(child.location),
'content': rendered_child.content
})
fragment.add_content(self.system.render_template("studio_render_children_view.html", { # pylint: disable=no-member
'items': contents,
'xblock_context': context,
'can_add': can_add,
'can_reorder': can_reorder,
}))
@staticmethod
def get_preview_view_name(block):
"""
Helper method for getting preview view name (student_view or author_view) for a given module.
"""
return AUTHOR_VIEW if has_author_view(block) else STUDENT_VIEW
StudioEditableModule = StudioEditableBlock
class StudioEditableDescriptor(object):
"""
Helper mixin for supporting Studio editing of xmodules.
This class is only intended to be used with an XModule Descriptor. This class assumes that the associated
XModule will have an "author_view" method for returning an editable preview view of the module.
"""
author_view = module_attr(AUTHOR_VIEW)
has_author_view = True
def has_author_view(descriptor):
"""
Returns True if the xmodule linked to the descriptor supports "author_view".
"""
return getattr(descriptor, 'has_author_view', False)
| agpl-3.0 |
funson/rt-xen | tools/python/xen/xend/XendDmesg.py | 49 | 1379 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2007 XenSource Inc.
#============================================================================
"""Get dmesg output for this node.
"""
import xen.lowlevel.xc
class XendDmesg:
def __init__(self):
self.xc = xen.lowlevel.xc.xc()
def info(self):
return self.xc.readconsolering()
def clear(self):
return self.xc.readconsolering(True)
def instance():
global inst
try:
inst
except:
inst = XendDmesg()
return inst
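# Usage sketch (illustrative): instance() implements a module-level singleton
# via the NameError raised on the first reference to the undefined global, so
# repeated calls share one XendDmesg (and one xc handle):
#
#   d = instance()
#   ring = d.info()   # read the Xen console ring
#   d.clear()         # read and clear it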
| gpl-2.0 |
Xeralux/tensorflow | tensorflow/python/keras/activations/__init__.py | 74 | 1856 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in activation functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Activation functions.
from tensorflow.python.keras._impl.keras.activations import elu
from tensorflow.python.keras._impl.keras.activations import hard_sigmoid
from tensorflow.python.keras._impl.keras.activations import linear
from tensorflow.python.keras._impl.keras.activations import relu
from tensorflow.python.keras._impl.keras.activations import selu
from tensorflow.python.keras._impl.keras.activations import sigmoid
from tensorflow.python.keras._impl.keras.activations import softmax
from tensorflow.python.keras._impl.keras.activations import softplus
from tensorflow.python.keras._impl.keras.activations import softsign
from tensorflow.python.keras._impl.keras.activations import tanh
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras._impl.keras.activations import deserialize
from tensorflow.python.keras._impl.keras.activations import serialize
from tensorflow.python.keras._impl.keras.activations import get
del absolute_import
del division
del print_function
| apache-2.0 |
tjwei/jedi | jedi/evaluate/analysis.py | 33 | 10812 | """
Module for statical analysis.
"""
from jedi import debug
from jedi.parser import tree
from jedi.evaluate.compiled import CompiledObject
CODES = {
'attribute-error': (1, AttributeError, 'Potential AttributeError.'),
'name-error': (2, NameError, 'Potential NameError.'),
'import-error': (3, ImportError, 'Potential ImportError.'),
'type-error-generator': (4, TypeError, "TypeError: 'generator' object is not subscriptable."),
'type-error-too-many-arguments': (5, TypeError, None),
'type-error-too-few-arguments': (6, TypeError, None),
'type-error-keyword-argument': (7, TypeError, None),
'type-error-multiple-values': (8, TypeError, None),
'type-error-star-star': (9, TypeError, None),
'type-error-star': (10, TypeError, None),
'type-error-operation': (11, TypeError, None),
}
class Error(object):
def __init__(self, name, module_path, start_pos, message=None):
self.path = module_path
self._start_pos = start_pos
self.name = name
if message is None:
message = CODES[self.name][2]
self.message = message
@property
def line(self):
return self._start_pos[0]
@property
def column(self):
return self._start_pos[1]
@property
def code(self):
        # First letter of the class name: 'E' for Error, 'W' for Warning.
first = self.__class__.__name__[0]
return first + str(CODES[self.name][0])
def __unicode__(self):
return '%s:%s:%s: %s %s' % (self.path, self.line, self.column,
self.code, self.message)
def __str__(self):
return self.__unicode__()
def __eq__(self, other):
return (self.path == other.path and self.name == other.name
and self._start_pos == other._start_pos)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.path, self._start_pos, self.name))
def __repr__(self):
return '<%s %s: %s@%s,%s>' % (self.__class__.__name__,
self.name, self.path,
self._start_pos[0], self._start_pos[1])
class Warning(Error):
pass
def add(evaluator, name, jedi_obj, message=None, typ=Error, payload=None):
from jedi.evaluate.iterable import MergedNodes
while isinstance(jedi_obj, MergedNodes):
if len(jedi_obj) != 1:
# TODO is this kosher?
return
jedi_obj = list(jedi_obj)[0]
exception = CODES[name][1]
if _check_for_exception_catch(evaluator, jedi_obj, exception, payload):
return
module_path = jedi_obj.get_parent_until().path
instance = typ(name, module_path, jedi_obj.start_pos, message)
debug.warning(str(instance))
evaluator.analysis.append(instance)
def _check_for_setattr(instance):
"""
    Check whether setattr is called anywhere within the instance's class body. If so, return True.
"""
module = instance.get_parent_until()
try:
stmts = module.used_names['setattr']
except KeyError:
return False
return any(instance.start_pos < stmt.start_pos < instance.end_pos
for stmt in stmts)
def add_attribute_error(evaluator, scope, name):
message = ('AttributeError: %s has no attribute %s.' % (scope, name))
from jedi.evaluate.representation import Instance
    # Check for __getattr__/__getattribute__ existence and issue a warning
# instead of an error, if that happens.
if isinstance(scope, Instance):
typ = Warning
try:
scope.get_subscope_by_name('__getattr__')
except KeyError:
try:
scope.get_subscope_by_name('__getattribute__')
except KeyError:
if not _check_for_setattr(scope):
typ = Error
else:
typ = Error
payload = scope, name
add(evaluator, 'attribute-error', name, message, typ, payload)
def _check_for_exception_catch(evaluator, jedi_obj, exception, payload=None):
"""
Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and
doesn't count as an error (if equal to `exception`).
Also checks `hasattr` for AttributeErrors and uses the `payload` to compare
it.
    Returns True if the exception was caught.
"""
def check_match(cls, exception):
try:
return isinstance(cls, CompiledObject) and issubclass(exception, cls.obj)
except TypeError:
return False
def check_try_for_except(obj, exception):
# Only nodes in try
iterator = iter(obj.children)
for branch_type in iterator:
colon = next(iterator)
suite = next(iterator)
if branch_type == 'try' \
and not (branch_type.start_pos < jedi_obj.start_pos <= suite.end_pos):
return False
for node in obj.except_clauses():
if node is None:
return True # An exception block that catches everything.
else:
except_classes = evaluator.eval_element(node)
for cls in except_classes:
from jedi.evaluate import iterable
if isinstance(cls, iterable.Array) and cls.type == 'tuple':
# multiple exceptions
for c in cls.values():
if check_match(c, exception):
return True
else:
if check_match(cls, exception):
return True
def check_hasattr(node, suite):
try:
assert suite.start_pos <= jedi_obj.start_pos < suite.end_pos
assert node.type == 'power'
base = node.children[0]
assert base.type == 'name' and base.value == 'hasattr'
trailer = node.children[1]
assert trailer.type == 'trailer'
arglist = trailer.children[1]
assert arglist.type == 'arglist'
from jedi.evaluate.param import Arguments
args = list(Arguments(evaluator, arglist).unpack())
# Arguments should be very simple
assert len(args) == 2
# Check name
key, values = args[1]
assert len(values) == 1
names = evaluator.eval_element(values[0])
assert len(names) == 1 and isinstance(names[0], CompiledObject)
assert names[0].obj == str(payload[1])
# Check objects
key, values = args[0]
assert len(values) == 1
objects = evaluator.eval_element(values[0])
return payload[0] in objects
except AssertionError:
return False
obj = jedi_obj
while obj is not None and not obj.isinstance(tree.Function, tree.Class):
if obj.isinstance(tree.Flow):
# try/except catch check
if obj.isinstance(tree.TryStmt) and check_try_for_except(obj, exception):
return True
# hasattr check
if exception == AttributeError and obj.isinstance(tree.IfStmt, tree.WhileStmt):
if check_hasattr(obj.children[1], obj.children[3]):
return True
obj = obj.parent
return False
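# Source patterns the checker above suppresses (illustrative snippets):
#
#   try:                        # matched by check_try_for_except()
#       foo.bar
#   except AttributeError:
#       pass
#
#   if hasattr(foo, 'bar'):     # matched by check_hasattr()
#       foo.bar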
def get_module_statements(module):
"""
Returns the statements used in a module. All these statements should be
evaluated to check for potential exceptions.
"""
def check_children(node):
try:
children = node.children
except AttributeError:
return []
else:
nodes = []
for child in children:
nodes += check_children(child)
if child.type == 'trailer':
c = child.children
if c[0] == '(' and c[1] != ')':
if c[1].type != 'arglist':
if c[1].type == 'argument':
nodes.append(c[1].children[-1])
else:
nodes.append(c[1])
else:
for argument in c[1].children:
if argument.type == 'argument':
nodes.append(argument.children[-1])
elif argument.type != 'operator':
nodes.append(argument)
return nodes
def add_nodes(nodes):
new = set()
for node in nodes:
if isinstance(node, tree.Flow):
children = node.children
if node.type == 'for_stmt':
children = children[2:] # Don't want to include the names.
# Pick the suite/simple_stmt.
new |= add_nodes(children)
elif node.type in ('simple_stmt', 'suite'):
new |= add_nodes(node.children)
elif node.type in ('return_stmt', 'yield_expr'):
try:
new.add(node.children[1])
except IndexError:
pass
elif node.type not in ('whitespace', 'operator', 'keyword',
'parameters', 'decorated', 'except_clause') \
and not isinstance(node, (tree.ClassOrFunc, tree.Import)):
new.add(node)
try:
children = node.children
except AttributeError:
pass
else:
for next_node in children:
new.update(check_children(node))
if next_node.type != 'keyword' and node.type != 'expr_stmt':
new.add(node)
return new
nodes = set()
import_names = set()
decorated_funcs = []
for scope in module.walk():
for imp in set(scope.imports):
import_names |= set(imp.get_defined_names())
if imp.is_nested():
import_names |= set(path[-1] for path in imp.paths())
children = scope.children
if isinstance(scope, tree.ClassOrFunc):
children = children[2:] # We don't want to include the class name.
nodes |= add_nodes(children)
for flow in scope.flows:
if flow.type == 'for_stmt':
nodes.add(flow.children[3])
elif flow.type == 'try_stmt':
nodes.update(e for e in flow.except_clauses() if e is not None)
try:
decorators = scope.get_decorators()
except AttributeError:
pass
else:
if decorators:
decorated_funcs.append(scope)
return nodes, import_names, decorated_funcs
| mit |
TeachAtTUM/edx-platform | lms/djangoapps/course_goals/models.py | 3 | 2051 | """
Course Goals Models
"""
from django.contrib.auth.models import User
from django.db import models
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from opaque_keys.edx.django.models import CourseKeyField
from model_utils import Choices
from .api import add_course_goal, remove_course_goal
from course_modes.models import CourseMode
from student.models import CourseEnrollment
# Each goal is represented by a goal key and a string description.
GOAL_KEY_CHOICES = Choices(
('certify', _('Earn a certificate')),
('complete', _('Complete the course')),
('explore', _('Explore the course')),
('unsure', _('Not sure yet')),
)
class CourseGoal(models.Model):
"""
Represents a course goal set by a user on the course home page.
"""
user = models.ForeignKey(User, blank=False)
course_key = CourseKeyField(max_length=255, db_index=True)
goal_key = models.CharField(max_length=100, choices=GOAL_KEY_CHOICES, default=GOAL_KEY_CHOICES.unsure)
def __unicode__(self):
        return 'CourseGoal: {user} set goal to {goal_key} for course {course}'.format(
user=self.user.username,
course=self.course_key,
goal_key=self.goal_key,
)
class Meta:
unique_together = ("user", "course_key")
@receiver(models.signals.post_save, sender=CourseEnrollment, dispatch_uid="update_course_goal_on_enroll_change")
def update_course_goal_on_enroll_change(sender, instance, **kwargs): # pylint: disable=unused-argument, invalid-name
"""
Updates goals as follows on enrollment changes:
1) Set the course goal to 'certify' when the user enrolls as a verified user.
2) Remove the course goal when the user's enrollment is no longer active.
"""
course_id = str(instance.course_id).decode('utf8', 'ignore')
if not instance.is_active:
remove_course_goal(instance.user, course_id)
elif instance.mode == CourseMode.VERIFIED:
add_course_goal(instance.user, course_id, GOAL_KEY_CHOICES.certify)
| agpl-3.0 |
promptworks/horizon | openstack_dashboard/test/integration_tests/tests/test_user_settings.py | 37 | 2540 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
class TestUserSettings(helpers.TestCase):
def verify_user_settings_change(self, changed_settings):
language = self.settings_page.settings_form.language.value
timezone = self.settings_page.settings_form.timezone.value
pagesize = self.settings_page.settings_form.pagesize.value
loglines = self.settings_page.settings_form.loglines.value
user_settings = (("Language", changed_settings["language"], language),
("Timezone", changed_settings["timezone"], timezone),
("Pagesize", changed_settings["pagesize"], pagesize),
("Loglines", changed_settings["loglines"], loglines))
for (setting, expected, observed) in user_settings:
self.assertEqual(expected, observed,
"expected %s: %s, instead found: %s"
% (setting, expected, observed))
def test_user_settings_change(self):
"""tests the user's settings options:
* changes the system's language
* changes the timezone
* changes the number of items per page (page size)
* changes the number of log lines to be shown per instance
* verifies all changes were successfully executed
"""
self.settings_page = self.home_pg.go_to_settings_usersettingspage()
self.settings_page.change_language("es")
self.settings_page.change_timezone("Asia/Jerusalem")
self.settings_page.change_pagesize("30")
self.settings_page.change_loglines("50")
changed_settings = {"language": "es", "timezone": "Asia/Jerusalem",
"pagesize": "30", "loglines": "50"}
self.verify_user_settings_change(changed_settings)
self.settings_page.return_to_default_settings()
self.verify_user_settings_change(self.settings_page.DEFAULT_SETTINGS)
| apache-2.0 |
wesm/statsmodels | scikits/statsmodels/sandbox/tsa/examples/ex_mle_garch.py | 1 | 10649 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 01:01:50 2010
Author: josef-pktd
latest result
-------------
all are very close
garch0 has different parameterization of constant
ordering of parameters is different
seed 2780185
h.shape (2000,)
Optimization terminated successfully.
Current function value: 2093.813397
Iterations: 387
Function evaluations: 676
ggres.params [-0.6146253 0.1914537 0.01039355 0.78802188]
Optimization terminated successfully.
Current function value: 2093.972953
Iterations: 201
Function evaluations: 372
ggres0.params [-0.61537527 0.19635128 4.00706058]
Warning: Desired error not necessarily achieveddue to precision loss
Current function value: 2093.972953
Iterations: 51
Function evaluations: 551
Gradient evaluations: 110
ggres0.params [-0.61537855 0.19635265 4.00694669]
Optimization terminated successfully.
Current function value: 2093.751420
Iterations: 103
Function evaluations: 187
[ 0.78671519 0.19692222 0.61457171]
-2093.75141963
Final Estimate:
LLH: 2093.750 norm LLH: 2.093750
omega alpha1 beta1
0.7867438 0.1970437 0.6145467
long run variance comparison
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
R
>>> 0.7867438/(1- 0.1970437- 0.6145467)
4.1757097302897526
Garch (gjr) asymmetric, longrun var ?
>>> 1/(1-0.6146253 - 0.1914537 - 0.01039355) * 0.78802188
4.2937548579245242
>>> 1/(1-0.6146253 - 0.1914537 + 0.01039355) * 0.78802188
3.8569053452140345
Garch0
>>> (1-0.61537855 - 0.19635265) * 4.00694669
0.7543830449902722
>>> errgjr4.var() #for different random seed
4.0924199964716106
todo: add code and verify, check for longer lagpolys
"""
import numpy as np
from numpy.testing import assert_almost_equal
import matplotlib.pyplot as plt
import numdifftools as ndt
import scikits.statsmodels.api as sm
from scikits.statsmodels.sandbox import tsa
from scikits.statsmodels.sandbox.tsa.garch import * # local import
from scipy import optimize  # optimize.fmin is called below; make the import explicit
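# Illustrative helper (not part of the original script): the "long run
# variance comparison" in the module docstring uses the GARCH(1,1)
# unconditional variance sigma^2 = omega / (1 - alpha1 - beta1); this makes
# that computation explicit.
def longrun_variance(omega, alpha1, beta1):
    """Unconditional (long-run) variance of a GARCH(1,1) process."""
    return omega / (1.0 - alpha1 - beta1)
# e.g. longrun_variance(0.7867438, 0.1970437, 0.6145467) is about 4.1757,
# matching the R fGarch figure quoted in the docstring above.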
nobs = 1000
examples = ['garch', 'rpyfit']
if 'garch' in examples:
err,h = generate_kindofgarch(nobs, [1.0, -0.95], [1.0, 0.1], mu=0.5)
plt.figure()
plt.subplot(211)
plt.plot(err)
plt.subplot(212)
plt.plot(h)
#plt.show()
seed = 3842774 #91234 #8837708
seed = np.random.randint(9999999)
print 'seed', seed
np.random.seed(seed)
ar1 = -0.9
err,h = generate_garch(nobs, [1.0, ar1], [1.0, 0.50], mu=0.0,scale=0.1)
# plt.figure()
# plt.subplot(211)
# plt.plot(err)
# plt.subplot(212)
# plt.plot(h)
# plt.figure()
# plt.subplot(211)
# plt.plot(err[-400:])
# plt.subplot(212)
# plt.plot(h[-400:])
#plt.show()
garchplot(err, h)
garchplot(err[-400:], h[-400:])
np.random.seed(seed)
errgjr,hgjr, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.5,0]], mu=0.0,scale=0.1)
garchplot(errgjr[:nobs], hgjr[:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
garchplot(errgjr[-400:nobs], hgjr[-400:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
np.random.seed(seed)
errgjr2,hgjr2, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr2[:nobs], hgjr2[:nobs], 'GJR-GARCH(1,1) Simulation')
garchplot(errgjr2[-400:nobs], hgjr2[-400:nobs], 'GJR-GARCH(1,1) Simulation')
np.random.seed(seed)
errgjr3,hgjr3, etax3 = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9],[0.1,0.9],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr3[:nobs], hgjr3[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr3[-400:nobs], hgjr3[-400:nobs], 'GJR-GARCH(1,3) Simulation')
np.random.seed(seed)
errgjr4,hgjr4, etax4 = generate_gjrgarch(nobs, [1.0, ar1],
[[1., 1,0],[0, 0.1,0.9],[0, 0.1,0.9],[0, 0.1,0.9]],
mu=0.0,scale=0.1)
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
varinno = np.zeros(100)
varinno[0] = 1.
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, -0.],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
garchplot(errgjr5[:20], hgjr5[:20], 'GJR-GARCH(1,3) Simulation')
#garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
#plt.show()
seed = np.random.randint(9999999) # 9188410
print 'seed', seed
x = np.arange(20).reshape(10,2)
x3 = np.column_stack((np.ones((x.shape[0],1)),x))
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,0.0,0]]),x3)
nobs = 1000
warmup = 1000
np.random.seed(seed)
ar = [1.0, -0.7]#7, -0.16, -0.1]
#ma = [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]]
ma = [[1., 0, 0],[0, 0.8,0.0]] #,[0, 0.9,0.0]]
# errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, [1.0, -0.99],
# [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]],
# mu=0.2, scale=0.25)
errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, ar, ma,
mu=0.4, scale=1.01)
errgjr4,hgjr4, etax4 = errgjr4[warmup:], hgjr4[warmup:], etax4[warmup:]
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation - DGP')
ggmod = Garch(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.6, 0.1, 0.2, 0.0])
ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
print 'ggres.params', ggres.params
garchplot(ggmod.errorsest, ggmod.h, title='Garch estimated')
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
    ggmod0.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, maxiter=2000)
print 'ggres0.params', ggres0.params
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
    ggmod0.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, method='bfgs', maxiter=2000)
print 'ggres0.params', ggres0.params
g11res = optimize.fmin(lambda params: -loglike_GARCH11(params, errgjr4-errgjr4.mean())[0], [0.93, 0.9, 0.2])
print g11res
llf = loglike_GARCH11(g11res, errgjr4-errgjr4.mean())
print llf[0]
if 'rpyfit' in examples:
from rpy import r
r.library('fGarch')
f = r.formula('~garch(1, 1)')
fit = r.garchFit(f, data = errgjr4-errgjr4.mean(), include_mean=False)
if 'rpysim' in examples:
from rpy import r
f = r.formula('~garch(1, 1)')
#fit = r.garchFit(f, data = errgjr4)
x = r.garchSim( n = 500)
print 'R acf', tsa.acf(np.power(x,2))[:15]
arma3 = Arma(np.power(x,2))
arma3res = arma3.fit(start_params=[-0.2,0.1,0.5],maxiter=5000)
print arma3res.params
arma3b = Arma(np.power(x,2))
arma3bres = arma3b.fit(start_params=[-0.2,0.1,0.5],maxiter=5000, method='bfgs')
print arma3bres.params
xr = r.garchSim( n = 100)
x = np.asarray(xr)
ggmod = Garch(x-x.mean())
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.6, 0.1, 0.2, 0.0])
ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
print 'ggres.params', ggres.params
g11res = optimize.fmin(lambda params: -loglike_GARCH11(params, x-x.mean())[0], [0.6, 0.6, 0.2])
print g11res
llf = loglike_GARCH11(g11res, x-x.mean())
print llf[0]
garchplot(ggmod.errorsest, ggmod.h, title='Garch estimated')
fit = r.garchFit(f, data = x-x.mean(), include_mean=False, trace=False)
print r.summary(fit)
'''based on R default simulation
model = list(omega = 1e-06, alpha = 0.1, beta = 0.8)
nobs = 1000
(with nobs=500, gjrgarch doesn't do well
>>> ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
Optimization terminated successfully.
Current function value: -448.861335
Iterations: 385
Function evaluations: 690
>>> print 'ggres.params', ggres.params
ggres.params [ -7.75090330e-01 1.57714749e-01 -9.60223930e-02 8.76021411e-07]
rearranged
8.76021411e-07 1.57714749e-01(-9.60223930e-02) 7.75090330e-01
>>> print g11res
[ 2.97459808e-06 7.83128600e-01 2.41110860e-01]
>>> llf = loglike_GARCH11(g11res, x-x.mean())
>>> print llf[0]
442.603541936
Log Likelihood:
-448.9376 normalized: -4.489376
omega alpha1 beta1
1.01632e-06 1.02802e-01 7.57537e-01
'''
''' the following is for errgjr4-errgjr4.mean()
ggres.params [-0.54510407 0.22723132 0.06482633 0.82325803]
Final Estimate:
LLH: 2065.56 norm LLH: 2.06556
mu omega alpha1 beta1
0.07229732 0.83069480 0.26313883 0.53986167
ggres.params [-0.50779163 0.2236606 0.00700036 1.154832]
Final Estimate:
LLH: 2116.084 norm LLH: 2.116084
mu omega alpha1 beta1
-4.759227e-17 1.145404e+00 2.288348e-01 5.085949e-01
run3
DGP
0.4/?? 0.8 0.7
gjrgarch:
ggres.params [-0.45196579 0.2569641 0.02201904 1.11942636]
rearranged
const/omega ma1/alpha1 ar1/beta1
1.11942636 0.2569641(+0.02201904) 0.45196579
g11:
[ 1.10262688 0.26680468 0.45724957]
-2055.73912687
R:
Final Estimate:
LLH: 2055.738 norm LLH: 2.055738
mu omega alpha1 beta1
-1.665226e-17 1.102396e+00 2.668712e-01 4.573224e-01
fit = r.garchFit(f, data = errgjr4-errgjr4.mean())
rpy.RPy_RException: Error in solve.default(fit$hessian) :
Lapack routine dgesv: system is exactly singular
run4
DGP:
mu=0.4, scale=1.01
ma = [[1., 0, 0],[0, 0.8,0.0]], ar = [1.0, -0.7]
maybe something wrong with simulation
gjrgarch
ggres.params [-0.50554663 0.24449867 -0.00521004 1.00796791]
rearranged
1.00796791 0.24449867(-0.00521004) 0.50554663
garch11:
[ 1.01258264 0.24149155 0.50479994]
-2056.3877404
R include_constant=False
Final Estimate:
LLH: 2056.397 norm LLH: 2.056397
omega alpha1 beta1
1.0123560 0.2409589 0.5049154
'''
erro,ho, etaxo = generate_gjrgarch(20, ar, ma, mu=0.04, scale=0.01,
varinnovation = np.ones(20))
if 'sp500' in examples:
import tabular as tb
import scikits.timeseries as ts
a = tb.loadSV(r'C:\Josef\work-oth\gspc_table.csv')
s = ts.time_series(a[0]['Close'][::-1],
dates=ts.date_array(a[0]['Date'][::-1],freq="D"))
sp500 = a[0]['Close'][::-1]
sp500r = np.diff(np.log(sp500))
#plt.show()
| bsd-3-clause |
JCardenasRdz/Machine-Learning-4-MRI | Infection_vs_Inflammation/Code/Process_Data.py | 1 | 2713 | # Import Modules as needed
import numpy as np
#import seaborn as sn
import pandas as pd
from pylab import *
from mylocal_functions import *
# ======== T2 MSME============= #
# Make list of all T2.txt files
T2_list = get_ipython().getoutput('ls ../Study_03_CBA/*T2.txt')
# Allocate variables needed for analysis
T2DF=pd.DataFrame()
TR=np.linspace(.012,.012*12,12)
# Fit T2 and construct dataframe
for names in T2_list:
#Convert txt file to array
YDataMatrix=txt_2_array(names)
#Estimate T2
T2time=fitT2(TR,YDataMatrix)
#convert to data frame
df_T2=pd.DataFrame(T2time.T,columns=["Infected","Healthy_R","St_Inf","Healthy_L"])
#df_T2=pd.DataFrame(T2time.T,columns=["ROI-1","ROI-2","ROI-3","ROI-4"])
df_info=name_2_df(names)
df_final=pd.concat([df_T2,df_info], axis=1)
T2DF=T2DF.append(df_final,ignore_index=True)
# Plot T2 Density ROIs 1 and 2
#T2DF[T2DF.Slice==1].iloc[:,:4].plot.density(); title("Slice 01"); xlim((0.025,.15))
#T2DF[T2DF.Slice==2].iloc[:,:4].plot.density(); title("Slice 02"); xlim((0.025,.15))
#T2DF[T2DF.Slice==3].iloc[:,:4].plot.density(); title("Slice 03"); xlim((0.025,.15))
#T2DF[T2DF.Slice==4].iloc[:,:4].plot.density(); title("Slice 04"); xlim((0.025,.15))
#T2DF[T2DF.Slice==5].iloc[:,:4].plot.density(); title("Slice 05"); xlim((0.025,.15))
# ======== CEST============= #
# Make list of all T2.txt files
CEST_list=get_ipython().getoutput('ls ../Study_03_CBA/*CEST.txt')
CEST_DF=pd.DataFrame()
Z=np.zeros((4,110))
def normalize_data(DataMatrix):
rows,cols = DataMatrix.shape
newData = np.zeros_like(DataMatrix)
for row in range(rows):
newData[row,:]=DataMatrix[row,:]/DataMatrix[row,8]
return newData
for names in CEST_list:
#Convert txt file to array
D=txt_2_array(names);
Zn=normalize_data(D.T)
Z=np.concatenate((Z,Zn))
Z=Z[4::,9::]
# define offsets in ppm
a1=np.linspace(-55,-50,9)
ppm=np.linspace(-8,8,101)
full_ppm = np.concatenate((a1, ppm))
# fit CEST data.
y=Z[12,:]
p=fit_L2_scale(ppm,y)
Yhat=Lscale(ppm,p[0],p[1],p[2],p[3],p[4],p[5],p[6]);
plt.figure(figsize=(10,6))
plt.plot(ppm,y,'o',label='Signal');
plt.plot(ppm,1-Yhat,'-',label='Fit');
plt.legend()
## ====== BUILD CEST Predictors ======== #####
CEST_predictors=np.zeros_like(Z)
rows,cols = CEST_predictors.shape
Tissue_Class=np.zeros((4,rows))
for i in range(rows):
p=fit_L2_scale(ppm,Z[i,:])
CEST_predictors[i,:]=Lscale(ppm,p[0],p[1],p[2],p[3],p[4],p[5],p[6]);
Tissue_Class=np.zeros((64,1))
for i in range(4):
Tissue_Class[i::4]=i
CEST_Dataframe=pd.DataFrame(CEST_predictors)
CEST_Dataframe["Tissue_Class"]=Tissue_Class
pd.DataFrame.to_csv(CEST_Dataframe,"CEST_infections.csv",header=True,index=False)
| mit |
IKholopov/HackUPC2017 | hackupc/env/lib/python3.5/site-packages/requests/packages/chardet/hebrewprober.py | 2929 | 13359 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers.
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letter scores maintained and both
# model probers' scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
# The two last characters seen in the previous buffer,
# mPrev and mBeforePrev are initialized to space in order to simulate
# a word delimiter at the beginning of the data
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
# causing the Non-Final tsadi to appear at an end of a word even
# though this is not the case in the original text.
# The letters Pe and Kaf rarely display a related behavior of not being
# a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
# for example legally end with a Non-Final Pe or Kaf. However, the
# benefit of these letters as Non-Final letters outweighs the damage
# since these words are quite rare.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
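# Illustrative wiring (a sketch based on the comments above, not part of this
# module; the constructor signature shown is an assumption): the SBCSGroupProber
# owns the probers and ties them together roughly like so:
#
#   hebrew_prober = HebrewProber()
#   logical_prober = SBCharSetProber(Win1255HebrewModel, False, hebrew_prober)
#   visual_prober = SBCharSetProber(Win1255HebrewModel, True, hebrew_prober)
#   hebrew_prober.set_model_probers(logical_prober, visual_prober)
#
# A positive identification from either model prober is then resolved to
# "windows-1255" or "ISO-8859-8" by get_charset_name() above.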
| apache-2.0 |
areski/django | docs/_ext/djangodocs.py | 17 | 11772 | """
Sphinx plugins for Django documentation.
"""
import json
import os
import re
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.util.compat import Directive
from sphinx.util.console import bold
from sphinx.util.nodes import set_source_info
from sphinx.writers.html import SmartyPantsHTMLTranslator
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
app.add_crossref_type(
directivename="setting",
rolename="setting",
indextemplate="pair: %s; setting",
)
app.add_crossref_type(
directivename="templatetag",
rolename="ttag",
indextemplate="pair: %s; template tag"
)
app.add_crossref_type(
directivename="templatefilter",
rolename="tfilter",
indextemplate="pair: %s; template filter"
)
app.add_crossref_type(
directivename="fieldlookup",
rolename="lookup",
indextemplate="pair: %s; field lookup type",
)
app.add_description_unit(
directivename="django-admin",
rolename="djadmin",
indextemplate="pair: %s; django-admin command",
parse_node=parse_django_admin_node,
)
app.add_description_unit(
directivename="django-admin-option",
rolename="djadminopt",
indextemplate="pair: %s; django-admin command-line option",
parse_node=parse_django_adminopt_node,
)
app.add_config_value('django_next_version', '0.0', True)
app.add_directive('versionadded', VersionDirective)
app.add_directive('versionchanged', VersionDirective)
app.add_builder(DjangoStandaloneHTMLBuilder)
# register the snippet directive
app.add_directive('snippet', SnippetWithFilename)
# register a node for snippet directive so that the xml parser
# knows how to handle the enter/exit parsing event
app.add_node(snippet_with_filename,
html=(visit_snippet, depart_snippet_literal),
latex=(visit_snippet_latex, depart_snippet_latex),
man=(visit_snippet_literal, depart_snippet_literal),
text=(visit_snippet_literal, depart_snippet_literal),
texinfo=(visit_snippet_literal, depart_snippet_literal))
class snippet_with_filename(nodes.literal_block):
"""
Subclass the literal_block to override the visit/depart event handlers
"""
pass
def visit_snippet_literal(self, node):
"""
default literal block handler
"""
self.visit_literal_block(node)
def depart_snippet_literal(self, node):
"""
default literal block handler
"""
self.depart_literal_block(node)
def visit_snippet(self, node):
"""
HTML document generator visit handler
"""
lang = self.highlightlang
linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1
fname = node['filename']
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.highlighter.highlight_block(node.rawsource, lang,
warn=warner,
linenos=linenos,
**highlight_args)
starttag = self.starttag(node, 'div', suffix='',
CLASS='highlight-%s' % lang)
self.body.append(starttag)
    self.body.append('<div class="snippet-filename">%s</div>\n' % (fname,))
self.body.append(highlighted)
self.body.append('</div>\n')
raise nodes.SkipNode
def visit_snippet_latex(self, node):
"""
Latex document generator visit handler
"""
code = node.rawsource.rstrip('\n')
lang = self.hlsettingstack[-1][0]
linenos = code.count('\n') >= self.hlsettingstack[-1][1] - 1
fname = node['filename']
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
def warner(msg):
self.builder.warn(msg, (self.curfilestack[-1], node.line))
hlcode = self.highlighter.highlight_block(code, lang, warn=warner,
linenos=linenos,
**highlight_args)
self.body.append(
'\n{\\colorbox[rgb]{0.9,0.9,0.9}'
'{\\makebox[\\textwidth][l]'
'{\\small\\texttt{%s}}}}\n' % (
# Some filenames have '_', which is special in latex.
fname.replace('_', r'\_'),
)
)
if self.table:
hlcode = hlcode.replace('\\begin{Verbatim}',
'\\begin{OriginalVerbatim}')
self.table.has_problematic = True
self.table.has_verbatim = True
hlcode = hlcode.rstrip()[:-14] # strip \end{Verbatim}
hlcode = hlcode.rstrip() + '\n'
self.body.append('\n' + hlcode + '\\end{%sVerbatim}\n' %
(self.table and 'Original' or ''))
# Prevent rawsource from appearing in output a second time.
raise nodes.SkipNode
def depart_snippet_latex(self, node):
"""
Latex document generator depart handler.
"""
pass
class SnippetWithFilename(Directive):
"""
    The 'snippet' directive, which allows adding the filename (optional)
    of a code snippet in the document. This is modeled after CodeBlock.
"""
has_content = True
optional_arguments = 1
option_spec = {'filename': directives.unchanged_required}
def run(self):
code = '\n'.join(self.content)
literal = snippet_with_filename(code, code)
if self.arguments:
literal['language'] = self.arguments[0]
literal['filename'] = self.options['filename']
set_source_info(self, literal)
return [literal]
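# Illustrative reST usage of the directive above (hypothetical filename; note
# that run() reads options['filename'] unconditionally, so in practice the
# option must be supplied):
#
#   .. snippet:: python
#      :filename: myapp/views.py
#
#      def index(request):
#          ...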
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
if len(self.arguments) > 1:
msg = """Only one argument accepted for directive '{directive_name}::'.
Comments should be provided as content,
not as an extra argument.""".format(directive_name=self.name)
raise self.error(msg)
env = self.state.document.settings.env
ret = []
node = addnodes.versionmodified()
ret.append(node)
if self.arguments[0] == env.config.django_next_version:
node['version'] = "Development version"
else:
node['version'] = self.arguments[0]
node['type'] = self.name
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
env.note_versionchange(node['type'], node['version'], node, self.lineno)
return ret
class DjangoHTMLTranslator(SmartyPantsHTMLTranslator):
"""
Django-specific reST to HTML tweaks.
"""
# Don't use border=1, which docutils does by default.
def visit_table(self, node):
self.context.append(self.compact_p)
self.compact_p = True
self._table_row_index = 0 # Needed by Sphinx
self.body.append(self.starttag(node, 'table', CLASS='docutils'))
def depart_table(self, node):
self.compact_p = self.context.pop()
self.body.append('</table>\n')
def visit_desc_parameterlist(self, node):
self.body.append('(') # by default sphinx puts <big> around the "("
self.first_param = 1
self.optional_param_level = 0
self.param_separator = node.child_text_separator
self.required_params_left = sum([isinstance(c, addnodes.desc_parameter)
for c in node.children])
def depart_desc_parameterlist(self, node):
self.body.append(')')
#
# Turn the "new in version" stuff (versionadded/versionchanged) into a
# better callout -- the Sphinx default is just a little span,
# which is a bit less obvious that I'd like.
#
# FIXME: these messages are all hardcoded in English. We need to change
# that to accommodate other language docs, but I can't work out how to make
# that work.
#
version_text = {
'versionchanged': 'Changed in Django %s',
'versionadded': 'New in Django %s',
}
def visit_versionmodified(self, node):
self.body.append(
self.starttag(node, 'div', CLASS=node['type'])
)
version_text = self.version_text.get(node['type'])
if version_text:
title = "%s%s" % (
version_text % node['version'],
":" if len(node) else "."
)
self.body.append('<span class="title">%s</span> ' % title)
def depart_versionmodified(self, node):
self.body.append("</div>\n")
# Give each section a unique ID -- nice for custom CSS hooks
def visit_section(self, node):
old_ids = node.get('ids', [])
node['ids'] = ['s-' + i for i in old_ids]
node['ids'].extend(old_ids)
SmartyPantsHTMLTranslator.visit_section(self, node)
node['ids'] = old_ids
def parse_django_admin_node(env, sig, signode):
command = sig.split(' ')[0]
env._django_curr_admin_command = command
title = "django-admin %s" % sig
signode += addnodes.desc_name(title, title)
return sig
def parse_django_adminopt_node(env, sig, signode):
"""A copy of sphinx.directives.CmdoptionDesc.parse_signature()"""
from sphinx.domains.std import option_desc_re
count = 0
firstname = ''
for m in option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not count:
for m in simple_option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not firstname:
raise ValueError
return firstname
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
"""
Subclass to add some extra things we need.
"""
name = 'djangohtml'
def finish(self):
super(DjangoStandaloneHTMLBuilder, self).finish()
self.info(bold("writing templatebuiltins.js..."))
xrefs = self.env.domaindata["std"]["objects"]
templatebuiltins = {
"ttags": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatetag" and l == "ref/templates/builtins"],
"tfilters": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatefilter" and l == "ref/templates/builtins"],
}
outfilename = os.path.join(self.outdir, "templatebuiltins.js")
with open(outfilename, 'w') as fp:
fp.write('var django_template_builtins = ')
json.dump(templatebuiltins, fp)
fp.write(';\n')
| bsd-3-clause |
laborautonomo/poedit | deps/boost/tools/build/v2/test/source_locations.py | 51 | 1125 | #!/usr/bin/python
# Copyright (C) Craig Rodrigues 2005.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test that projects with multiple source-location directories are handled OK.
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", """
path-constant SRC1 : "./src1" ;
path-constant SRC2 : "./src2" ;
path-constant SRC3 : "./src3" ;
path-constant BUILD : "build" ;
project : requirements <include>$(SRC1)/include <threading>multi
: build-dir $(BUILD) ;
build-project project1 ;
""")
t.write("project1/jamfile.jam", """
project project1 : source-location $(SRC1) $(SRC2) $(SRC3) ;
SRCS = s1.cpp s2.cpp testfoo.cpp ;
exe test : $(SRCS) ;
""")
t.write("src1/s1.cpp", "int main() {}\n")
t.write("src2/s2.cpp", "void hello() {}\n")
t.write("src3/testfoo.cpp", "void testfoo() {}\n")
# This file should not be picked up, because "src2" is before "src3" in the list
# of source directories.
t.write("src3/s2.cpp", "void hello() {}\n")
t.run_build_system()
t.cleanup()
| mit |
coolbombom/CouchPotatoServer | couchpotato/core/providers/torrent/torrentday/main.py | 9 | 2189 | from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
log = CPLog(__name__)
class TorrentDay(TorrentProvider):
urls = {
'test': 'http://www.td.af/',
'login': 'http://www.td.af/torrents/',
'login_check': 'http://www.torrentday.com/userdetails.php',
'detail': 'http://www.td.af/details.php?id=%s',
'search': 'http://www.td.af/V3/API/API.php',
'download': 'http://www.td.af/download.php/%s/%s',
}
cat_ids = [
([11], ['720p', '1080p']),
([1, 21, 25], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']),
([3], ['dvdr']),
([5], ['bd50']),
]
http_time_between_calls = 1 #seconds
def _searchOnTitle(self, title, movie, quality, results):
q = '"%s %s"' % (title, movie['library']['year'])
params = {
'/browse.php?': None,
'cata': 'yes',
'jxt': 8,
'jxw': 'b',
'search': q,
}
data = self.getJsonData(self.urls['search'], params = params, opener = self.login_opener)
try: torrents = data.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
except: return
for torrent in torrents:
results.append({
'id': torrent['id'],
'name': torrent['name'],
'url': self.urls['download'] % (torrent['id'], torrent['fname']),
'detail_url': self.urls['detail'] % torrent['id'],
'size': self.parseSize(torrent.get('size')),
'seeders': tryInt(torrent.get('seed')),
'leechers': tryInt(torrent.get('leech')),
})
def getLoginParams(self):
return tryUrlencode({
'username': self.conf('username'),
'password': self.conf('password'),
'submit': 'submit',
})
def loginSuccess(self, output):
return 'Password not correct' not in output
def loginCheckSuccess(self, output):
return 'logout.php' in output.lower()
| gpl-3.0 |
s-tar/just-a-chat | modules/chat/chat.py | 1 | 5141 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'mr.S'
from kernel.module import Module
from kernel.server import app
from bottle import jinja2_template as template, request
from entities.s_chat import Chat
from entities.s_message import Message
from kernel.validator import Validator
from kernel.socket import Rooms
import opengraph
import urlparse
import kernel.widget
module = Module('chat', route="/chat")
@app.route('/')
@app.route('/chat/<chat_id:int>')
def main(chat_id=None):
user = request.user.get()
if user:
chats = request.db(Chat).get_all_by_user(user)
current_chat = request.db(Chat).get_by_id(chat_id) if chat_id else None
return template('page', {'content': template('chat/main', {
'chats': chats,
'chat_id': chat_id,
'current_chat': current_chat,
'current_is_new': current_chat and current_chat not in chats
})})
return template('page', {'content': template('index')})
@module.post('/new')
def new_chat():
user = request.user.get()
if user:
data = request.forms
v = Validator(data)
v.field("chat_name").required(message='Назовите как-то чат')
if v.is_valid():
data = v.valid_data
chat = Chat()
chat.name = data.get("chat_name")
chat.members.append(user)
request.db.add(chat)
request.db.flush()
request.db.commit()
Rooms.get('user.'+str(user.usr_id)).emit('chat.new', {
'chat': chat.as_dict(),
'chat_item': kernel.widget.get('chat.item', {'chat': chat})})
return {"status": "ok"}
return {"status": "fail", "errors": v.errors}
@module.post('/<chat_id:int>/join')
def join_chat(chat_id=None):
user = request.user.get()
if user:
chat = request.db(Chat).get_by_id(chat_id)
if chat and user not in chat.members:
chat.members.append(user)
request.db.add(chat)
request.db.commit()
Rooms.get('user.'+str(user.usr_id)).emit('chat.join', { "chat": chat.as_dict(),
"chat_item": kernel.widget.get('chat.item', {'chat': chat})})
            new_message(request.db, chat, '%s %s joins the chat.' % (user.usr_firstname, user.usr_lastname), user, True)
return {"status": "ok",
"chat": chat.as_dict(),
"chat_item": kernel.widget.get('chat.item', {'chat': chat}),
"messages": kernel.widget.get('chat.messages', {'chat_id': chat.id})
}
return {"status": "fail"}
@module.post('/<chat_id:int>/leave')
def leave_chat(chat_id=None):
user = request.user.get()
if user:
chat = request.db(Chat).get_by_id(chat_id)
if chat:
chat.members.remove(user)
if len(chat.members) == 0:
chat.deleted = True
request.db.add(chat)
request.db.commit()
            new_message(request.db, chat, '%s %s leaves the chat.' % (user.usr_firstname, user.usr_lastname), user, True)
return {"status": "ok"}
return {"status": "fail"}
@module.post('/new_message')
def new_message_route():
user = request.user.get()
if user:
data = request.forms
v = Validator(data)
v.field("chat_id").integer()
if v.valid_data.get('chat_id'):
data = v.valid_data
chat = request.db(Chat).get_by_id(data.get('chat_id'))
if chat:
text = data.get('message').strip()
new_message(request.db, chat, text, user)
return {"status": "ok"}
return {"status": "fail"}
@module.post('/search')
def search():
user = request.user.get()
text = request.forms.get('text')
chats = request.db().query(Chat).filter(Chat.deleted == False, Chat.name.ilike(text.strip()+'%'), ~Chat.members.contains(user)).all()
return {
'chats': [c.as_dict() for c in chats],
'chat_items': [kernel.widget.get('chat.item', {'chat': chat}) for chat in chats]
}
def to_url(text):
text = 'http://'+text if text.startswith('www.') else text
return text if text.startswith('http://') or text.startswith('https://') else None
def new_message(db, chat, text, user, system=False):
data = None
url = to_url(text)
if url:
try:
og = opengraph.OpenGraph(url=url)
text = url
data = str(og if og.is_valid() else {})
except:
data = str({})
message = Message()
message.chat = chat
message.text = text
message.data = data
message.sender = user
message.is_system = system
chat.messages.append(message)
db.add(chat)
db.flush()
db.commit()
for member in chat.members:
Rooms.get('user.'+str(member.usr_id)).emit('chat.new_message', {
'is_sender': member.usr_id == user.usr_id,
'message': message.as_dict(),
            'message_item': kernel.widget.get('chat.message', {'message': message})})
| mit |
BeataBak/project-euler-problems | 008.py | 1 | 3404 | """
Project Euler Problem 8
=======================
Find the greatest product of thirteen consecutive digits in the 1000-digit
number.
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
"""
from functools import reduce
BIG_CUBE = ''.join("""
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
""".split())
def scoper(s, width, pos=0):
"""
Takes a string and a width, and yields consecutive chunks of `s`
to the size of `width` until we hit the end of `s`.
"""
while True:
yield s[pos:pos + width]
        if pos + width >= len(s):  # '>=' rather than '==' so width > len(s) cannot loop forever
            break
pos += 1
def product_of_string(s):
"""
Takes a string containing integers and returns the product.
"""
return reduce(lambda x, y: x * y, [int(i) for i in s])
def main(length=13):
return max([product_of_string(s) for s in scoper(BIG_CUBE, length)])
def test_scoper():
assert list(scoper('Beata', 2)) == ['Be', 'ea', 'at', 'ta']
assert list(scoper('Beata', 3)) == ['Bea', 'eat', 'ata']
def test_product_of_string():
assert product_of_string('245') == 40
def test_main():
assert main(4) == 5832
print(main())
| mit |
CMUSV-VisTrails/WorkflowRecommendation | vistrails/db/versions/v1_0_0/domain/log.py | 1 | 3092 | ###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from auto_gen import DBLog as _DBLog
from auto_gen import DBAbstraction, DBModule, DBGroup, DBLoopExec, \
DBGroupExec, DBModuleExec
from id_scope import IdScope
import copy
class DBLog(_DBLog):
def __init__(self, *args, **kwargs):
_DBLog.__init__(self, *args, **kwargs)
self.id_scope = IdScope(1,
{DBLoopExec.vtType: 'item_exec',
DBModuleExec.vtType: 'item_exec',
DBGroupExec.vtType: 'item_exec',
DBAbstraction.vtType: DBModule.vtType,
DBGroup.vtType: DBModule.vtType})
def __copy__(self):
return DBLog.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = _DBLog.do_copy(self, new_ids, id_scope, id_remap)
cp.__class__ = DBLog
cp.id_scope = copy.copy(self.id_scope)
        return cp
@staticmethod
def update_version(old_obj, trans_dict, new_obj=None):
if new_obj is None:
new_obj = DBLog()
new_obj = _DBLog.update_version(old_obj, trans_dict, new_obj)
new_obj.update_id_scope()
return new_obj
def update_id_scope(self):
pass
| bsd-3-clause |
hemikak/andes | modules/andes-core/tools/bin/testkit.py | 25 | 10380 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import time, string, traceback
from brokertest import *
from qpid.messaging import *
try:
import java.lang.System
_cp = java.lang.System.getProperty("java.class.path");
except ImportError:
_cp = checkenv("QP_CP")
class Formatter:
def __init__(self, message):
self.message = message
self.environ = {"M": self.message,
"P": self.message.properties,
"C": self.message.content}
def __getitem__(self, st):
return eval(st, self.environ)
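# Illustrative use (an assumption -- no caller appears in this excerpt):
# because __getitem__ eval()s the lookup key, %-formatting with a Formatter
# can embed expressions over the message, its properties and its content, e.g.
#
#   print "sender=%(P['sender'])s body=%(C)s" % Formatter(msg)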
# The base test case has support for launching the generic
# receiver and sender through the TestLauncher with all the options.
#
class JavaClientTest(BrokerTest):
"""Base Case for Java Test cases"""
client_class = "org.wso2.andes.testkit.TestLauncher"
# currently there is no transparent reconnection.
# temp hack: just creating the queue here and closing it.
def start_error_watcher(self,broker=None):
ssn = broker.connect().session()
err_watcher = ssn.receiver("control; {create:always}", capacity=1)
ssn.close()
def store_module_args(self):
if BrokerTest.store_lib:
return ["--load-module", BrokerTest.store_lib]
else:
print "Store module not present."
return [""]
def client(self,**options):
cmd = ["java","-cp",_cp]
cmd += ["-Dtest_name=" + options.get("test_name", "UNKNOWN")]
cmd += ["-Dhost=" + options.get("host","127.0.0.1")]
cmd += ["-Dport=" + str(options.get("port",5672))]
cmd += ["-Dcon_count=" + str(options.get("con_count",1))]
cmd += ["-Dssn_per_con=" + str(options.get("ssn_per_con",1))]
cmd += ["-Duse_unique_dests=" + str(options.get("use_unique_dests",False))]
cmd += ["-Dcheck_for_dups=" + str(options.get("check_for_dups",False))]
cmd += ["-Ddurable=" + str(options.get("durable",False))]
cmd += ["-Dtransacted=" + str(options.get("transacted",False))]
cmd += ["-Dreceiver=" + str(options.get("receiver",False))]
cmd += ["-Dsync_rcv=" + str(options.get("sync_rcv",False))]
cmd += ["-Dsender=" + str(options.get("sender",False))]
cmd += ["-Dmsg_size=" + str(options.get("msg_size",256))]
cmd += ["-Dtx_size=" + str(options.get("tx_size",10))]
cmd += ["-Dmsg_count=" + str(options.get("msg_count",1000))]
cmd += ["-Dmax_prefetch=" + str(options.get("max_prefetch",500))]
cmd += ["-Dsync_ack=" + str(options.get("sync_ack",False))]
cmd += ["-Dsync_persistence=" + str(options.get("sync_pub",False))]
cmd += ["-Dsleep_time=" + str(options.get("sleep_time",1000))]
cmd += ["-Dfailover=" + options.get("failover", "failover_exchange")]
cmd += ["-Djms_durable_sub=" + str(options.get("jms_durable_sub", False))]
cmd += ["-Dlog.level=" + options.get("log.level", "warn")]
cmd += [self.client_class]
cmd += [options.get("address", "my_queue; {create: always}")]
#print str(options.get("port",5672))
return cmd
# currently there is no transparent reconnection.
# temp hack: just creating a receiver and closing session soon after.
def monitor_clients(self,broker=None,run_time=600,error_ck_freq=60):
ssn = broker.connect().session()
err_watcher = ssn.receiver("control; {create:always}", capacity=1)
i = run_time/error_ck_freq
is_error = False
for j in range(i):
not_empty = True
while not_empty:
try:
m = err_watcher.fetch(timeout=error_ck_freq)
ssn.acknowledge()
print "Java process notified of an error"
self.print_error(m)
is_error = True
except messaging.Empty, e:
not_empty = False
ssn.close()
return is_error
def print_error(self,msg):
print msg.properties.get("exception-trace")
def verify(self, receiver,sender):
sender_running = receiver.is_running()
receiver_running = sender.is_running()
self.assertTrue(receiver_running,"Receiver has exited prematually")
self.assertTrue(sender_running,"Sender has exited prematually")
def start_sender_and_receiver(self,**options):
receiver_opts = options
receiver_opts["receiver"]=True
receiver = self.popen(self.client(**receiver_opts),
expect=EXPECT_RUNNING)
sender_opts = options
sender_opts["sender"]=True
sender = self.popen(self.client(**sender_opts),
expect=EXPECT_RUNNING)
return receiver, sender
def start_cluster(self,count=2,expect=EXPECT_RUNNING,**options):
if options.get("durable",False)==True:
cluster = Cluster(self, count=count, expect=expect, args=self.store_module_args())
else:
cluster = Cluster(self, count=count)
return cluster
class ConcurrencyTest(JavaClientTest):
"""A concurrency test suite for the JMS client"""
skip = False
def base_case(self,**options):
if self.skip :
print "Skipping test"
return
cluster = self.start_cluster(count=2,**options)
self.start_error_watcher(broker=cluster[0])
options["port"] = port=cluster[0].port()
options["use_unique_dests"]=True
options["address"]="amq.topic"
receiver, sender = self.start_sender_and_receiver(**options)
self.monitor_clients(broker=cluster[0],run_time=180)
self.verify(receiver,sender)
def test_multiplexing_con(self):
"""Tests multiple sessions on a single connection"""
self.base_case(ssn_per_con=25,test_name=self.id())
def test_multiplexing_con_with_tx(self):
"""Tests multiple transacted sessions on a single connection"""
self.base_case(ssn_per_con=25,transacted=True,test_name=self.id())
def test_multiplexing_con_with_sync_rcv(self):
"""Tests multiple sessions with sync receive"""
self.base_case(ssn_per_con=25,sync_rcv=True,test_name=self.id())
def test_multiplexing_con_with_durable_sub(self):
"""Tests multiple sessions with durable subs"""
self.base_case(ssn_per_con=25,durable=True,jms_durable_sub=True,test_name=self.id())
def test_multiplexing_con_with_sync_ack(self):
"""Tests multiple sessions with sync ack"""
self.base_case(ssn_per_con=25,sync_ack=True,test_name=self.id())
def test_multiplexing_con_with_sync_pub(self):
"""Tests multiple sessions with sync pub"""
self.base_case(ssn_per_con=25,sync_pub=True,durable=True,test_name=self.id())
def test_multiple_cons_and_ssns(self):
"""Tests multiple connections and sessions"""
self.base_case(con_count=10,ssn_per_con=25,test_name=self.id())
class SoakTest(JavaClientTest):
"""A soak test suite for the JMS client"""
def base_case(self,**options):
cluster = self.start_cluster(count=4, expect=EXPECT_EXIT_FAIL,**options)
options["port"] = port=cluster[0].port()
self.start_error_watcher(broker=cluster[0])
options["use_unique_dests"]=True
options["address"]="amq.topic"
receiver,sender = self.start_sender_and_receiver(**options)
is_error = self.monitor_clients(broker=cluster[0],run_time=30,error_ck_freq=30)
if (is_error):
print "The sender or receiver didn't start properly. Exiting test."
return
else:
"Print no error !"
# grace period for java clients to get the failover properly setup.
time.sleep(30)
error_msg= None
# Kill original brokers, start new ones.
try:
for i in range(8):
cluster[i].kill()
b=cluster.start()
self.monitor_clients(broker=b,run_time=30,error_ck_freq=30)
print "iteration : " + str(i)
except ConnectError, e1:
error_msg = "Unable to connect to new cluster node : " + traceback.format_exc(e1)
except SessionError, e2:
error_msg = "Session error while connected to new cluster node : " + traceback.format_exc(e2)
self.verify(receiver,sender)
if error_msg:
raise Exception(error_msg)
def test_failover(self) :
"""Test basic failover"""
self.base_case(test_name=self.id())
def test_failover_with_durablesub(self):
"""Test failover with durable subscriber"""
self.base_case(durable=True,jms_durable_sub=True,test_name=self.id())
def test_failover_with_sync_rcv(self):
"""Test failover with sync receive"""
self.base_case(sync_rcv=True,test_name=self.id())
def test_failover_with_sync_ack(self):
"""Test failover with sync ack"""
self.base_case(sync_ack=True,test_name=self.id())
def test_failover_with_noprefetch(self):
"""Test failover with no prefetch"""
self.base_case(max_prefetch=1,test_name=self.id())
def test_failover_with_multiple_cons_and_ssns(self):
"""Test failover with multiple connections and sessions"""
self.base_case(use_unique_dests=True,address="amq.topic",
con_count=10,ssn_per_con=25,test_name=self.id())
| apache-2.0 |
kcpawan/django | django/contrib/sites/migrations/0001_initial.py | 276 | 1096 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.sites.models
from django.contrib.sites.models import _simple_domain_name_validator
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name='Site',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('domain', models.CharField(max_length=100, verbose_name='domain name', validators=[_simple_domain_name_validator])),
('name', models.CharField(max_length=50, verbose_name='display name')),
],
options={
'ordering': ('domain',),
'db_table': 'django_site',
'verbose_name': 'site',
'verbose_name_plural': 'sites',
},
bases=(models.Model,),
managers=[
('objects', django.contrib.sites.models.SiteManager()),
],
),
]
| bsd-3-clause |
Yukarumya/Yukarum-Redfoxes | testing/marionette/harness/marionette_harness/tests/unit/test_mouse_action.py | 1 | 4981 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette_driver.by import By
from marionette_driver.keys import Keys
from marionette_driver.marionette import Actions
from marionette_harness import MarionetteTestCase
class TestMouseAction(MarionetteTestCase):
def setUp(self):
MarionetteTestCase.setUp(self)
if self.marionette.session_capabilities["platformName"] == "darwin":
self.mod_key = Keys.META
else:
self.mod_key = Keys.CONTROL
self.action = Actions(self.marionette)
def test_click_action(self):
test_html = self.marionette.absolute_url("test.html")
self.marionette.navigate(test_html)
link = self.marionette.find_element(By.ID, "mozLink")
self.action.click(link).perform()
self.assertEqual("Clicked", self.marionette.execute_script(
"return document.getElementById('mozLink').innerHTML"))
def test_clicking_element_out_of_view_succeeds(self):
        # The action-based click doesn't check for visibility.
test_html = self.marionette.absolute_url("hidden.html")
self.marionette.navigate(test_html)
el = self.marionette.find_element(By.ID, "child")
self.action.click(el).perform()
def test_double_click_action(self):
test_html = self.marionette.absolute_url("double_click.html")
self.marionette.navigate(test_html)
el = self.marionette.find_element(By.ID, "one-word-div")
self.action.double_click(el).perform()
el.send_keys(self.mod_key + "c")
rel = self.marionette.find_element(By.ID, "input-field")
rel.send_keys(self.mod_key + "v")
self.assertEqual("zyxw", rel.get_property("value"))
def test_context_click_action(self):
test_html = self.marionette.absolute_url("javascriptPage.html")
self.marionette.navigate(test_html)
click_el = self.marionette.find_element(By.ID, "resultContainer")
def context_menu_state():
with self.marionette.using_context("chrome"):
cm_el = self.marionette.find_element(By.ID, "contentAreaContextMenu")
return cm_el.get_property("state")
self.assertEqual("closed", context_menu_state())
self.action.context_click(click_el).perform()
self.wait_for_condition(lambda _: context_menu_state() == "open")
with self.marionette.using_context("chrome"):
self.marionette.find_element(By.ID, "main-window").send_keys(Keys.ESCAPE)
self.wait_for_condition(lambda _: context_menu_state() == "closed")
def test_middle_click_action(self):
test_html = self.marionette.absolute_url("clicks.html")
self.marionette.navigate(test_html)
self.marionette.find_element(By.ID, "addbuttonlistener").click()
el = self.marionette.find_element(By.ID, "showbutton")
self.action.middle_click(el).perform()
self.wait_for_condition(lambda _: el.get_property("innerHTML") == "1")
def test_chrome_click(self):
self.marionette.navigate("about:blank")
data_uri = "data:text/html,<html></html>"
with self.marionette.using_context("chrome"):
urlbar = self.marionette.find_element(By.ID, "urlbar")
urlbar.send_keys(data_uri)
go_button = self.marionette.find_element(By.ID, "urlbar-go-button")
self.action.click(go_button).perform()
self.wait_for_condition(lambda mn: mn.get_url() == data_uri)
def test_chrome_double_click(self):
self.marionette.navigate("about:blank")
test_word = "quux"
with self.marionette.using_context("chrome"):
urlbar = self.marionette.find_element(By.ID, "urlbar")
self.assertEqual("", urlbar.get_property("value"))
urlbar.send_keys(test_word)
self.assertEqual(urlbar.get_property("value"), test_word)
(self.action.double_click(urlbar).perform()
.key_down(self.mod_key)
.key_down("x").perform())
self.assertEqual(urlbar.get_property("value"), "")
def test_chrome_context_click_action(self):
self.marionette.set_context("chrome")
def context_menu_state():
cm_el = self.marionette.find_element(By.ID, "tabContextMenu")
return cm_el.get_property("state")
currtab = self.marionette.execute_script("return gBrowser.selectedTab")
self.assertEqual("closed", context_menu_state())
self.action.context_click(currtab).perform()
self.wait_for_condition(lambda _: context_menu_state() == "open")
(self.marionette.find_element(By.ID, "main-window")
.send_keys(Keys.ESCAPE))
self.wait_for_condition(lambda _: context_menu_state() == "closed")
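# Chained-action sketch (illustrative only, not part of the test suite): Actions
# calls queue up and are replayed by perform(), so the flows above can be
# composed, mirroring the copy-to-clipboard pattern in test_chrome_double_click:
#   action = Actions(marionette)
#   action.double_click(el).perform().key_down(Keys.CONTROL).key_down("c").perform()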
| mpl-2.0 |
alikins/ansible | lib/ansible/modules/cloud/profitbricks/profitbricks_datacenter.py | 27 | 7537 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: profitbricks_datacenter
short_description: Create or destroy a ProfitBricks Virtual Datacenter.
description:
- This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency
on profitbricks >= 1.0.0
version_added: "2.0"
options:
name:
description:
- The name of the virtual datacenter.
required: true
description:
description:
- The description of the virtual datacenter.
required: false
location:
description:
- The datacenter location.
required: false
default: us/las
choices: [ "us/las", "de/fra", "de/fkb" ]
subscription_user:
description:
- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
required: false
subscription_password:
description:
      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
required: false
wait:
description:
- wait for the datacenter to be created before returning
required: false
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
state:
description:
- create or terminate datacenters
required: false
default: 'present'
choices: [ "present", "absent" ]
requirements: [ "profitbricks" ]
author: Matt Baldwin (baldwin@stackpointcloud.com)
'''
EXAMPLES = '''
# Create a Datacenter
- profitbricks_datacenter:
datacenter: Tardis One
wait_timeout: 500
# Destroy a Datacenter. This will remove all servers, volumes, and other objects in the datacenter.
- profitbricks_datacenter:
datacenter: Tardis One
wait_timeout: 500
state: absent
'''
import re
import time
HAS_PB_SDK = True
try:
from profitbricks.client import ProfitBricksService, Datacenter
except ImportError:
HAS_PB_SDK = False
from ansible.module_utils.basic import AnsibleModule
LOCATIONS = ['us/las',
'de/fra',
'de/fkb']
uuid_match = re.compile(
r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
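# e.g. uuid_match.match('01234567-89ab-cdef-0123-456789abcdef') is truthy;
# remove_datacenter() below uses this to decide whether 'name' already holds
# a datacenter UUID or a display name that must be resolved to an id first.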
def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
if not promise:
return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
time.sleep(5)
operation_result = profitbricks.get_request(
request_id=promise['requestId'],
status=True)
if operation_result['metadata']['status'] == "DONE":
return
elif operation_result['metadata']['status'] == "FAILED":
            raise Exception(
                'Request ' + msg + ' "' + str(
                    promise['requestId']) + '" failed to complete.')
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
) + '" to complete.')
def _remove_datacenter(module, profitbricks, datacenter):
try:
profitbricks.delete_datacenter(datacenter)
except Exception as e:
module.fail_json(msg="failed to remove the datacenter: %s" % str(e))
def create_datacenter(module, profitbricks):
"""
Creates a Datacenter
This will create a new Datacenter in the specified location.
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Returns:
True if a new datacenter was created, false otherwise
"""
name = module.params.get('name')
location = module.params.get('location')
description = module.params.get('description')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
i = Datacenter(
name=name,
location=location,
description=description
)
try:
datacenter_response = profitbricks.create_datacenter(datacenter=i)
if wait:
_wait_for_completion(profitbricks, datacenter_response,
wait_timeout, "_create_datacenter")
results = {
'datacenter_id': datacenter_response['id']
}
return results
except Exception as e:
module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
def remove_datacenter(module, profitbricks):
"""
Removes a Datacenter.
This will remove a datacenter.
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Returns:
True if the datacenter was deleted, false otherwise
"""
name = module.params.get('name')
changed = False
    if uuid_match.match(name):
_remove_datacenter(module, profitbricks, name)
changed = True
else:
datacenters = profitbricks.list_datacenters()
for d in datacenters['items']:
vdc = profitbricks.get_datacenter(d['id'])
if name == vdc['properties']['name']:
name = d['id']
_remove_datacenter(module, profitbricks, name)
changed = True
return changed
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(),
description=dict(),
location=dict(choices=LOCATIONS, default='us/las'),
subscription_user=dict(),
subscription_password=dict(no_log=True),
wait=dict(type='bool', default=True),
wait_timeout=dict(default=600),
state=dict(default='present'),
)
)
if not HAS_PB_SDK:
module.fail_json(msg='profitbricks required for this module')
if not module.params.get('subscription_user'):
module.fail_json(msg='subscription_user parameter is required')
if not module.params.get('subscription_password'):
module.fail_json(msg='subscription_password parameter is required')
subscription_user = module.params.get('subscription_user')
subscription_password = module.params.get('subscription_password')
profitbricks = ProfitBricksService(
username=subscription_user,
password=subscription_password)
state = module.params.get('state')
if state == 'absent':
if not module.params.get('name'):
            module.fail_json(msg='name parameter is required for deleting a virtual datacenter.')
try:
            changed = remove_datacenter(module, profitbricks)
module.exit_json(
changed=changed)
except Exception as e:
module.fail_json(msg='failed to set datacenter state: %s' % str(e))
elif state == 'present':
if not module.params.get('name'):
module.fail_json(msg='name parameter is required for a new datacenter')
if not module.params.get('location'):
module.fail_json(msg='location parameter is required for a new datacenter')
try:
            datacenter_dict_array = create_datacenter(module, profitbricks)
module.exit_json(**datacenter_dict_array)
except Exception as e:
module.fail_json(msg='failed to set datacenter state: %s' % str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
sanketloke/scikit-learn | sklearn/gaussian_process/gpr.py | 43 | 18642 | """Gaussian processes regression. """
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_X_y, check_array
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
"""Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of Gaussian Processes
for Machine Learning (GPML) by Rasmussen and Williams.
In addition to standard sklearn estimator API, GaussianProcessRegressor:
* allows prediction without prior fitting (based on the GP prior)
* provides an additional method sample_y(X), which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
* exposes a method log_marginal_likelihood(theta), which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
alpha : float or array-like, optional (default: 1e-10)
Value added to the diagonal of the kernel matrix during fitting.
Larger values correspond to increased noise level in the observations
and reduce potential numerical issue during fitting. If an array is
passed, it must have the same number of entries as the data used for
fitting and is used as datapoint-dependent noise level. Note that this
        is equivalent to adding a WhiteKernel with c=alpha. Allowing the noise
        level to be specified directly as a parameter is mainly for convenience
        and for consistency with Ridge.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
                #   takes the hyperparameters theta as a parameter and an
                #   optional flag eval_gradient, which determines if the
                #   gradient is returned in addition to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer == 0 implies that one
run is performed.
normalize_y: boolean, optional (default: False)
Whether the target values y are normalized, i.e., the mean of the
observed target values become zero. This parameter should be set to
        True if the target values' mean is expected to differ considerably from
zero. When enabled, the normalization effectively modifies the GP's
prior based on the data, which contradicts the likelihood principle;
normalization is thus disabled per default.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_: array-like, shape = (n_samples, [n_output_dims])
Target values in training data (also required for prediction)
kernel_: kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_: array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
alpha_: array-like, shape = (n_samples,)
Dual coefficients of training data points in kernel space
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, alpha=1e-10,
optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
normalize_y=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.alpha = alpha
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.normalize_y = normalize_y
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process regression model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples, [n_output_dims])
Target values
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
# Normalize target value
if self.normalize_y:
self.y_train_mean = np.mean(y, axis=0)
# demean y
y = y - self.y_train_mean
else:
self.y_train_mean = np.zeros(1)
if np.iterable(self.alpha) \
and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError("alpha must be a scalar or an array"
" with same number of entries as y.(%d != %d)"
% (self.alpha.shape[0], y.shape[0]))
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [(self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds))]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = \
self.rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
self.L_ = cholesky(K, lower=True) # Line 2
self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
        In addition to the mean of the predictive distribution, its standard
        deviation (return_std=True) or covariance (return_cov=True) can also
        be returned. Note that at most one of the two can be requested.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Query points where the GP is evaluated
return_std : bool, default: False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default: False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean
Returns
-------
y_mean : array, shape = (n_samples, [n_output_dims])
            Mean of predictive distribution at query points
y_std : array, shape = (n_samples,), optional
Standard deviation of predictive distribution at query points.
Only returned when return_std is True.
y_cov : array, shape = (n_samples, n_samples), optional
            Covariance of joint predictive distribution at query points.
Only returned when return_cov is True.
"""
if return_std and return_cov:
raise RuntimeError(
"Not returning standard deviation of predictions when "
"returning full covariance.")
X = check_array(X)
if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior
y_mean = np.zeros(X.shape[0])
if return_cov:
y_cov = self.kernel(X)
return y_mean, y_cov
elif return_std:
y_var = self.kernel.diag(X)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star)
            y_mean = self.y_train_mean + y_mean  # undo normalization
if return_cov:
v = cho_solve((self.L_, True), K_trans.T) # Line 5
y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6
return y_mean, y_cov
elif return_std:
# compute inverse K_inv of K based on its Cholesky
# decomposition L and its inverse L_inv
L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
K_inv = L_inv.dot(L_inv.T)
# Compute variance of predictive distribution
y_var = self.kernel_.diag(X)
y_var -= np.einsum("ki,kj,ij->k", K_trans, K_trans, K_inv)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn("Predicted variances smaller than 0. "
"Setting those variances to 0.")
y_var[y_var_negative] = 0.0
return y_mean, np.sqrt(y_var)
else:
return y_mean
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like, shape = (n_samples_X, n_features)
Query points where the GP samples are evaluated
n_samples : int, default: 1
The number of samples drawn from the Gaussian process
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = \
[rng.multivariate_normal(y_mean[:, i], y_cov,
n_samples).T[:, np.newaxis]
for i in range(y_mean.shape[1])]
y_samples = np.hstack(y_samples)
return y_samples
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) \
if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train) # Line 3
# Compute log-likelihood (compare line 7)
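        # i.e. log p(y|X,theta) = -0.5 * y^T alpha - sum(log(diag(L))) - n/2 * log(2*pi),
        # computed per output dimension with alpha = K^{-1} y from the Cholesky solve above.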
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions
if eval_gradient: # compare Equation 5.9 from GPML
tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension
tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
# Compute "0.5 * trace(tmp.dot(K_gradient))" without
# constructing the full matrix tmp.dot(K_gradient) since only
# its diagonal is required
log_likelihood_gradient_dims = \
0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
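# A minimal usage sketch (illustrative only, not part of this module): fit the
# regressor on toy data and query the posterior mean and standard deviation.
# The top-level import path is an assumption based on how scikit-learn
# re-exports this class; kept as comments so the module is unchanged at import.
#
#     import numpy as np
#     from sklearn.gaussian_process import GaussianProcessRegressor
#     from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
#
#     X = np.linspace(0, 10, 20)[:, np.newaxis]
#     y = np.sin(X).ravel()
#     gpr = GaussianProcessRegressor(kernel=C(1.0) * RBF(length_scale=1.0),
#                                    alpha=1e-6, n_restarts_optimizer=2)
#     gpr.fit(X, y)
#     y_mean, y_std = gpr.predict(X, return_std=True)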
| bsd-3-clause |
tmenjo/cinder-2015.1.0 | cinder/tests/test_backup.py | 3 | 33268 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Backup code.
"""
import tempfile
import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import timeutils
from cinder.backup import manager
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests.backup import fake_service_with_verify as fake_service
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class FakeBackupException(Exception):
pass
class BaseBackupTest(test.TestCase):
def setUp(self):
super(BaseBackupTest, self).setUp()
vol_tmpdir = tempfile.mkdtemp()
self.flags(volumes_dir=vol_tmpdir)
with mock.patch("osprofiler.profiler.trace_cls") as mock_trace_cls:
side_effect = lambda value: value
mock_decorator = mock.MagicMock(side_effect=side_effect)
mock_trace_cls.return_value = mock_decorator
self.backup_mgr = \
importutils.import_object(CONF.backup_manager)
self.backup_mgr.host = 'testhost'
self.ctxt = context.get_admin_context()
self.backup_mgr.driver.set_initialized()
def _create_backup_db_entry(self, volume_id=1, display_name='test_backup',
display_description='this is a test backup',
container='volumebackups',
status='creating',
size=1,
object_count=0,
project_id='fake'):
"""Create a backup entry in the DB.
Return the entry ID
"""
backup = {}
backup['volume_id'] = volume_id
backup['user_id'] = 'fake'
backup['project_id'] = project_id
backup['host'] = 'testhost'
backup['availability_zone'] = '1'
backup['display_name'] = display_name
backup['display_description'] = display_description
backup['container'] = container
backup['status'] = status
backup['fail_reason'] = ''
backup['service'] = CONF.backup_driver
backup['snapshot'] = False
backup['parent_id'] = None
backup['size'] = size
backup['object_count'] = object_count
return db.backup_create(self.ctxt, backup)['id']
def _create_volume_db_entry(self, display_name='test_volume',
display_description='this is a test volume',
status='backing-up',
size=1):
"""Create a volume entry in the DB.
Return the entry ID
"""
vol = {}
vol['size'] = size
vol['host'] = 'testhost'
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['status'] = status
vol['display_name'] = display_name
vol['display_description'] = display_description
vol['attach_status'] = 'detached'
return db.volume_create(self.ctxt, vol)['id']
def _create_volume_attach(self, volume_id):
values = {'volume_id': volume_id,
'attach_status': 'attached', }
attachment = db.volume_attach(self.ctxt, values)
db.volume_attached(self.ctxt, attachment['id'], None, 'testhost',
'/dev/vd0')
def _create_exported_record_entry(self, vol_size=1):
"""Create backup metadata export entry."""
vol_id = self._create_volume_db_entry(status='available',
size=vol_size)
backup_id = self._create_backup_db_entry(status='available',
volume_id=vol_id)
export = self.backup_mgr.export_record(self.ctxt, backup_id)
return export
def _create_export_record_db_entry(self,
volume_id='0000',
status='creating',
project_id='fake'):
"""Create a backup entry in the DB.
Return the entry ID
"""
backup = {}
backup['volume_id'] = volume_id
backup['user_id'] = 'fake'
backup['project_id'] = project_id
backup['status'] = status
return db.backup_create(self.ctxt, backup)['id']
class BackupTestCase(BaseBackupTest):
"""Test Case for backups."""
def test_init_host(self):
"""Make sure stuck volumes and backups are reset to correct
states when backup_manager.init_host() is called
"""
vol1_id = self._create_volume_db_entry()
self._create_volume_attach(vol1_id)
db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
vol2_id = self._create_volume_db_entry()
self._create_volume_attach(vol2_id)
db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'})
backup1_id = self._create_backup_db_entry(status='creating')
backup2_id = self._create_backup_db_entry(status='restoring')
backup3_id = self._create_backup_db_entry(status='deleting')
self.backup_mgr.init_host()
vol1 = db.volume_get(self.ctxt, vol1_id)
self.assertEqual(vol1['status'], 'available')
vol2 = db.volume_get(self.ctxt, vol2_id)
self.assertEqual(vol2['status'], 'error_restoring')
backup1 = db.backup_get(self.ctxt, backup1_id)
self.assertEqual(backup1['status'], 'error')
backup2 = db.backup_get(self.ctxt, backup2_id)
self.assertEqual(backup2['status'], 'available')
self.assertRaises(exception.BackupNotFound,
db.backup_get,
self.ctxt,
backup3_id)
def test_create_backup_with_bad_volume_status(self):
"""Test error handling when creating a backup from a volume
with a bad status
"""
vol_id = self._create_volume_db_entry(status='available', size=1)
backup_id = self._create_backup_db_entry(volume_id=vol_id)
self.assertRaises(exception.InvalidVolume,
self.backup_mgr.create_backup,
self.ctxt,
backup_id)
def test_create_backup_with_bad_backup_status(self):
"""Test error handling when creating a backup with a backup
with a bad status
"""
vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(status='available',
volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.create_backup,
self.ctxt,
backup_id)
@mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
def test_create_backup_with_error(self, _mock_volume_backup):
"""Test error handling when error occurs during backup creation."""
vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(volume_id=vol_id)
_mock_volume_backup.side_effect = FakeBackupException('fake')
self.assertRaises(FakeBackupException,
self.backup_mgr.create_backup,
self.ctxt,
backup_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'available')
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'error')
self.assertTrue(_mock_volume_backup.called)
@mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
def test_create_backup(self, _mock_volume_backup):
"""Test normal backup creation."""
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup_id = self._create_backup_db_entry(volume_id=vol_id)
self.backup_mgr.create_backup(self.ctxt, backup_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'available')
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'available')
self.assertEqual(backup['size'], vol_size)
self.assertTrue(_mock_volume_backup.called)
@mock.patch('cinder.volume.utils.notify_about_backup_usage')
@mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
def test_create_backup_with_notify(self, _mock_volume_backup, notify):
"""Test normal backup creation with notifications."""
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup_id = self._create_backup_db_entry(volume_id=vol_id)
self.backup_mgr.create_backup(self.ctxt, backup_id)
self.assertEqual(2, notify.call_count)
def test_restore_backup_with_bad_volume_status(self):
"""Test error handling when restoring a backup to a volume
with a bad status.
"""
vol_id = self._create_volume_db_entry(status='available', size=1)
backup_id = self._create_backup_db_entry(volume_id=vol_id)
self.assertRaises(exception.InvalidVolume,
self.backup_mgr.restore_backup,
self.ctxt,
backup_id,
vol_id)
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'available')
def test_restore_backup_with_bad_backup_status(self):
"""Test error handling when restoring a backup with a backup
with a bad status.
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
backup_id = self._create_backup_db_entry(status='available',
volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.restore_backup,
self.ctxt,
backup_id,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'error')
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'error')
@mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
def test_restore_backup_with_driver_error(self, _mock_volume_restore):
"""Test error handling when an error occurs during backup restore."""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
backup_id = self._create_backup_db_entry(status='restoring',
volume_id=vol_id)
_mock_volume_restore.side_effect = FakeBackupException('fake')
self.assertRaises(FakeBackupException,
self.backup_mgr.restore_backup,
self.ctxt,
backup_id,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'error_restoring')
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'available')
self.assertTrue(_mock_volume_restore.called)
def test_restore_backup_with_bad_service(self):
"""Test error handling when attempting a restore of a backup
with a different service to that used to create the backup.
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
backup_id = self._create_backup_db_entry(status='restoring',
volume_id=vol_id)
service = 'cinder.tests.backup.bad_service'
db.backup_update(self.ctxt, backup_id, {'service': service})
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.restore_backup,
self.ctxt,
backup_id,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'error')
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'available')
@mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
def test_restore_backup(self, _mock_volume_restore):
"""Test normal backup restoration."""
vol_size = 1
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=vol_size)
backup_id = self._create_backup_db_entry(status='restoring',
volume_id=vol_id)
self.backup_mgr.restore_backup(self.ctxt, backup_id, vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'available')
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'available')
self.assertTrue(_mock_volume_restore.called)
@mock.patch('cinder.volume.utils.notify_about_backup_usage')
@mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
def test_restore_backup_with_notify(self, _mock_volume_restore, notify):
"""Test normal backup restoration with notifications."""
vol_size = 1
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=vol_size)
backup_id = self._create_backup_db_entry(status='restoring',
volume_id=vol_id)
self.backup_mgr.restore_backup(self.ctxt, backup_id, vol_id)
self.assertEqual(2, notify.call_count)
def test_delete_backup_with_bad_backup_status(self):
"""Test error handling when deleting a backup with a backup
with a bad status.
"""
vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(status='available',
volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.delete_backup,
self.ctxt,
backup_id)
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'error')
def test_delete_backup_with_error(self):
"""Test error handling when an error occurs during backup deletion."""
vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(status='deleting',
display_name='fail_on_delete',
volume_id=vol_id)
self.assertRaises(IOError,
self.backup_mgr.delete_backup,
self.ctxt,
backup_id)
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'error')
def test_delete_backup_with_bad_service(self):
"""Test error handling when attempting a delete of a backup
with a different service to that used to create the backup.
"""
vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(status='deleting',
volume_id=vol_id)
service = 'cinder.tests.backup.bad_service'
db.backup_update(self.ctxt, backup_id, {'service': service})
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.delete_backup,
self.ctxt,
backup_id)
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'error')
def test_delete_backup_with_no_service(self):
"""Test error handling when attempting a delete of a backup
with no service defined for that backup, relates to bug #1162908
"""
vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(status='deleting',
volume_id=vol_id)
db.backup_update(self.ctxt, backup_id, {'service': None})
self.backup_mgr.delete_backup(self.ctxt, backup_id)
def test_delete_backup(self):
"""Test normal backup deletion."""
vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(status='deleting',
volume_id=vol_id)
self.backup_mgr.delete_backup(self.ctxt, backup_id)
self.assertRaises(exception.BackupNotFound,
db.backup_get,
self.ctxt,
backup_id)
ctxt_read_deleted = context.get_admin_context('yes')
backup = db.backup_get(ctxt_read_deleted, backup_id)
self.assertEqual(backup.deleted, True)
self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at)
self.assertEqual(backup.status, 'deleted')
@mock.patch('cinder.volume.utils.notify_about_backup_usage')
def test_delete_backup_with_notify(self, notify):
"""Test normal backup deletion with notifications."""
vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(status='deleting',
volume_id=vol_id)
self.backup_mgr.delete_backup(self.ctxt, backup_id)
self.assertEqual(2, notify.call_count)
def test_list_backup(self):
backups = db.backup_get_all_by_project(self.ctxt, 'project1')
self.assertEqual(len(backups), 0)
self._create_backup_db_entry()
b2 = self._create_backup_db_entry(project_id='project1')
backups = db.backup_get_all_by_project(self.ctxt, 'project1')
self.assertEqual(len(backups), 1)
self.assertEqual(backups[0].id, b2)
def test_backup_get_all_by_project_with_deleted(self):
"""Test deleted backups don't show up in backup_get_all_by_project.
Unless context.read_deleted is 'yes'.
"""
backups = db.backup_get_all_by_project(self.ctxt, 'fake')
self.assertEqual(len(backups), 0)
backup_id_keep = self._create_backup_db_entry()
backup_id = self._create_backup_db_entry()
db.backup_destroy(self.ctxt, backup_id)
backups = db.backup_get_all_by_project(self.ctxt, 'fake')
self.assertEqual(len(backups), 1)
self.assertEqual(backups[0].id, backup_id_keep)
ctxt_read_deleted = context.get_admin_context('yes')
backups = db.backup_get_all_by_project(ctxt_read_deleted, 'fake')
self.assertEqual(len(backups), 2)
def test_backup_get_all_by_host_with_deleted(self):
"""Test deleted backups don't show up in backup_get_all_by_project.
Unless context.read_deleted is 'yes'
"""
backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
self.assertEqual(len(backups), 0)
backup_id_keep = self._create_backup_db_entry()
backup_id = self._create_backup_db_entry()
db.backup_destroy(self.ctxt, backup_id)
backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
self.assertEqual(len(backups), 1)
self.assertEqual(backups[0].id, backup_id_keep)
ctxt_read_deleted = context.get_admin_context('yes')
backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost')
self.assertEqual(len(backups), 2)
def test_backup_manager_driver_name(self):
""""Test mapping between backup services and backup drivers."""
self.override_config('backup_driver', "cinder.backup.services.swift")
backup_mgr = \
importutils.import_object(CONF.backup_manager)
self.assertEqual('cinder.backup.drivers.swift',
backup_mgr.driver_name)
def test_export_record_with_bad_service(self):
"""Test error handling when attempting an export of a backup
record with a different service to that used to create the backup.
"""
vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(status='available',
volume_id=vol_id)
service = 'cinder.tests.backup.bad_service'
db.backup_update(self.ctxt, backup_id, {'service': service})
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.export_record,
self.ctxt,
backup_id)
def test_export_record_with_bad_backup_status(self):
"""Test error handling when exporting a backup record with a backup
with a bad status.
"""
vol_id = self._create_volume_db_entry(status='available',
size=1)
backup_id = self._create_backup_db_entry(status='error',
volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.export_record,
self.ctxt,
backup_id)
def test_export_record(self):
"""Test normal backup record export."""
vol_size = 1
vol_id = self._create_volume_db_entry(status='available',
size=vol_size)
backup_id = self._create_backup_db_entry(status='available',
volume_id=vol_id)
export = self.backup_mgr.export_record(self.ctxt, backup_id)
self.assertEqual(export['backup_service'], CONF.backup_driver)
self.assertTrue('backup_url' in export)
def test_import_record_with_verify_not_implemented(self):
"""Test normal backup record import.
Test the case when import succeeds for the case that the
driver does not support verify.
"""
vol_size = 1
export = self._create_exported_record_entry(vol_size=vol_size)
imported_record = self._create_export_record_db_entry()
backup_hosts = []
self.backup_mgr.import_record(self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
backup = db.backup_get(self.ctxt, imported_record)
self.assertEqual(backup['status'], 'available')
self.assertEqual(backup['size'], vol_size)
def test_import_record_with_bad_service(self):
"""Test error handling when attempting an import of a backup
record with a different service to that used to create the backup.
"""
export = self._create_exported_record_entry()
export['backup_service'] = 'cinder.tests.backup.bad_service'
imported_record = self._create_export_record_db_entry()
# Test the case where the additional hosts list is empty
backup_hosts = []
self.assertRaises(exception.ServiceNotFound,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
# Test that the import backup keeps calling other hosts to find a
# suitable host for the backup service
backup_hosts = ['fake1', 'fake2']
BackupAPI_import = 'cinder.backup.rpcapi.BackupAPI.import_record'
with mock.patch(BackupAPI_import) as _mock_backup_import:
self.backup_mgr.import_record(self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
self.assertTrue(_mock_backup_import.called)
def test_import_record_with_invalid_backup(self):
"""Test error handling when attempting an import of a backup
record where the backup driver returns an exception.
"""
export = self._create_exported_record_entry()
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_record_import_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'import_record'))
imported_record = self._create_export_record_db_entry()
backup_hosts = []
with mock.patch(_mock_record_import_class) as _mock_record_import:
_mock_record_import.side_effect = FakeBackupException('fake')
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
self.assertTrue(_mock_record_import.called)
backup = db.backup_get(self.ctxt, imported_record)
self.assertEqual(backup['status'], 'error')
class BackupTestCaseWithVerify(BaseBackupTest):
"""Test Case for backups."""
def setUp(self):
self.override_config("backup_driver",
"cinder.tests.backup.fake_service_with_verify")
super(BackupTestCaseWithVerify, self).setUp()
def test_import_record_with_verify(self):
"""Test normal backup record import.
Test the case when import succeeds for the case that the
driver implements verify.
"""
vol_size = 1
export = self._create_exported_record_entry(vol_size=vol_size)
imported_record = self._create_export_record_db_entry()
backup_hosts = []
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'verify'))
with mock.patch(_mock_backup_verify_class):
self.backup_mgr.import_record(self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
backup = db.backup_get(self.ctxt, imported_record)
self.assertEqual(backup['status'], 'available')
self.assertEqual(backup['size'], vol_size)
def test_import_record_with_verify_invalid_backup(self):
"""Test error handling when attempting an import of a backup
record where the backup driver returns an exception.
"""
vol_size = 1
export = self._create_exported_record_entry(vol_size=vol_size)
imported_record = self._create_export_record_db_entry()
backup_hosts = []
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'verify'))
with mock.patch(_mock_backup_verify_class) as _mock_record_verify:
_mock_record_verify.side_effect = \
exception.InvalidBackup(reason='fake')
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
self.assertTrue(_mock_record_verify.called)
backup = db.backup_get(self.ctxt, imported_record)
self.assertEqual(backup['status'], 'error')
def test_backup_reset_status_from_nonrestoring_to_available(
self):
vol_id = self._create_volume_db_entry(status='available',
size=1)
backup_id = self._create_backup_db_entry(status='error',
volume_id=vol_id)
with mock.patch.object(manager.BackupManager,
'_map_service_to_driver') as \
mock_map_service_to_driver:
mock_map_service_to_driver.return_value = \
fake_service.get_backup_driver(self.ctxt)
self.backup_mgr.reset_status(self.ctxt,
backup_id,
'available')
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'available')
def test_backup_reset_status_to_available_invalid_backup(self):
volume = db.volume_create(self.ctxt, {'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = db.backup_create(self.ctxt,
{'status': 'error',
'service':
CONF.backup_driver,
'volume_id': volume['id']})
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'verify'))
with mock.patch(_mock_backup_verify_class) as \
_mock_record_verify:
_mock_record_verify.side_effect = \
exception.BackupVerifyUnsupportedDriver(reason='fake')
self.assertRaises(exception.BackupVerifyUnsupportedDriver,
self.backup_mgr.reset_status,
self.ctxt,
backup['id'],
'available')
backup = db.backup_get(self.ctxt, backup['id'])
self.assertEqual(backup['status'], 'error')
def test_backup_reset_status_from_restoring_to_available(self):
volume = db.volume_create(self.ctxt,
{'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = db.backup_create(self.ctxt,
{'status': 'restoring',
'service':
CONF.backup_driver,
'volume_id': volume['id']})
self.backup_mgr.reset_status(self.ctxt,
backup['id'],
'available')
backup = db.backup_get(self.ctxt, backup['id'])
self.assertEqual(backup['status'], 'available')
def test_backup_reset_status_to_error(self):
volume = db.volume_create(self.ctxt,
{'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = db.backup_create(self.ctxt,
{'status': 'creating',
'service':
CONF.backup_driver,
'volume_id': volume['id']})
self.backup_mgr.reset_status(self.ctxt,
backup['id'],
'error')
backup = db.backup_get(self.ctxt, backup['id'])
self.assertEqual(backup['status'], 'error')
| apache-2.0 |
pforai/easybuild-framework | easybuild/toolchains/cgmpolf.py | 4 | 1769 | ##
# Copyright 2013-2015 Ghent University
#
# This file is triple-licensed under GPLv2 (see below), MIT, and
# BSD three-clause licenses.
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for cgmpolf compiler toolchain (includes Clang, GFortran, MPICH, OpenBLAS, LAPACK, ScaLAPACK and FFTW).
@author: Dmitri Gribenko (National Technical University of Ukraine "KPI")
"""
from easybuild.toolchains.cgmpich import Cgmpich
from easybuild.toolchains.fft.fftw import Fftw
from easybuild.toolchains.linalg.openblas import OpenBLAS
from easybuild.toolchains.linalg.scalapack import ScaLAPACK
class Cgmpolf(Cgmpich, OpenBLAS, ScaLAPACK, Fftw):
"""Compiler toolchain with Clang, GFortran, MPICH, OpenBLAS, ScaLAPACK and FFTW."""
NAME = 'cgmpolf'
SUBTOOLCHAIN = Cgmpich.NAME
| gpl-2.0 |
abadger/ansible-modules-core | network/nxos/nxos_snmp_location.py | 5 | 12483 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_snmp_location
version_added: "2.2"
short_description: Manages SNMP location information.
description:
- Manages SNMP location configuration.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
options:
location:
description:
- Location information.
required: true
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure snmp location is configured
- nxos_snmp_location:
location: Test
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# ensure snmp location is not configured
- nxos_snmp_location:
location: Test
state: absent
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"location": "New_Test"}
existing:
description: k/v pairs of existing snmp location
type: dict
sample: {"location": "Test"}
end_state:
description: k/v pairs of location info after module execution
returned: always
type: dict or null
sample: {"location": "New_Test"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-server location New_Test"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import json
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
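# Illustrative sketch (not part of the original module): load_config() diffs a
# candidate config against the device's running config and pushes only the
# missing commands. Hypothetical usage:
#
#   candidate = CustomNetworkConfig(indent=2)
#   candidate.add(['snmp-server location Test'])
#   result = load_config(module, candidate)
#   # -> {'changed': True, 'updates': ['snmp-server location Test']}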
# END OF COMMON CODE
def execute_config_command(commands, module):
try:
module.configure(commands)
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
except AttributeError:
try:
commands.insert(0, 'configure')
module.cli.add_commands(commands, output='config')
module.cli.run_commands()
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
def get_cli_body_ssh(command, response, module):
"""Get response for when transport=cli. This is kind of a hack and mainly
needed because these modules were originally written for NX-API. And
not every command supports "| json" when using cli/ssh. As such, we assume
if | json returns an XML string, it is a valid command, but that the
resource doesn't exist yet. Instead, the output will be a raw string
when issuing commands containing 'show run'.
"""
if 'xml' in response[0]:
body = []
elif 'show run' in command:
body = response
else:
try:
body = [json.loads(response[0])]
except ValueError:
module.fail_json(msg='Command does not support JSON output',
command=command)
return body
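# Illustrative behavior of get_cli_body_ssh() (inputs are hypothetical):
#
#   get_cli_body_ssh('show snmp | json', ['<?xml version="1.0"?>...'], module)
#   # -> []  (valid command, resource not configured yet)
#   get_cli_body_ssh('show run snmp', ['snmp-server location Test'], module)
#   # -> ['snmp-server location Test']  (raw string passthrough)
#   get_cli_body_ssh('show snmp | json', ['{"location": "Test"}'], module)
#   # -> [{'location': 'Test'}]  (parsed JSON)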
def execute_show(cmds, module, command_type=None):
command_type_map = {
'cli_show': 'json',
'cli_show_ascii': 'text'
}
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
else:
response = module.execute(cmds)
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
except AttributeError:
try:
if command_type:
command_type = command_type_map.get(command_type)
module.cli.add_commands(cmds, output=command_type)
response = module.cli.run_commands()
else:
module.cli.add_commands(cmds, raw=True)
response = module.cli.run_commands()
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
return response
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
response = execute_show(cmds, module)
body = get_cli_body_ssh(command, response, module)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = execute_show(cmds, module, command_type=command_type)
return body
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
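# Example (illustrative): apply_key_map renames keys according to key_map and
# coerces truthy values to str, e.g.
#
#   apply_key_map({'sys_location': 'location'}, {'sys_location': 'Test', 'x': 1})
#   # -> {'location': 'Test'}  ('x' is dropped because it has no mapping)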
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_snmp_location(module):
location = {}
location_regex = r'.*snmp-server\slocation\s(?P<location>\S+).*'
command = 'show run snmp'
body = execute_show_command(command, module, command_type='cli_show_ascii')
try:
match_location = re.match(location_regex, body[0], re.DOTALL)
group_location = match_location.groupdict()
location['location'] = group_location["location"]
except (AttributeError, TypeError):
location = {}
return location
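# Example (illustrative): if 'show run snmp' returns output containing
# 'snmp-server location Test', get_snmp_location() yields
# {'location': 'Test'}; when no location is configured it returns {}.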
def main():
argument_spec = dict(
location=dict(required=True, type='str'),
state=dict(choices=['absent', 'present'],
default='present')
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
location = module.params['location']
state = module.params['state']
existing = get_snmp_location(module)
changed = False
commands = []
proposed = dict(location=location)
end_state = existing
if state == 'absent':
if existing and existing['location'] == location:
commands.append('no snmp-server location')
elif state == 'present':
if not existing or existing['location'] != location:
commands.append('snmp-server location {0}'.format(location))
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
execute_config_command(cmds, module)
end_state = get_snmp_location(module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
module.exit_json(**results)
from ansible.module_utils.basic import *
if __name__ == "__main__":
main()
| gpl-3.0 |
domenicosolazzo/practice-django | venv/lib/python2.7/site-packages/pip/_vendor/requests/structures.py | 279 | 3541 | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import os
import collections
from itertools import islice
class IteratorProxy(object):
"""docstring for IteratorProxy"""
def __init__(self, i):
self.i = i
# self.i = chain.from_iterable(i)
def __iter__(self):
return self.i
def __len__(self):
if hasattr(self.i, '__len__'):
return len(self.i)
if hasattr(self.i, 'len'):
return self.i.len
if hasattr(self.i, 'fileno'):
return os.fstat(self.i.fileno()).st_size
def read(self, n):
return "".join(islice(self.i, None, n))
class CaseInsensitiveDict(collections.MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive:
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
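# Example usage (illustrative, not part of the vendored module):
#
#   cid = CaseInsensitiveDict()
#   cid['Content-Encoding'] = 'gzip'
#   assert cid['content-encoding'] == 'gzip'  # lookups are case-insensitive
#   assert list(cid) == ['Content-Encoding']  # original casing is preserved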
class LookupDict(dict):
"""Dictionary lookup object."""
def __init__(self, name=None):
self.name = name
super(LookupDict, self).__init__()
def __repr__(self):
return '<lookup \'%s\'>' % (self.name)
def __getitem__(self, key):
# We allow fall-through here, so values default to None
return self.__dict__.get(key, None)
def get(self, key, default=None):
return self.__dict__.get(key, default)
| mit |
jollyroger/debian-buildbot | buildbot/test/unit/test_db_users.py | 2 | 18708 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.db import users
from buildbot.test.fake import fakedb
from buildbot.test.util import connector_component
from twisted.trial import unittest
class TestUsersConnectorComponent(connector_component.ConnectorComponentMixin,
unittest.TestCase):
def setUp(self):
d = self.setUpConnectorComponent(
table_names=['users', 'users_info', 'changes', 'change_users'])
def finish_setup(_):
self.db.users = users.UsersConnectorComponent(self.db)
d.addCallback(finish_setup)
return d
def tearDown(self):
return self.tearDownConnectorComponent()
# sample user data
user1_rows = [
fakedb.User(uid=1, identifier='soap'),
fakedb.UserInfo(uid=1, attr_type='IPv9', attr_data='0578cc6.8db024'),
]
user2_rows = [
fakedb.User(uid=2, identifier='lye'),
fakedb.UserInfo(uid=2, attr_type='git',
attr_data='Tyler Durden <tyler@mayhem.net>'),
fakedb.UserInfo(uid=2, attr_type='irc', attr_data='durden')
]
user3_rows = [
fakedb.User(uid=3, identifier='marla', bb_username='marla',
bb_password='cancer')
]
user1_dict = {
'uid': 1,
'identifier': u'soap',
'bb_username': None,
'bb_password': None,
'IPv9': u'0578cc6.8db024',
}
user2_dict = {
'uid': 2,
'identifier': u'lye',
'bb_username': None,
'bb_password': None,
'irc': u'durden',
'git': u'Tyler Durden <tyler@mayhem.net>'
}
user3_dict = {
'uid': 3,
'identifier': u'marla',
'bb_username': u'marla',
'bb_password': u'cancer',
}
# tests
def test_addUser_new(self):
d = self.db.users.findUserByAttr(identifier='soap',
attr_type='subspace_net_handle',
attr_data='Durden0924')
def check_user(uid):
def thd(conn):
users_tbl = self.db.model.users
users_info_tbl = self.db.model.users_info
users = conn.execute(users_tbl.select()).fetchall()
infos = conn.execute(users_info_tbl.select()).fetchall()
self.assertEqual(len(users), 1)
self.assertEqual(users[0].uid, uid)
self.assertEqual(users[0].identifier, 'soap')
self.assertEqual(len(infos), 1)
self.assertEqual(infos[0].uid, uid)
self.assertEqual(infos[0].attr_type, 'subspace_net_handle')
self.assertEqual(infos[0].attr_data, 'Durden0924')
return self.db.pool.do(thd)
d.addCallback(check_user)
return d
def test_addUser_existing(self):
d = self.insertTestData(self.user1_rows)
d.addCallback(lambda _: self.db.users.findUserByAttr(
identifier='soapy',
attr_type='IPv9',
attr_data='0578cc6.8db024'))
def check_user(uid):
self.assertEqual(uid, 1)
def thd(conn):
users_tbl = self.db.model.users
users_info_tbl = self.db.model.users_info
users = conn.execute(users_tbl.select()).fetchall()
infos = conn.execute(users_info_tbl.select()).fetchall()
self.assertEqual(len(users), 1)
self.assertEqual(users[0].uid, uid)
self.assertEqual(users[0].identifier, 'soap') # not changed!
self.assertEqual(len(infos), 1)
self.assertEqual(infos[0].uid, uid)
self.assertEqual(infos[0].attr_type, 'IPv9')
self.assertEqual(infos[0].attr_data, '0578cc6.8db024')
return self.db.pool.do(thd)
d.addCallback(check_user)
return d
def test_findUser_existing(self):
d = self.insertTestData(
self.user1_rows + self.user2_rows + self.user3_rows)
d.addCallback(lambda _: self.db.users.findUserByAttr(
identifier='lye',
attr_type='git',
attr_data='Tyler Durden <tyler@mayhem.net>'))
def check_user(uid):
self.assertEqual(uid, 2)
def thd(conn):
users_tbl = self.db.model.users
users_info_tbl = self.db.model.users_info
users = conn.execute(users_tbl.select()).fetchall()
infos = conn.execute(users_info_tbl.select()).fetchall()
self.assertEqual((
sorted([tuple(u) for u in users]),
sorted([tuple(i) for i in infos])
), (
[
(1L, u'soap', None, None),
(2L, u'lye', None, None),
(3L, u'marla', u'marla', u'cancer'),
], [
(1L, u'IPv9', u'0578cc6.8db024'),
(2L, u'git', u'Tyler Durden <tyler@mayhem.net>'),
(2L, u'irc', u'durden')
]))
return self.db.pool.do(thd)
d.addCallback(check_user)
return d
def test_addUser_race(self):
def race_thd(conn):
# note that this assumes that both inserts can happen "at once".
# This is the case for DB engines that support transactions, but
# not for MySQL, so this test does not detect the potential MySQL
# failure, which will generally result in a spurious failure.
conn.execute(self.db.model.users.insert(),
uid=99, identifier='soap')
conn.execute(self.db.model.users_info.insert(),
uid=99, attr_type='subspace_net_handle',
attr_data='Durden0924')
d = self.db.users.findUserByAttr(identifier='soap',
attr_type='subspace_net_handle',
attr_data='Durden0924',
_race_hook=race_thd)
def check_user(uid):
self.assertEqual(uid, 99)
def thd(conn):
users_tbl = self.db.model.users
users_info_tbl = self.db.model.users_info
users = conn.execute(users_tbl.select()).fetchall()
infos = conn.execute(users_info_tbl.select()).fetchall()
self.assertEqual(len(users), 1)
self.assertEqual(users[0].uid, uid)
self.assertEqual(users[0].identifier, 'soap')
self.assertEqual(len(infos), 1)
self.assertEqual(infos[0].uid, uid)
self.assertEqual(infos[0].attr_type, 'subspace_net_handle')
self.assertEqual(infos[0].attr_data, 'Durden0924')
return self.db.pool.do(thd)
d.addCallback(check_user)
return d
def test_addUser_existing_identifier(self):
# see http://trac.buildbot.net/ticket/2587
d = self.insertTestData(self.user1_rows)
d.addCallback(lambda _: self.db.users.findUserByAttr(
identifier='soap', # same identifier
attr_type='IPv9',
attr_data='fffffff.ffffff')) # different attr
def check_user(uid):
# creates a new user
self.assertEqual(uid, 2)
def thd(conn):
users_tbl = self.db.model.users
users_info_tbl = self.db.model.users_info
users = conn.execute(users_tbl.select(order_by=users_tbl.c.identifier)).fetchall()
infos = conn.execute(users_info_tbl.select(users_info_tbl.c.uid == uid)).fetchall()
self.assertEqual(len(users), 2)
self.assertEqual(users[1].uid, uid)
self.assertEqual(users[1].identifier, 'soap_2') # unique'd
self.assertEqual(len(infos), 1)
self.assertEqual(infos[0].attr_type, 'IPv9')
self.assertEqual(infos[0].attr_data, 'fffffff.ffffff')
return self.db.pool.do(thd)
d.addCallback(check_user)
return d
def test_getUser(self):
d = self.insertTestData(self.user1_rows)
def get1(_):
return self.db.users.getUser(1)
d.addCallback(get1)
def check1(usdict):
self.assertEqual(usdict, self.user1_dict)
d.addCallback(check1)
return d
def test_getUser_bb(self):
d = self.insertTestData(self.user3_rows)
def get3(_):
return self.db.users.getUser(3)
d.addCallback(get3)
def check3(usdict):
self.assertEqual(usdict, self.user3_dict)
d.addCallback(check3)
return d
def test_getUser_multi_attr(self):
d = self.insertTestData(self.user2_rows)
def get1(_):
return self.db.users.getUser(2)
d.addCallback(get1)
def check1(usdict):
self.assertEqual(usdict, self.user2_dict)
d.addCallback(check1)
return d
def test_getUser_no_match(self):
d = self.insertTestData(self.user1_rows)
def get3(_):
return self.db.users.getUser(3)
d.addCallback(get3)
def check3(none):
self.assertEqual(none, None)
d.addCallback(check3)
return d
def test_getUsers_none(self):
d = self.db.users.getUsers()
def check(res):
self.assertEqual(res, [])
d.addCallback(check)
return d
def test_getUsers(self):
d = self.insertTestData(self.user1_rows)
def get(_):
return self.db.users.getUsers()
d.addCallback(get)
def check(res):
self.assertEqual(res, [dict(uid=1, identifier='soap')])
d.addCallback(check)
return d
def test_getUsers_multiple(self):
d = self.insertTestData(self.user1_rows + self.user2_rows)
def get(_):
return self.db.users.getUsers()
d.addCallback(get)
def check(res):
self.assertEqual(res, [dict(uid=1, identifier='soap'),
dict(uid=2, identifier='lye')])
d.addCallback(check)
return d
def test_getUserByUsername(self):
d = self.insertTestData(self.user3_rows)
def get3(_):
return self.db.users.getUserByUsername("marla")
d.addCallback(get3)
def check3(res):
self.assertEqual(res, self.user3_dict)
d.addCallback(check3)
return d
def test_getUserByUsername_no_match(self):
d = self.insertTestData(self.user3_rows)
def get3(_):
return self.db.users.getUserByUsername("tyler")
d.addCallback(get3)
def check3(none):
self.assertEqual(none, None)
d.addCallback(check3)
return d
def test_updateUser_existing_type(self):
d = self.insertTestData(self.user1_rows)
def update1(_):
return self.db.users.updateUser(
uid=1, attr_type='IPv9', attr_data='abcd.1234')
d.addCallback(update1)
def get1(_):
return self.db.users.getUser(1)
d.addCallback(get1)
def check1(usdict):
self.assertEqual(usdict['IPv9'], 'abcd.1234')
self.assertEqual(usdict['identifier'], 'soap') # no change
d.addCallback(check1)
return d
def test_updateUser_new_type(self):
d = self.insertTestData(self.user1_rows)
def update1(_):
return self.db.users.updateUser(
uid=1, attr_type='IPv4', attr_data='123.134.156.167')
d.addCallback(update1)
def get1(_):
return self.db.users.getUser(1)
d.addCallback(get1)
def check1(usdict):
self.assertEqual(usdict['IPv4'], '123.134.156.167')
self.assertEqual(usdict['IPv9'], '0578cc6.8db024') # no change
self.assertEqual(usdict['identifier'], 'soap') # no change
d.addCallback(check1)
return d
def test_updateUser_identifier(self):
d = self.insertTestData(self.user1_rows)
def update1(_):
return self.db.users.updateUser(
uid=1, identifier='lye')
d.addCallback(update1)
def get1(_):
return self.db.users.getUser(1)
d.addCallback(get1)
def check1(usdict):
self.assertEqual(usdict['identifier'], 'lye')
self.assertEqual(usdict['IPv9'], '0578cc6.8db024') # no change
d.addCallback(check1)
return d
def test_updateUser_bb(self):
d = self.insertTestData(self.user3_rows)
def update3(_):
return self.db.users.updateUser(
uid=3, bb_username='boss', bb_password='fired')
d.addCallback(update3)
def get3(_):
return self.db.users.getUser(3)
d.addCallback(get3)
def check3(usdict):
self.assertEqual(usdict['bb_username'], 'boss')
self.assertEqual(usdict['bb_password'], 'fired')
self.assertEqual(usdict['identifier'], 'marla') # no change
d.addCallback(check3)
return d
def test_updateUser_all(self):
d = self.insertTestData(self.user1_rows)
def update1(_):
return self.db.users.updateUser(
uid=1, identifier='lye', bb_username='marla',
bb_password='cancer', attr_type='IPv4', attr_data='123.134.156.167')
d.addCallback(update1)
def get1(_):
return self.db.users.getUser(1)
d.addCallback(get1)
def check1(usdict):
self.assertEqual(usdict['identifier'], 'lye')
self.assertEqual(usdict['bb_username'], 'marla')
self.assertEqual(usdict['bb_password'], 'cancer')
self.assertEqual(usdict['IPv4'], '123.134.156.167')
self.assertEqual(usdict['IPv9'], '0578cc6.8db024') # no change
d.addCallback(check1)
return d
def test_updateUser_race(self):
# called from the db thread, this opens a *new* connection (to avoid
# the existing transaction) and executes a conflicting insert in that
# connection. This will cause the insert in the db method to fail, and
# the data in this insert (8.8.8.8) will appear below.
def race_thd(conn):
conn = self.db.pool.engine.connect()
conn.execute(self.db.model.users_info.insert(),
uid=1, attr_type='IPv4',
attr_data='8.8.8.8')
d = self.insertTestData(self.user1_rows)
def update1(_):
return self.db.users.updateUser(
uid=1, attr_type='IPv4', attr_data='123.134.156.167',
_race_hook=race_thd)
d.addCallback(update1)
def get1(_):
return self.db.users.getUser(1)
d.addCallback(get1)
def check1(usdict):
self.assertEqual(usdict['identifier'], 'soap')
self.assertEqual(usdict['IPv4'], '8.8.8.8')
self.assertEqual(usdict['IPv9'], '0578cc6.8db024') # no change
d.addCallback(check1)
return d
def test_update_NoMatch_identifier(self):
d = self.insertTestData(self.user1_rows)
def update3(_):
return self.db.users.updateUser(
uid=3, identifier='abcd')
d.addCallback(update3)
def get1(_):
return self.db.users.getUser(1)
d.addCallback(get1)
def check1(usdict):
self.assertEqual(usdict['identifier'], 'soap') # no change
d.addCallback(check1)
return d
def test_update_NoMatch_attribute(self):
d = self.insertTestData(self.user1_rows)
def update3(_):
return self.db.users.updateUser(
uid=3, attr_type='abcd', attr_data='efgh')
d.addCallback(update3)
def get1(_):
return self.db.users.getUser(1)
d.addCallback(get1)
def check1(usdict):
self.assertEqual(usdict['IPv9'], '0578cc6.8db024') # no change
d.addCallback(check1)
return d
def test_update_NoMatch_bb(self):
d = self.insertTestData(self.user1_rows)
def update3(_):
return self.db.users.updateUser(
uid=3, attr_type='marla', attr_data='cancer')
d.addCallback(update3)
def get1(_):
return self.db.users.getUser(1)
d.addCallback(get1)
def check1(usdict):
self.assertEqual(usdict['IPv9'], '0578cc6.8db024') # no change
d.addCallback(check1)
return d
def test_removeUser_uid(self):
d = self.insertTestData(self.user1_rows)
def remove1(_):
return self.db.users.removeUser(1)
d.addCallback(remove1)
def check1(_):
def thd(conn):
r = conn.execute(self.db.model.users.select())
r = r.fetchall()
self.assertEqual(len(r), 0)
return self.db.pool.do(thd)
d.addCallback(check1)
return d
def test_removeNoMatch(self):
d = self.insertTestData(self.user1_rows)
def check(_):
return self.db.users.removeUser(uid=3)
d.addCallback(check)
return d
def test_identifierToUid_NoMatch(self):
d = self.db.users.identifierToUid(identifier="soap")
def check(res):
self.assertEqual(res, None)
d.addCallback(check)
return d
def test_identifierToUid_match(self):
d = self.insertTestData(self.user1_rows)
def ident2uid(_):
return self.db.users.identifierToUid(identifier="soap")
d.addCallback(ident2uid)
def check(res):
self.assertEqual(res, 1)
d.addCallback(check)
return d
| gpl-2.0 |
pypa/warehouse | warehouse/utils/db/windowed_query.py | 1 | 2050 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Taken from "Theatrum Chemicum" at
# https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes/WindowedRangeQuery
from sqlalchemy import and_, func, text
def column_windows(session, column, windowsize):
"""
Return a series of WHERE clauses against a given column that break it into
windows.
Result is an iterable of tuples, consisting of ((start, end), whereclause),
where (start, end) are the ids.
Requires a database that supports window functions, i.e. Postgresql,
SQL Server, Oracle.
Enhance this yourself! Add a "where" argument so that windows of just a
subset of rows can be computed.
"""
def int_for_range(start_id, end_id):
if end_id:
return and_(column >= start_id, column < end_id)
else:
return column >= start_id
q = session.query(
column, func.row_number().over(order_by=column).label("rownum")
).from_self(column)
if windowsize > 1:
q = q.filter(text("rownum %% %d=1" % windowsize))
intervals = [row[0] for row in q]
while intervals:
start = intervals.pop(0)
if intervals:
end = intervals[0]
else:
end = None
yield int_for_range(start, end)
def windowed_query(q, column, windowsize):
"""
Break a Query into windows on a given column.
"""
for whereclause in column_windows(q.session, column, windowsize):
for row in q.filter(whereclause).order_by(column):
yield row
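# Example usage (illustrative; `User` and `session` are hypothetical and
# assume an integer primary-key column):
#
#   q = session.query(User)
#   for user in windowed_query(q, User.id, 1000):
#       process(user)  # rows arrive in windows of roughly 1000 ids each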
| apache-2.0 |
52ai/django-ccsds | tests/reserved_names/tests.py | 405 | 1686 | from __future__ import unicode_literals
import datetime
from django.test import TestCase
from .models import Thing
class ReservedNameTests(TestCase):
def generate(self):
day1 = datetime.date(2005, 1, 1)
Thing.objects.create(when='a', join='b', like='c', drop='d',
alter='e', having='f', where=day1, has_hyphen='h')
day2 = datetime.date(2006, 2, 2)
Thing.objects.create(when='h', join='i', like='j', drop='k',
alter='l', having='m', where=day2)
def test_simple(self):
day1 = datetime.date(2005, 1, 1)
t = Thing.objects.create(when='a', join='b', like='c', drop='d',
alter='e', having='f', where=day1, has_hyphen='h')
self.assertEqual(t.when, 'a')
day2 = datetime.date(2006, 2, 2)
u = Thing.objects.create(when='h', join='i', like='j', drop='k',
alter='l', having='m', where=day2)
self.assertEqual(u.when, 'h')
def test_order_by(self):
self.generate()
things = [t.when for t in Thing.objects.order_by('when')]
self.assertEqual(things, ['a', 'h'])
def test_fields(self):
self.generate()
v = Thing.objects.get(pk='a')
self.assertEqual(v.join, 'b')
self.assertEqual(v.where, datetime.date(year=2005, month=1, day=1))
def test_dates(self):
self.generate()
resp = Thing.objects.dates('where', 'year')
self.assertEqual(list(resp), [
datetime.date(2005, 1, 1),
datetime.date(2006, 1, 1),
])
def test_month_filter(self):
self.generate()
self.assertEqual(Thing.objects.filter(where__month=1)[0].when, 'a')
| bsd-3-clause |
Bysmyyr/chromium-crosswalk | tools/telemetry/third_party/gsutilz/third_party/boto/boto/iam/summarymap.py | 151 | 1660 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class SummaryMap(dict):
def __init__(self, parent=None):
self.parent = parent
dict.__init__(self)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'key':
self._name = value
elif name == 'value':
try:
self[self._name] = int(value)
except ValueError:
self[self._name] = value
else:
setattr(self, name, value)
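# Illustrative sketch (not part of boto): SummaryMap acts as a SAX-style
# handler for IAM's GetAccountSummary response. For a fragment along the
# lines of <key>Users</key><value>42</value>, endElement() records the key
# name and then stores {'Users': 42}, falling back to the raw string for
# non-integer values.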
| bsd-3-clause |
UfSoft/ISPManCCP | extra-packages/pyperl-1.0.1d/t/apply.py | 1 | 2941 | import perl
#if (perl.MULTI_PERL):
# print "1..0"
# raise SystemExit
print "1..14"
def ok(a, b=None):
return "a=" + str(a) + ", b=" + str(b)
perl.eval("""
use Python qw(apply);
$| = 1;
sub {
my $f = shift;
# First some tests that are expected to blow up
eval {
apply($f);
};
#print $@;
# XXX For some strange reason =~ does not force $@ to stringify, so
# I had to help it along by writing "$@" =~ instead.
# Hmmm, something to fix some other time :-(
print "not " unless "$@" =~ /^python\.<type 'exceptions.TypeError'>: ok\(\) takes at least 1 argument \(0 given\)/;
print "ok 1\n";
eval {
apply($f, undef);
};
#print $@;
print "not " unless "$@" =~ /^python\.<type 'exceptions.TypeError'>: ok\(\) takes at least 1 argument \(0 given\)/;
print "ok 2\n";
eval {
apply($f, undef, undef);
};
#print $@;
print "not " unless "$@" =~ /^python\.<type 'exceptions.TypeError'>: ok\(\) takes at least 1 argument \(0 given\)/;
print "ok 3\n";
eval {
apply($f, undef, undef, undef);
};
#print $@;
print "not " unless "$@" =~ /^Too many arguments at \(eval 1\) line \d+./;
print "ok 4\n";
eval {
apply($f, [1,2,3]);
};
#print $@;
print "not " unless "$@" =~ /^python\.<type 'exceptions.TypeError'>: ok\(\) takes at most 2 arguments \(3 given\)/;
print "ok 5\n";
eval {
apply($f, [], {b => 2});
};
#print $@;
print "not " unless "$@" =~ /^python\.<type 'exceptions.TypeError'>: ok\(\) takes at least 1 non-keyword argument \(0 given\)/;
print "ok 6\n";
eval {
apply($f, [1], {a => 2});
};
#print $@;
print "not " unless "$@" =~ /^python\.<type 'exceptions.TypeError'>: ok\(\) got multiple values for keyword argument 'a'/;
print "ok 7\n";
eval {
apply($f, [], {a => 2, b => 3, c => 4});
};
#print $@;
print "not " unless "$@" =~ /^python\.<type 'exceptions.TypeError'>: ok\(\) got an unexpected keyword argument 'c'/;
print "ok 8\n";
eval {
apply($f, 1);
};
#print $@;
print "not " unless "$@" =~ /^/;
print "ok 9\n";
# Then some tests that are expected to work
$res = apply($f, undef, { a => 101, b => 102 });
#print "$res\\n";
print "not " unless $res eq "a=101, b=102";
print "ok 10\n";
$res = apply($f, undef, { a => 101 });
#print "$res\\n";
print "not " unless $res eq "a=101, b=None";
print "ok 11\n";
$res = apply($f, [101, 102]);
#print "$res\\n";
print "not " unless $res eq "a=101, b=102";
print "ok 12\n";
$res = apply($f, Python::list(101, 102), Python::dict());
#print "$res\\n";
print "not " unless $res eq "a=101, b=102";
print "ok 13\n";
$res = apply($f, [], Python::dict(a => 101));
#print "$res\\n";
print "not " unless $res eq "a=101, b=None";
print "ok 14\n";
}
""")(ok)
| bsd-3-clause |
2014c2g14/w17test2 | static/Brython3.1.1-20150328-091302/Lib/locale.py | 624 | 1918 | def getdefaultlocale():
return __BRYTHON__.language,None
def localeconv():
""" localeconv() -> dict.
Returns numeric and monetary locale-specific parameters.
"""
# 'C' locale default values
return {'grouping': [127],
'currency_symbol': '',
'n_sign_posn': 127,
'p_cs_precedes': 127,
'n_cs_precedes': 127,
'mon_grouping': [],
'n_sep_by_space': 127,
'decimal_point': '.',
'negative_sign': '',
'positive_sign': '',
'p_sep_by_space': 127,
'int_curr_symbol': '',
'p_sign_posn': 127,
'thousands_sep': '',
'mon_thousands_sep': '',
'frac_digits': 127,
'mon_decimal_point': '',
'int_frac_digits': 127}
def setlocale(category, value=None):
""" setlocale(integer,string=None) -> string.
Activates/queries locale processing.
"""
if value not in (None, '', 'C'):
raise Error('_locale emulation only supports "C" locale')
return 'C'
CHAR_MAX = 127
LC_ALL = 6
LC_COLLATE = 3
LC_CTYPE = 0
LC_MESSAGES = 5
LC_MONETARY = 4
LC_NUMERIC = 1
LC_TIME = 2
Error = ValueError
def getlocale(category=LC_CTYPE):
""" Returns the current setting for the given locale category as
tuple (language code, encoding).
category may be one of the LC_* value except LC_ALL. It
defaults to LC_CTYPE.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
return None, None
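# Example (illustrative): under this Brython shim the locale is effectively
# fixed, so setlocale(LC_ALL, 'C') returns 'C', setlocale(LC_ALL, 'de_DE')
# raises Error, and getlocale() always returns (None, None).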
| agpl-3.0 |
andim27/magiccamp | django/contrib/gis/gdal/libgdal.py | 147 | 3378 | import os, re, sys
from ctypes import c_char_p, CDLL
from ctypes.util import find_library
from django.contrib.gis.gdal.error import OGRException
# Custom library path set?
try:
from django.conf import settings
lib_path = settings.GDAL_LIBRARY_PATH
except (AttributeError, EnvironmentError, ImportError):
lib_path = None
if lib_path:
lib_names = None
elif os.name == 'nt':
# Windows NT shared library
lib_names = ['gdal17', 'gdal16', 'gdal15']
elif os.name == 'posix':
# *NIX library names.
lib_names = ['gdal', 'GDAL', 'gdal1.7.0', 'gdal1.6.0', 'gdal1.5.0', 'gdal1.4.0']
else:
raise OGRException('Unsupported OS "%s"' % os.name)
# Using the ctypes `find_library` utility to find the
# path to the GDAL library from the list of library names.
if lib_names:
for lib_name in lib_names:
lib_path = find_library(lib_name)
if lib_path is not None: break
if lib_path is None:
raise OGRException('Could not find the GDAL library (tried "%s"). '
'Try setting GDAL_LIBRARY_PATH in your settings.' %
'", "'.join(lib_names))
# This loads the GDAL/OGR C library
lgdal = CDLL(lib_path)
# On Windows, the GDAL binaries have some OSR routines exported with
# STDCALL, while others are not. Thus, the library will also need to
# be loaded up as WinDLL for said OSR functions that require the
# different calling convention.
if os.name == 'nt':
from ctypes import WinDLL
lwingdal = WinDLL(lib_path)
def std_call(func):
"""
Returns the correct STDCALL function for certain OSR routines on Win32
platforms.
"""
if os.name == 'nt':
return lwingdal[func]
else:
return lgdal[func]
#### Version-information functions. ####
# Returns GDAL library version information with the given key.
_version_info = std_call('GDALVersionInfo')
_version_info.argtypes = [c_char_p]
_version_info.restype = c_char_p
def gdal_version():
"Returns only the GDAL version number information."
return _version_info('RELEASE_NAME')
def gdal_full_version():
"Returns the full GDAL version information."
return _version_info('')
def gdal_release_date(date=False):
"""
Returns the release date in a string format, e.g, "2007/06/27".
If the date keyword argument is set to True, a Python datetime object
will be returned instead.
"""
from datetime import date as date_type
rel = _version_info('RELEASE_DATE')
yy, mm, dd = map(int, (rel[0:4], rel[4:6], rel[6:8]))
d = date_type(yy, mm, dd)
if date: return d
else: return d.strftime('%Y/%m/%d')
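# Example (illustrative): with a RELEASE_DATE of '20070627',
#   gdal_release_date()          -> '2007/06/27'
#   gdal_release_date(date=True) -> datetime.date(2007, 6, 27)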
version_regex = re.compile(r'^(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<subminor>\d+))?')
def gdal_version_info():
ver = gdal_version()
m = version_regex.match(ver)
if not m: raise OGRException('Could not parse GDAL version string "%s"' % ver)
return dict([(key, m.group(key)) for key in ('major', 'minor', 'subminor')])
_verinfo = gdal_version_info()
GDAL_MAJOR_VERSION = int(_verinfo['major'])
GDAL_MINOR_VERSION = int(_verinfo['minor'])
GDAL_SUBMINOR_VERSION = _verinfo['subminor'] and int(_verinfo['subminor'])
GDAL_VERSION = (GDAL_MAJOR_VERSION, GDAL_MINOR_VERSION, GDAL_SUBMINOR_VERSION)
del _verinfo
# GeoJSON support is available only in GDAL 1.5+.
if GDAL_VERSION >= (1, 5):
GEOJSON = True
else:
GEOJSON = False
| bsd-3-clause |
zhenzhai/edx-platform | cms/djangoapps/contentstore/views/error.py | 25 | 1641 | # pylint: disable=missing-docstring,unused-argument
from django.http import (HttpResponse, HttpResponseServerError,
HttpResponseNotFound)
from edxmako.shortcuts import render_to_string, render_to_response
import functools
from openedx.core.djangolib.js_utils import dump_js_escaped_json
__all__ = ['not_found', 'server_error', 'render_404', 'render_500']
def jsonable_error(status=500, message="The Studio servers encountered an error"):
"""
A decorator to make an error view return an JSON-formatted message if
it was requested via AJAX.
"""
def outer(func):
@functools.wraps(func)
def inner(request, *args, **kwargs):
if request.is_ajax():
content = dump_js_escaped_json({"error": message})
return HttpResponse(content, content_type="application/json",
status=status)
else:
return func(request, *args, **kwargs)
return inner
return outer
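# Illustrative behavior (hypothetical request, not part of the module): an
# AJAX request to a view wrapped in @jsonable_error(404, "Resource not found")
# receives HTTP 404 with body {"error": "Resource not found"}, while a
# non-AJAX request falls through to the wrapped view's HTML response.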
@jsonable_error(404, "Resource not found")
def not_found(request):
return render_to_response('error.html', {'error': '404'})
@jsonable_error(500, "The Studio servers encountered an error")
def server_error(request):
return render_to_response('error.html', {'error': '500'})
@jsonable_error(404, "Resource not found")
def render_404(request):
return HttpResponseNotFound(render_to_string('404.html', {}, request=request))
@jsonable_error(500, "The Studio servers encountered an error")
def render_500(request):
return HttpResponseServerError(render_to_string('500.html', {}, request=request))
| agpl-3.0 |
yangbh/dpkt | dpkt/netflow.py | 6 | 14791 | # $Id: netflow.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Cisco Netflow."""
import itertools
import struct
import dpkt
class NetflowBase(dpkt.Packet):
"""Base class for Cisco Netflow packets."""
__hdr__ = (
('version', 'H', 1),
('count', 'H', 0),
('sys_uptime', 'I', 0),
('unix_sec', 'I', 0),
('unix_nsec', 'I', 0)
)
def __len__(self):
return self.__hdr_len__ + (len(self.data[0]) * self.count)
def __str__(self):
# for now, don't try to enforce any size limits
self.count = len(self.data)
return self.pack_hdr() + ''.join(map(str, self.data))
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
buf = self.data
l = []
while buf:
flow = self.NetflowRecord(buf)
l.append(flow)
buf = buf[len(flow):]
self.data = l
class NetflowRecordBase(dpkt.Packet):
"""Base class for netflow v1-v7 netflow records."""
# performance optimizations
def __len__(self):
# don't bother with data
return self.__hdr_len__
def __str__(self):
# don't bother with data
return self.pack_hdr()
def unpack(self, buf):
# don't bother with data
for k, v in itertools.izip(self.__hdr_fields__,
struct.unpack(self.__hdr_fmt__, buf[:self.__hdr_len__])):
setattr(self, k, v)
self.data = ""
class Netflow1(NetflowBase):
"""Netflow Version 1."""
class NetflowRecord(NetflowBase.NetflowRecordBase):
"""Netflow v1 flow record."""
__hdr__ = (
('src_addr', 'I', 0),
('dst_addr', 'I', 0),
('next_hop', 'I', 0),
('input_iface', 'H', 0),
('output_iface', 'H', 0),
('pkts_sent', 'I', 0),
('bytes_sent', 'I', 0),
('start_time', 'I', 0),
('end_time', 'I', 0),
('src_port', 'H', 0),
('dst_port', 'H', 0),
('pad1', 'H', 0),
('ip_proto', 'B', 0),
('tos', 'B', 0),
('tcp_flags', 'B', 0),
('pad2', 'B', 0),
('pad3', 'H', 0),
('reserved', 'I', 0)
)
# FYI, versions 2-4 don't appear to have ever seen the light of day.
class Netflow5(NetflowBase):
"""Netflow Version 5."""
__hdr__ = NetflowBase.__hdr__ + (
('flow_sequence', 'I', 0),
('engine_type', 'B', 0),
('engine_id', 'B', 0),
('reserved', 'H', 0),
)
class NetflowRecord(NetflowBase.NetflowRecordBase):
"""Netflow v5 flow record."""
__hdr__ = (
('src_addr', 'I', 0),
('dst_addr', 'I', 0),
('next_hop', 'I', 0),
('input_iface', 'H', 0),
('output_iface', 'H', 0),
('pkts_sent', 'I', 0),
('bytes_sent', 'I', 0),
('start_time', 'I', 0),
('end_time', 'I', 0),
('src_port', 'H', 0),
('dst_port', 'H', 0),
('pad1', 'B', 0),
('tcp_flags', 'B', 0),
('ip_proto', 'B', 0),
('tos', 'B', 0),
('src_as', 'H', 0),
('dst_as', 'H', 0),
('src_mask', 'B', 0),
('dst_mask', 'B', 0),
('pad2', 'H', 0),
)
class Netflow6(NetflowBase):
"""Netflow Version 6.
XXX - unsupported by Cisco, but may be found in the field.
"""
__hdr__ = Netflow5.__hdr__
class NetflowRecord(NetflowBase.NetflowRecordBase):
"""Netflow v6 flow record."""
__hdr__ = (
('src_addr', 'I', 0),
('dst_addr', 'I', 0),
('next_hop', 'I', 0),
('input_iface', 'H', 0),
('output_iface', 'H', 0),
('pkts_sent', 'I', 0),
('bytes_sent', 'I', 0),
('start_time', 'I', 0),
('end_time', 'I', 0),
('src_port', 'H', 0),
('dst_port', 'H', 0),
('pad1', 'B', 0),
('tcp_flags', 'B', 0),
('ip_proto', 'B', 0),
('tos', 'B', 0),
('src_as', 'H', 0),
('dst_as', 'H', 0),
('src_mask', 'B', 0),
('dst_mask', 'B', 0),
('in_encaps', 'B', 0),
('out_encaps', 'B', 0),
('peer_nexthop', 'I', 0),
)
class Netflow7(NetflowBase):
"""Netflow Version 7."""
__hdr__ = NetflowBase.__hdr__ + (
('flow_sequence', 'I', 0),
('reserved', 'I', 0),
)
class NetflowRecord(NetflowBase.NetflowRecordBase):
"""Netflow v7 flow record."""
__hdr__ = (
('src_addr', 'I', 0),
('dst_addr', 'I', 0),
('next_hop', 'I', 0),
('input_iface', 'H', 0),
('output_iface', 'H', 0),
('pkts_sent', 'I', 0),
('bytes_sent', 'I', 0),
('start_time', 'I', 0),
('end_time', 'I', 0),
('src_port', 'H', 0),
('dst_port', 'H', 0),
('flags', 'B', 0),
('tcp_flags', 'B', 0),
('ip_proto', 'B', 0),
('tos', 'B', 0),
('src_as', 'H', 0),
('dst_as', 'H', 0),
('src_mask', 'B', 0),
('dst_mask', 'B', 0),
('pad2', 'H', 0),
('router_sc', 'I', 0),
)
# No support for v8 or v9 yet.
__sample_v1 = "\x00\x01\x00\x18gza<B\x00\xfc\x1c$\x93\x08p\xac\x01 W\xc0\xa8c\xf7\n\x00\x02\x01\x00\x03\x00\n\x00\x00\x00\x01\x00\x00\x02(gz7,gz7,\\\x1b\x00P\xac\x01\x11,\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01\x18S\xac\x18\xd9\xaa\xc0\xa82\x02\x00\x03\x00\x19\x00\x00\x00\x01\x00\x00\x05\xdcgz7|gz7|\xd8\xe3\x00P\xac\x01\x06,\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01\x14\x18\xac\x18\x8d\xcd\xc0\xa82f\x00\x03\x00\x07\x00\x00\x00\x01\x00\x00\x05\xdcgz7\x90gz7\x90\x8a\x81\x17o\xac\x01\x066\x10\x00\x00\x00\x00\x04\x00\x03\xac\x0f'$\xac\x01\xe5\x1d\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x02(gz:8gz:8\xa3Q\x126\xac)\x06\xfd\x18\x00\x00\x00\x00\x04\x00\x1b\xac\x01\x16E\xac#\x17\x8e\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x02(gz:Lgz:L\xc9\xff\x00P\xac\x1f\x06\x86\x02\x00\x00\x00\x00\x03\x00\x1b\xac\r\t\xff\xac\x01\x99\x95\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:Xgz:X\xee9\x00\x17\xac\x01\x06\xde\x10\x00\x00\x00\x00\x04\x00\x03\xac\x0eJ\xd8\xac\x01\xae/\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:hgz:h\xb3n\x00\x15\xac\x01\x06\x81\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01#8\xac\x01\xd9*\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:tgz:t\x00\x00\x83P\xac!\x01\xab\x10\x00\x00\x00\x00\x03\x00\x1b\xac\n`7\xac*\x93J\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:tgz:t\x00\x00\x00\x00\xac\x012\xa9\x10\x00\x00\x00\x00\x04\x00\x07\xac\nG\x1f\xac\x01\xfdJ\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x00(gz:\x88gz:\x88!\x99i\x87\xac\x1e\x06~\x02\x00\x00\x00\x00\x03\x00\x1b\xac\x01(\xc9\xac\x01B\xc4\xc0\xa82\x02\x00\x03\x00\x19\x00\x00\x00\x01\x00\x00\x00(gz:\x88gz:\x88}6\x00P\xac\x01\x06\xfe\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x0b\x08\xe8\xac\x01F\xe2\xc0\xa82\x02\x00\x04\x00\x19\x00\x00\x00\x01\x00\x00\x05\xdcgz:\x9cgz:\x9c`ii\x87\xac\x01\x06;\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01\x1d$\xac<\xf0\xc3\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:\x9cgz:\x9cF2\x00\x14\xac\x01\x06s\x18\x00\x00\x00\x00\x04\x00\x03\xac\x0b\x11Q\xac\x01\xde\x06\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:\xb0gz:\xb0\xef#\x1a+\xac)\x06\xe9\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x0cR\xd9\xac\x01o\xe8\xc0\xa82\x02\x00\x04\x00\x19\x00\x00\x00\x01\x00\x00\x05\xdcgz:\xc4gz:\xc4\x13n\x00n\xac\x19\x06\xa8\x10\x00\x00\x00\x00\x03\x00\x19\xac\x01=\xdd\xac\x01}\xee\xc0\xa82f\x00\x03\x00\x07\x00\x00\x00\x01\x00\x00\x00(gz:\xc4gz:\xc4\x00\x00\xdc\xbb\xac\x01\x01\xd3\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x0f(\xd1\xac\x01\xcc\xa5\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:\xd8gz:\xd8\xc5s\x17o\xac\x19\x06#\x18\x00\x00\x00\x00\x03\x00\x07\xac\n\x85[\xc0\xa8cn\n\x00\x02\x01\x00\x04\x00\n\x00\x00\x00\x01\x00\x00\x05\xdcgz:\xe4gz:\xe4\xbfl\x00P\xac\x01\x06\xcf\x10\x00\x00\x00\x00\x04\x00\x07\xac\x010\x1f\xac\x18!E\xc0\xa82f\x00\x03\x00\x07\x00\x00\x00\x01\x00\x00\x05\xdcgz;\x00gz;\x00\x11\x95\x04\xbe\xc0\xa8\x06\xea\x10\x00\x00\x00\x00\x03\x00\n\xac\x010\xb6\xac\x1e\xf4\xaa\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz;4gz;4\x88d\x00\x17\xac\x01\x06\x1f\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01#_\xac\x1e\xb0\t\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz;Hgz;H\x81S\x00P\xac 
\x06N\x10\x00\x00\x00\x00\x03\x00\x1b\xac\x01\x04\xd9\xac\x01\x94c\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x02(gz;\\gz;\\U\x10\x00P\xac\x01\x06P\x18\x00\x00\x00\x00\x04\x00\x1b\xac\x01<\xae\xac*\xac!\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x00\xfagz;\x84gz;\x84\x0c\xe7\x00P\xac\x01\x11\xfd\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01\x1f\x1f\xac\x17\xedi\xc0\xa82\x02\x00\x03\x00\x19\x00\x00\x00\x01\x00\x00\x05\xdcgz;\x98gz;\x98\xba\x17\x00\x16\xac\x01\x06|\x10\x00\x00\x00\x00\x03\x00\x07"
__sample_v5 = '\x00\x05\x00\x1d\xb5\xfa\xc9\xd0:\x0bAB&Vw\xde\x9bsv1\x00\x01\x00\x00\xac\n\x86\xa6\xac\x01\xaa\xf7\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x02(\xb5\xfa\x81\x14\xb5\xfa\x81\x1452\x00P\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x91D\xac\x14C\xe4\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x00(\xb5\xfa\x9b\xbd\xb5\xfa\x9b\xbd\x00P\x85\xd7\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x17\xe2\xd7\xac\x01\x8cV\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfao\xb8\xb5\xfao\xb8v\xe8\x17o\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x0e\xf2\xe5\xac\x01\x91\xb2\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x00\xfa\xb5\xfa\x81\xee\xb5\xfa\x81\xee\xd0\xeb\x00\x15\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\nCj\xac)\xa7\t\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x02(\xb5\xfa\x85\x92\xb5\xfa\x85\x92\x8c\xb0\x005\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x96=\xac\x15\x1a\xa8\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x86\xe0\xb5\xfa\x86\xe0\xb4\xe7\x00\xc2\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01V\xd1\xac\x01\x86\x15\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa}:\xb5\xfa}:[Q\x00P\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac2\xf1\xb1\xac)\x19\xca\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x83\xc3\xb5\xfa\x83\xc3\x16,\x00\x15\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x0cA4\xac\x01\x9az\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x8d\xa7\xb5\xfa\x8d\xa7\x173\x00\x15\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x1e\xd2\x84\xac)\xd8\xd2\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x8e\x97\xb5\xfa\x8e\x977*\x17o\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x85J\xac 
\x11\xfc\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x02(\xb5\xfa\x884\xb5\xfa\x884\xf5\xdd\x00\x8f\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x04\x80\xac<[n\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x9dr\xb5\xfa\x9drs$\x00\x16\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\xb9J\xac"\xc9\xd7\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x00(\xb5\xfa\x90r\xb5\xfa\x90r\x0f\x8d\x00\xc2\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac*\xa3\x10\xac\x01\xb4\x19\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x00(\xb5\xfa\x92\x03\xb5\xfa\x92\x03pf\x00\x15\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\xabo\xac\x1e\x7fi\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x93\x7f\xb5\xfa\x93\x7f\x00P\x0b\x98\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x0c\n\xea\xac\x01\xa1\x15\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfay\xcf\xb5\xfay\xcf[3\x17\xe0\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\xbb\xb3\xac)u\x8c\n\x00\x02\x01\x00i\x00\xdb\x00\x00\x00\x01\x00\x00\x00\xfa\xb5\xfa\x943\xb5\xfa\x943\x00P\x1e\xca\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x0fJ`\xac\x01\xab\x94\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x02(\xb5\xfa\x87[\xb5\xfa\x87[\x9a\xd6/\xab\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac*\x0f\x93\xac\x01\xb8\xa3\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x00(\xb5\xfa\x89\xbb\xb5\xfa\x89\xbbn\xe1\x00P\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x93\xa1\xac\x16\x80\x0c\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x00(\xb5\xfa\x87&\xb5\xfa\x87&\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x83Z\xac\x1fR\xcd\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x90\r\xb5\xfa\x90\r\xf7*\x00\x8a\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x0c\xe0\xad\xac\x01\xa8V\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x9c\xf6\xb5\xfa\x9c\xf6\xe5|\x1a+\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x1e\xccT\xac<x&\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x80\xea\xb5\xfa\x80\xea\x00\x00\x00\x00\x00\x00/\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\xbb\x18\xac\x01|z\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x00\xfa\xb5\xfa\x88p\xb5\xfa\x88p\x00P\x0b}\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x17\x0er\xac\x01\x8f\xdd\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x02(\xb5\xfa\x89\xf7\xb5\xfa\x89\xf7\r\xf7\x00\x8a\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\n\xbb\x04\xac<\xb0\x15\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x90\xa9\xb5\xfa\x90\xa9\x9c\xd0\x00\x8f\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\nz?\xac)\x03\xc8\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfaue\xb5\xfaue\xee\xa6\x00P\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\xb5\x05\xc0\xa8c\x9f\n\x00\x02\x01\x00i\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa{\xc7\xb5\xfa{\xc7\x00P\x86\xa9\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac2\xa5\x1b\xac)0\xbf\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x00\xfa\xb5\xfa\x9bZ\xb5\xfa\x9bZC\xf9\x17\xe0\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00'
def test_net_flow_v1_pack(): pass
def test_net_flow_v1_unpack():
nf = Netflow1(__sample_v1)
assert len(nf.data) == 24
# print repr(nfv1)
def test_net_flow_v5_pack(): pass
def test_net_flow_v5_unpack():
nf = Netflow5(__sample_v5)
assert len(nf.data) == 29
# print repr(nfv5)
if __name__ == '__main__':
test_net_flow_v1_pack()
test_net_flow_v1_unpack()
test_net_flow_v5_pack()
test_net_flow_v5_unpack()
print 'Tests Successful...'
| bsd-3-clause |
cloudera/hue | desktop/core/ext-py/Django-1.11.29/tests/messages_tests/test_fallback.py | 7 | 6524 | from django.contrib.messages import constants
from django.contrib.messages.storage.fallback import (
CookieStorage, FallbackStorage,
)
from django.test import SimpleTestCase
from .base import BaseTests
from .test_cookie import set_cookie_data, stored_cookie_messages_count
from .test_session import set_session_data, stored_session_messages_count
class FallbackTests(BaseTests, SimpleTestCase):
storage_class = FallbackStorage
def get_request(self):
self.session = {}
request = super(FallbackTests, self).get_request()
request.session = self.session
return request
def get_cookie_storage(self, storage):
return storage.storages[-2]
def get_session_storage(self, storage):
return storage.storages[-1]
def stored_cookie_messages_count(self, storage, response):
return stored_cookie_messages_count(self.get_cookie_storage(storage), response)
def stored_session_messages_count(self, storage, response):
return stored_session_messages_count(self.get_session_storage(storage))
def stored_messages_count(self, storage, response):
"""
Return the storage totals from both cookie and session backends.
"""
return (
self.stored_cookie_messages_count(storage, response) +
self.stored_session_messages_count(storage, response)
)
def test_get(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
# Set initial cookie data.
example_messages = [str(i) for i in range(5)]
set_cookie_data(cookie_storage, example_messages)
# Overwrite the _get method of the fallback storage to prove it is not
# used (it would cause a TypeError: 'NoneType' object is not callable).
self.get_session_storage(storage)._get = None
self.assertEqual(list(storage), example_messages)
def test_get_empty(self):
request = self.get_request()
storage = self.storage_class(request)
# Overwrite the _get method of the fallback storage to prove it is not
# used (it would cause a TypeError: 'NoneType' object is not callable).
self.get_session_storage(storage)._get = None
self.assertEqual(list(storage), [])
def test_get_fallback(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
session_storage = self.get_session_storage(storage)
# Set initial cookie and session data.
example_messages = [str(i) for i in range(5)]
set_cookie_data(cookie_storage, example_messages[:4] + [CookieStorage.not_finished])
set_session_data(session_storage, example_messages[4:])
self.assertEqual(list(storage), example_messages)
def test_get_fallback_only(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
session_storage = self.get_session_storage(storage)
# Set initial cookie and session data.
example_messages = [str(i) for i in range(5)]
set_cookie_data(cookie_storage, [CookieStorage.not_finished], encode_empty=True)
set_session_data(session_storage, example_messages)
self.assertEqual(list(storage), example_messages)
def test_flush_used_backends(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
session_storage = self.get_session_storage(storage)
# Set initial cookie and session data.
set_cookie_data(cookie_storage, ['cookie', CookieStorage.not_finished])
set_session_data(session_storage, ['session'])
# When updating, previously used but no longer needed backends are
# flushed.
response = self.get_response()
list(storage)
storage.update(response)
session_storing = self.stored_session_messages_count(storage, response)
self.assertEqual(session_storing, 0)
def test_no_fallback(self):
"""
(1) A short number of messages whose data size doesn't exceed what is
allowed in a cookie will all be stored in the CookieBackend.
(2) If the CookieBackend can store all messages, the SessionBackend
won't be written to at all.
"""
storage = self.get_storage()
response = self.get_response()
# Overwrite the _store method of the fallback storage to prove it isn't
# used (it would cause a TypeError: 'NoneType' object is not callable).
self.get_session_storage(storage)._store = None
for i in range(5):
storage.add(constants.INFO, str(i) * 100)
storage.update(response)
cookie_storing = self.stored_cookie_messages_count(storage, response)
self.assertEqual(cookie_storing, 5)
session_storing = self.stored_session_messages_count(storage, response)
self.assertEqual(session_storing, 0)
def test_session_fallback(self):
"""
If the data exceeds what is allowed in a cookie, messages which did
not fit are stored in the SessionBackend.
"""
storage = self.get_storage()
response = self.get_response()
# see comment in CookieTests.test_cookie_max_length()
msg_size = int((CookieStorage.max_cookie_size - 54) / 4.5 - 37)
for i in range(5):
storage.add(constants.INFO, str(i) * msg_size)
storage.update(response)
cookie_storing = self.stored_cookie_messages_count(storage, response)
self.assertEqual(cookie_storing, 4)
session_storing = self.stored_session_messages_count(storage, response)
self.assertEqual(session_storing, 1)
def test_session_fallback_only(self):
"""
Large messages, none of which fit in a cookie, are stored in the
SessionBackend (and nothing is stored in the CookieBackend).
"""
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'x' * 5000)
storage.update(response)
cookie_storing = self.stored_cookie_messages_count(storage, response)
self.assertEqual(cookie_storing, 0)
session_storing = self.stored_session_messages_count(storage, response)
self.assertEqual(session_storing, 1)
| apache-2.0 |
mancoast/CPythonPyc_test | cpython/323_test_datetime.py | 122 | 1854 | import unittest
import sys
from test.support import import_fresh_module, run_unittest
TESTS = 'test.datetimetester'
# XXX: import_fresh_module() is supposed to leave the sys.modules cache untouched,
# XXX: but it does not, so we have to save and restore it ourselves.
save_sys_modules = sys.modules.copy()
try:
pure_tests = import_fresh_module(TESTS, fresh=['datetime', '_strptime'],
blocked=['_datetime'])
fast_tests = import_fresh_module(TESTS, fresh=['datetime',
'_datetime', '_strptime'])
finally:
sys.modules.clear()
sys.modules.update(save_sys_modules)
test_modules = [pure_tests, fast_tests]
test_suffixes = ["_Pure", "_Fast"]
# XXX(gb) First run all the _Pure tests, then all the _Fast tests. You might
# not believe this, but in spite of all the sys.modules trickery running a _Pure
# test last will leave a mix of pure and native datetime stuff lying around.
test_classes = []
for module, suffix in zip(test_modules, test_suffixes):
for name, cls in module.__dict__.items():
if not (isinstance(cls, type) and issubclass(cls, unittest.TestCase)):
continue
cls.__name__ = name + suffix
@classmethod
def setUpClass(cls_, module=module):
cls_._save_sys_modules = sys.modules.copy()
sys.modules[TESTS] = module
sys.modules['datetime'] = module.datetime_module
sys.modules['_strptime'] = module._strptime
@classmethod
def tearDownClass(cls_):
sys.modules.clear()
sys.modules.update(cls_._save_sys_modules)
cls.setUpClass = setUpClass
cls.tearDownClass = tearDownClass
test_classes.append(cls)
def test_main():
run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
FHannes/intellij-community | python/testData/inspections/PyNumpyType/ReturnOptional.py | 79 | 1681 | def unique(ar, return_index=False, return_inverse=False, return_counts=False):
"""
Find the unique elements of an array.
Returns the sorted unique elements of an array. There are two optional
outputs in addition to the unique elements: the indices of the input array
that give the unique values, and the indices of the unique array that
reconstruct the input array.
Parameters
----------
ar : array_like
Input array. This will be flattened if it is not already 1-D.
return_index : bool, optional
If True, also return the indices of `ar` that result in the unique
array.
return_inverse : bool, optional
If True, also return the indices of the unique array that can be used
to reconstruct `ar`.
return_counts : bool, optional
.. versionadded:: 1.9.0
If True, also return the number of times each unique value comes up
in `ar`.
Returns
-------
unique : ndarray
The sorted unique values.
unique_indices : ndarray, optional
The indices of the first occurrences of the unique values in the
(flattened) original array. Only provided if `return_index` is True.
unique_inverse : ndarray, optional
The indices to reconstruct the (flattened) original array from the
unique array. Only provided if `return_inverse` is True.
unique_counts : ndarray, optional
.. versionadded:: 1.9.0
The number of times each of the unique values comes up in the
original array. Only provided if `return_counts` is True.
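    Examples
    --------
    >>> np.unique([1, 1, 2, 2, 3, 3])
    array([1, 2, 3])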
"""
ar = np.asanyarray(ar).flatten()
    u, indices = unique(ar, return_index=True) | apache-2.0 |
snasoft/QtCreatorPluginsPack | Bin/3rdParty/vera/bin/lib/test/test_codecencodings_iso2022.py | 71 | 1464 | #!/usr/bin/env python
#
# Codec encoding tests for ISO 2022 encodings.
from test import test_support
from test import test_multibytecodec_support
import unittest
COMMON_CODEC_TESTS = (
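    # each entry is (input bytes, error handler, expected unicode result)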
# invalid bytes
(b'ab\xFFcd', 'replace', u'ab\uFFFDcd'),
(b'ab\x1Bdef', 'replace', u'ab\x1Bdef'),
(b'ab\x1B$def', 'replace', u'ab\uFFFD'),
)
class Test_ISO2022_JP(test_multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'iso2022_jp'
tstring = test_multibytecodec_support.load_teststring('iso2022_jp')
codectests = COMMON_CODEC_TESTS + (
(b'ab\x1BNdef', 'replace', u'ab\x1BNdef'),
)
class Test_ISO2022_JP2(test_multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'iso2022_jp_2'
tstring = test_multibytecodec_support.load_teststring('iso2022_jp')
codectests = COMMON_CODEC_TESTS + (
(b'ab\x1BNdef', 'replace', u'abdef'),
)
class Test_ISO2022_KR(test_multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'iso2022_kr'
tstring = test_multibytecodec_support.load_teststring('iso2022_kr')
codectests = COMMON_CODEC_TESTS + (
(b'ab\x1BNdef', 'replace', u'ab\x1BNdef'),
)
# iso2022_kr.txt cannot be used to test "chunk coding": the escape
# sequence is only written on the first line
def test_chunkcoding(self):
pass
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
qgis/QGIS | python/plugins/processing/algs/qgis/EliminateSelection.py | 25 | 9096 | # -*- coding: utf-8 -*-
"""
***************************************************************************
EliminateSelection.py
---------------------
Date : January 2017
Copyright : (C) 2017 by Bernhard Ströbl
Email : bernhard.stroebl@jena.de
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Bernhard Ströbl'
__date__ = 'January 2017'
__copyright__ = '(C) 2017, Bernhard Ströbl'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsApplication,
QgsFeatureRequest,
QgsFeature,
QgsFeatureSink,
QgsGeometry,
QgsProcessingAlgorithm,
QgsProcessingException,
QgsProcessingUtils,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterEnum,
QgsProcessing,
QgsProcessingParameterFeatureSink)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class EliminateSelection(QgisAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
MODE = 'MODE'
MODE_LARGEST_AREA = 0
MODE_SMALLEST_AREA = 1
MODE_BOUNDARY = 2
def icon(self):
return QgsApplication.getThemeIcon("/algorithms/mAlgorithmDissolve.svg")
def svgIconPath(self):
return QgsApplication.iconPath("/algorithms/mAlgorithmDissolve.svg")
def group(self):
return self.tr('Vector geometry')
def groupId(self):
return 'vectorgeometry'
def __init__(self):
super().__init__()
def flags(self):
return super().flags() | QgsProcessingAlgorithm.FlagNoThreading | QgsProcessingAlgorithm.FlagNotAvailableInStandaloneTool
def initAlgorithm(self, config=None):
self.modes = [self.tr('Largest Area'),
self.tr('Smallest Area'),
self.tr('Largest Common Boundary')]
self.addParameter(QgsProcessingParameterVectorLayer(self.INPUT,
self.tr('Input layer'), [QgsProcessing.TypeVectorPolygon]))
self.addParameter(QgsProcessingParameterEnum(self.MODE,
self.tr('Merge selection with the neighbouring polygon with the'),
options=self.modes))
self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Eliminated'), QgsProcessing.TypeVectorPolygon))
def name(self):
return 'eliminateselectedpolygons'
def displayName(self):
return self.tr('Eliminate selected polygons')
def processAlgorithm(self, parameters, context, feedback):
inLayer = self.parameterAsVectorLayer(parameters, self.INPUT, context)
boundary = self.parameterAsEnum(parameters, self.MODE, context) == self.MODE_BOUNDARY
smallestArea = self.parameterAsEnum(parameters, self.MODE, context) == self.MODE_SMALLEST_AREA
if inLayer.selectedFeatureCount() == 0:
feedback.reportError(self.tr('{0}: (No selection in input layer "{1}")').format(self.displayName(), parameters[self.INPUT]))
featToEliminate = []
selFeatIds = inLayer.selectedFeatureIds()
(sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
inLayer.fields(), inLayer.wkbType(), inLayer.sourceCrs())
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
for aFeat in inLayer.getFeatures():
if feedback.isCanceled():
break
if aFeat.id() in selFeatIds:
# Keep references to the features to eliminate
featToEliminate.append(aFeat)
else:
# write the others to output
sink.addFeature(aFeat, QgsFeatureSink.FastInsert)
del sink
# Delete all features to eliminate in processLayer
processLayer = QgsProcessingUtils.mapLayerFromString(dest_id, context)
processLayer.startEditing()
# ANALYZE
if len(featToEliminate) > 0: # Prevent zero division
start = 20.00
add = 80.00 / len(featToEliminate)
else:
start = 100
feedback.setProgress(start)
madeProgress = True
# We go through the list and see if we find any polygons we can
# merge the selected with. If we have no success with some we
# merge and then restart the whole story.
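        # (i.e. we keep making passes until a full pass eliminates nothing)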
while madeProgress: # Check if we made any progress
madeProgress = False
featNotEliminated = []
# Iterate over the polygons to eliminate
for i in range(len(featToEliminate)):
if feedback.isCanceled():
break
feat = featToEliminate.pop()
geom2Eliminate = feat.geometry()
bbox = geom2Eliminate.boundingBox()
fit = processLayer.getFeatures(
QgsFeatureRequest().setFilterRect(bbox).setSubsetOfAttributes([]))
mergeWithFid = None
mergeWithGeom = None
max = 0
min = -1
selFeat = QgsFeature()
# use prepared geometries for faster intersection tests
engine = QgsGeometry.createGeometryEngine(geom2Eliminate.constGet())
engine.prepareGeometry()
while fit.nextFeature(selFeat):
if feedback.isCanceled():
break
selGeom = selFeat.geometry()
if engine.intersects(selGeom.constGet()):
# We have a candidate
iGeom = geom2Eliminate.intersection(selGeom)
if not iGeom:
continue
if boundary:
selValue = iGeom.length()
else:
# area. We need a common boundary in
# order to merge
if 0 < iGeom.length():
selValue = selGeom.area()
else:
selValue = -1
if -1 != selValue:
useThis = True
if smallestArea:
if -1 == min:
min = selValue
else:
if selValue < min:
min = selValue
else:
useThis = False
else:
if selValue > max:
max = selValue
else:
useThis = False
if useThis:
mergeWithFid = selFeat.id()
mergeWithGeom = QgsGeometry(selGeom)
# End while fit
if mergeWithFid is not None:
# A successful candidate
newGeom = mergeWithGeom.combine(geom2Eliminate)
if processLayer.changeGeometry(mergeWithFid, newGeom):
madeProgress = True
else:
raise QgsProcessingException(
self.tr('Could not replace geometry of feature with id {0}').format(mergeWithFid))
start = start + add
feedback.setProgress(start)
else:
featNotEliminated.append(feat)
# End for featToEliminate
featToEliminate = featNotEliminated
# End while
if not processLayer.commitChanges():
raise QgsProcessingException(self.tr('Could not commit changes'))
for feature in featNotEliminated:
if feedback.isCanceled():
break
processLayer.dataProvider().addFeature(feature, QgsFeatureSink.FastInsert)
return {self.OUTPUT: dest_id}
| gpl-2.0 |
soumith/fbthrift | thrift/test/py/TValidatorTest.py | 14 | 6097 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from myBinaryStruct.ttypes import *
from myBoolStruct.ttypes import *
from myByteStruct.ttypes import *
from myComplexStruct.ttypes import *
from myDoubleStruct.ttypes import *
from myI16Struct.ttypes import *
from myI32Struct.ttypes import *
from myMixedStruct.ttypes import *
from mySetStruct.ttypes import *
from myMapStruct.ttypes import *
from myNestedMapStruct.ttypes import *
from mySimpleStruct.ttypes import *
from ThriftTest.ttypes import WithAnnotations
#import logging
#log = logging.getLogger()
#log.setLevel(logging.DEBUG)
#ch = logging.StreamHandler()
#ch.setLevel(logging.DEBUG)
#log.addHandler(ch)
from thrift.util.TValidator import TValidator
class ValidationTest(unittest.TestCase):
def setUp(self):
self.v = TValidator()
def valid(self, msg):
self.assertTrue(self.v.validate(msg))
def wrong(self, msg):
self.assertFalse(self.v.validate(msg))
def testBinary(self):
self.valid(myBinaryStruct(a='xyzzy'))
self.wrong(myBinaryStruct(a=3))
def testBool(self):
self.valid(myBoolStruct(a=True))
self.valid(myBoolStruct(a=False))
self.wrong(myBoolStruct(a=1))
self.wrong(myBoolStruct(a='a'))
def testByte(self):
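        # a Thrift byte is a signed 8-bit integer: the valid range is -128..127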
self.valid(myByteStruct(a=0))
self.valid(myByteStruct(a=127))
self.valid(myByteStruct(a=-128))
self.wrong(myByteStruct(a=1.1))
self.wrong(myByteStruct(a=128))
self.wrong(myByteStruct(a=-129))
def testI16(self):
self.valid(myI16Struct(a=4567))
self.wrong(myI16Struct(a=0xFEDCBA987))
def testI32(self):
self.valid(myI32Struct(a=12131415))
self.wrong(myI32Struct(a=0xFFFFFFFFEDCBA))
def testDouble(self):
self.valid(myDoubleStruct(a=-2.192))
self.valid(myDoubleStruct(a=float('inf')))
self.valid(myDoubleStruct(a=float('-inf')))
self.wrong(myDoubleStruct(a=2))
def testMixed(self):
self.valid(myMixedStruct(
a=[],
b=[mySuperSimpleStruct(a=5)],
c={'flame': -8, 'fire': -191},
d={},
e=set([1, 2, 3, 4])
))
def testStruct(self):
self.valid(mySetStruct(a=set([4, 8, 15, 16])))
self.valid(mySetStruct(a=set([])))
self.wrong(mySetStruct(a=set([1, 0xFFFFFFFFFF, 2])))
def testMap(self):
self.valid(myMapStruct(
stringMap={"a": "A", "b": "B"},
boolMap={True: "True", False: "False"},
byteMap={1: "one", 2: "two"},
doubleMap={float("0.1"): "0.one", float("0.2"): "0.two"},
enumMap={1: "male", 2: "female"}
))
self.valid(mySimpleStruct(
a=False,
b=87,
c=7880,
d=-7880,
e=-1,
f=-0.1,
g='T-bone'
))
self.wrong(mySimpleStruct(a=1))
self.valid(myComplexStruct(
mySimpleStruct(
a=True,
b=92,
c=902,
d=65536,
e=123456789,
f=3.1415,
g='Whan that Aprille'
),
b=[314, 15, 9, 26535],
c={"qwerty": mySimpleStruct(c=1),
"slippy": mySimpleStruct(a=False, b=-4, c=5)},
e=EnumTest.EnumTwo,
x=ExceptionTest("test")
))
def testCustomValidator(self):
def a_must_be_true(v):
return v.a
self.v.addClassValidator('mySimpleStruct', a_must_be_true)
self.valid(myComplexStruct(
mySimpleStruct(a=True),
))
self.wrong(myComplexStruct(
mySimpleStruct(a=False),
))
def testNestedMap(self):
self.valid(myNestedMapStruct(
maps={"1": {"1": mySimpleStruct(c=1)},
"2": {"2": mySimpleStruct(a=False, c=2)}}
))
self.wrong(myNestedMapStruct(
maps={"1": {"1": mySimpleStruct(c=1)},
"2": {"2": mySimpleStruct(a=0, c=2)}}
))
def testEnumSpec(self):
self.assertTrue(
hasattr(myComplexStruct.thrift_spec[4][3], '_NAMES_TO_VALUES'))
def testAnnotations(self):
self.assertTrue(hasattr(WithAnnotations, 'thrift_field_annotations'))
self.assertTrue(hasattr(WithAnnotations, 'thrift_struct_annotations'))
self.assertEquals(
WithAnnotations.thrift_field_annotations,
{
1: {
'test.annotation': 'none',
'test.hidden': '1',
},
3: {
'test.hidden': '3',
},
},
)
self.assertEquals(
WithAnnotations.thrift_struct_annotations,
{
'test.struct_annotation': 'ok',
'test.partial': '1',
'test.complex': """
public:
bool empty() const {
return !(__isset.m1 ||
__isset.m2 ||
__isset.s1);
}
""",
},
)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
openlmi/openlmi-scripts | commands/storage/lmi/scripts/storage/cmd/thinlv.py | 2 | 4741 | # coding=utf-8
# Storage Management Providers
#
# Copyright (C) 2014 Red Hat, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.
#
# Authors: Jan Synacek <jsynacek@redhat.com>
#
"""
Thin Logical Volume management.
Usage:
%(cmd)s list [ <tp> ...]
%(cmd)s create <tp> <name> <size>
%(cmd)s delete <tlv> ...
%(cmd)s show [ <tlv> ...]
Commands:
list List available thin logical volumes on given thin pools.
If no thin pools are provided, all thin logical volumes are
listed.
create Create a thin logical volume on given thin pool.
delete Delete given thin logical volume.
show Show detailed information about given Thin Logical Volumes. If no
Thin Logical Volumes are provided, all of them are displayed.
Options:
tp Name of the thin pool, with or without `/dev/` prefix.
size Size of the new logical volume, by default in bytes.
'T', 'G', 'M' or 'K' suffix can be used to specify other
units (TiB, GiB, MiB and KiB) - '1K' specifies 1 KiB
(= 1024 bytes).
The suffix is case insensitive, i.e. 1g = 1G = 1073741824
bytes.
"""
from lmi.shell.LMIUtil import lmi_isinstance
from lmi.scripts.common import command
from lmi.scripts.common.formatter import command as fcmd
from lmi.scripts.storage import show, lvm
from lmi.scripts.storage.common import size2str, str2device, str2size, str2vg
class ThinLVList(command.LmiLister):
COLUMNS = ("Name", "Thin Pool", "Size")
ARG_ARRAY_SUFFIX = 's'
def execute(self, ns, tps=None):
"""
Implementation of 'thinlv list' command.
"""
for tlv in lvm.get_tlvs(ns, tps):
size = size2str(tlv.NumberOfBlocks * tlv.BlockSize,
self.app.config.human_friendly)
tp = lvm.get_lv_vg(ns, tlv)
yield (tlv.ElementName, tp.ElementName, size)
class ThinLVCreate(command.LmiCheckResult):
EXPECT = None
def execute(self, ns, tp, name, size):
"""
Implementation of 'thinlv create' command.
"""
tp = str2vg(ns, tp[0])
lvm.create_tlv(ns, tp, name, str2size(size))
class ThinLVDelete(command.LmiCheckResult):
EXPECT = None
ARG_ARRAY_SUFFIX = 's'
def execute(self, ns, tlvs):
"""
Implementation of 'thinlv delete' command.
"""
for tlv in tlvs:
lvm.delete_lv(ns, tlv)
class ThinLVShow(command.LmiLister):
COLUMNS = ('Name', 'Value')
ARG_ARRAY_SUFFIX = 's'
def execute(self, ns, tlvs=None):
"""
Implementation of 'thinlv show' command.
"""
if not tlvs:
tlvs = lvm.get_tlvs(ns)
for tlv in tlvs:
tlv = str2device(ns, tlv)
cmd = fcmd.NewTableCommand(title=tlv.DeviceID)
yield cmd
for line in show.tlv_show(ns, tlv, self.app.config.human_friendly):
yield line
class ThinLV(command.LmiCommandMultiplexer):
OWN_USAGE = __doc__
COMMANDS = {
'list' : ThinLVList,
'create' : ThinLVCreate,
'delete' : ThinLVDelete,
'show' : ThinLVShow,
}
| bsd-2-clause |
jdgillespie91/trackerSpend | data/expenditure/submit_automated_expenditure.py | 1 | 8161 | # This script adds any spend that occurs regularly on a monthly basis.
import datetime
import gspread
import json
import logging
import os
import requests
import smtplib
import sys
from configs import config
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from oauth2client.client import SignedJwtAssertionCredentials
class Script:
def __init__(self):
self.today = datetime.datetime.today()
self.directory = os.path.dirname(__file__)
self.filename = os.path.splitext(os.path.basename(__file__))[0]
self.path = os.path.join(self.directory, self.filename)
class Flag(Script):
def __init__(self, entry):
Script.__init__(self) # Change this to super if more pythonic.
self.today = self.today.strftime('%Y-%m-%d')
self.directory = os.path.join(self.directory, 'flags')
self.filename = '{0}_{1}.flag'.format(entry.category, self.today)
self.path = os.path.join(self.directory, self.filename)
def exists(self):
if os.path.isfile(self.path):
return True
else:
return False
def touch(self):
open(self.path, 'w').close()
def untouch(self):
os.remove(self.path)
class Entry:
def __init__(self, amount, category, peer_pressure, notes, frequency, due_date, active):
self.amount = amount
self.category = category
self.peer_pressure = peer_pressure
self.notes = notes
self.frequency = frequency
self.due_date = due_date
self.active = active
class Form:
def __init__(self, entry):
self.amount = entry.amount
self.category = entry.category
self.peer_pressure = entry.peer_pressure
self.notes = entry.notes
self.conf = config.Config('expenditure_form')
self.submission = {'entry.1788911046': self.amount,
'entry.22851461': '__other_option__',
'entry.22851461.other_option_response': self.category,
'entry.2106932303': self.peer_pressure,
'entry.1728679999': self.notes}
self.response_code = None
def submit(self):
response = requests.post(self.conf.url, self.submission)
self.response_code = response.status_code
def email(self, success):
# The following code is based on
# http://stackoverflow.com/questions/778202/smtplib-and-gmail-python-script-problems
# http://en.wikibooks.org/wiki/Python_Programming/Email
# I need to troubleshoot and test for errors.
message = MIMEMultipart()
message['From'] = self.conf.sender
message['To'] = self.conf.recipient
message['Subject'] = 'Expenditure Submission Update (Automated Email)'
if success:
body = 'The following entry has been submitted.\n\nAmount: {0}\nCategory: {1}\nPeer pressure: {2}\n' \
'Notes: {3}\n'.format(self.amount, self.category, self.peer_pressure, self.notes)
else:
body = 'The following entry failed submission.\n\nAmount: {0}\nCategory: {1}\nPeer pressure: {2}\n' \
'Notes: {3}\n'.format(self.amount, self.category, self.peer_pressure, self.notes)
message.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(self.conf.username, self.conf.password)
server.sendmail(self.conf.sender, self.conf.recipient, message.as_string())
server.close()
# Initialise the Entry class based on a list row.
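# Expected column layout of a row from the entries sheet:
#   row[0]=amount, row[1]=category, row[2]=peer_pressure, row[3]=notes,
#   row[4]=frequency, row[5]=due_date, row[6]=active ('Yes'/'No')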
def create_entry(row):
category = row[1]
peer_pressure = row[2]
notes = row[3]
frequency = row[4]
active = True if row[6] == 'Yes' else False
    # We assign zero to both amount and due_date if either is an invalid type. We do this silently because the email
# confirmation will contain the details of the submission and highlight any issues that need to be addressed.
try:
amount = float(row[0])
due_date = int(row[5])
except (TypeError, ValueError):
amount = 0
due_date = 0
entry = Entry(amount, category, peer_pressure, notes, frequency, due_date, active)
return entry
def create_logger(script):
today = script.today.strftime('%Y-%m-%d_%H:%M:%S')
directory = os.path.join(script.directory, 'logs')
filename = '{0}_{1}.log'.format(script.filename, today)
path = os.path.join(directory, filename)
logger = logging.getLogger('logger')
logger.setLevel(logging.DEBUG)
# Add file handler to logger.
file_handler = logging.FileHandler(path)
formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s', '%Y-%m-%d %H:%M:%S')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.debug('Log file created: {0}\n'.format(path))
# Add smtp handler to logger.
# smtp_handler = logging.handlers.SMTPHandler(... # Complete this
# logger.debug('SMTP functionality configured.')
return logger
def parse_entries_sheet():
conf = config.Config('expenditure_entries')
json_key = json.load(open(conf.key))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], bytes(json_key['private_key'], 'UTF-8'), scope)
session = gspread.authorize(credentials)
workbook = session.open_by_key(conf.workbook)
worksheet = workbook.worksheet(conf.worksheet)
# Parse row-by-row until an empty row is encountered (data starts on second row).
row_index = 2
entries = []
while worksheet.row_values(row_index) and row_index <= worksheet.row_count:
row = worksheet.row_values(row_index)
entry = create_entry(row)
entries.append(entry)
row_index += 1
return entries
if __name__ == '__main__':
script = Script()
logger = create_logger(script)
logger.info('Processing entries sheet.')
entries = parse_entries_sheet()
logger.info('Entries sheet processed.\n')
for entry in entries:
logger.info('Processing entry: {0}.'.format(entry.category))
if entry.active:
logger.info('Entry is active. Continuing...')
flag = Flag(entry)
if not flag.exists():
logger.info('The flag file does not exist. Touching...')
flag.touch()
if entry.frequency == 'Monthly':
if entry.due_date == script.today.day: # Think about introducing a "today" variable. I don't think it's logical to include "today" in the Script class.
logger.info('An entry is required. Submitting...')
form = Form(entry)
form.submit()
if form.response_code == requests.codes.ok: # Have this as try: form.submit() as opposed to if/else (will read better).
logger.info('The submission was accepted. Moving to next entry.\n')
form.email(success=True)
else:
logger.info('The submission was not accepted. '
'Removing flag file and moving to next entry.\n')
form.email(success=False)
flag.untouch()
else:
logger.info('A submission is not required today. '
                    'Removing flag file and moving to next entry.\n')
flag.untouch()
else:
logger.info('{0} spend is not yet implemented. '
'Removing flag file and moving to next entry.\n'.format(entry.frequency))
flag.untouch()
continue
else:
logger.info('The flag file exists. Moving to next entry.\n')
else:
logger.info('Entry is inactive. Moving to next entry.\n')
logger.info('End of script.')
sys.exit(0)
| mit |
christianeisendle/linux | scripts/analyze_suspend.py | 1537 | 120394 | #!/usr/bin/python
#
# Tool for analyzing suspend/resume timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors:
# Todd Brandt <todd.e.brandt@linux.intel.com>
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's suspend/resume time. Using a kernel image built
# with a few extra options enabled, the tool will execute a suspend and
# will capture dmesg and ftrace data until resume is complete. This data
# is transformed into a device timeline and a callgraph to give a quick
# and detailed view of which devices and callbacks are taking the most
# time in suspend/resume. The output is a single html file which can be
# viewed in firefox or chrome.
#
# The following kernel build options are required:
# CONFIG_PM_DEBUG=y
# CONFIG_PM_SLEEP_DEBUG=y
# CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER=y
# CONFIG_FUNCTION_GRAPH_TRACER=y
#
# For kernel versions older than 3.15:
# The following additional kernel parameters are required:
# (e.g. in file /etc/default/grub)
# GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..."
#
# ----------------- LIBRARIES --------------------
import sys
import time
import os
import string
import re
import platform
from datetime import datetime
import struct
# ----------------- CLASSES --------------------
# Class: SystemValues
# Description:
# A global, single-instance container used to
# store system values and test parameters
class SystemValues:
version = 3.0
verbose = False
testdir = '.'
tpath = '/sys/kernel/debug/tracing/'
fpdtpath = '/sys/firmware/acpi/tables/FPDT'
epath = '/sys/kernel/debug/tracing/events/power/'
traceevents = [
'suspend_resume',
'device_pm_callback_end',
'device_pm_callback_start'
]
modename = {
'freeze': 'Suspend-To-Idle (S0)',
'standby': 'Power-On Suspend (S1)',
'mem': 'Suspend-to-RAM (S3)',
'disk': 'Suspend-to-disk (S4)'
}
mempath = '/dev/mem'
powerfile = '/sys/power/state'
suspendmode = 'mem'
hostname = 'localhost'
prefix = 'test'
teststamp = ''
dmesgfile = ''
ftracefile = ''
htmlfile = ''
rtcwake = False
rtcwaketime = 10
rtcpath = ''
android = False
adb = 'adb'
devicefilter = []
stamp = 0
execcount = 1
x2delay = 0
usecallgraph = False
usetraceevents = False
usetraceeventsonly = False
notestrun = False
altdevname = dict()
postresumetime = 0
tracertypefmt = '# tracer: (?P<t>.*)'
firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
postresumefmt = '# post resume time (?P<t>[0-9]*)$'
stampfmt = '# suspend-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
'(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
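	# matches e.g. '# suspend-052415-210355 myhost mem 3.19.0', the stamp
	# line built in initTestOutput (host and kernel values are examples)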
def __init__(self):
self.hostname = platform.node()
if(self.hostname == ''):
self.hostname = 'localhost'
rtc = "rtc0"
if os.path.exists('/dev/rtc'):
rtc = os.readlink('/dev/rtc')
rtc = '/sys/class/rtc/'+rtc
if os.path.exists(rtc) and os.path.exists(rtc+'/date') and \
os.path.exists(rtc+'/time') and os.path.exists(rtc+'/wakealarm'):
self.rtcpath = rtc
def setOutputFile(self):
if((self.htmlfile == '') and (self.dmesgfile != '')):
m = re.match('(?P<name>.*)_dmesg\.txt$', self.dmesgfile)
if(m):
self.htmlfile = m.group('name')+'.html'
if((self.htmlfile == '') and (self.ftracefile != '')):
m = re.match('(?P<name>.*)_ftrace\.txt$', self.ftracefile)
if(m):
self.htmlfile = m.group('name')+'.html'
if(self.htmlfile == ''):
self.htmlfile = 'output.html'
def initTestOutput(self, subdir):
if(not self.android):
self.prefix = self.hostname
v = open('/proc/version', 'r').read().strip()
kver = string.split(v)[2]
else:
self.prefix = 'android'
v = os.popen(self.adb+' shell cat /proc/version').read().strip()
kver = string.split(v)[2]
testtime = datetime.now().strftime('suspend-%m%d%y-%H%M%S')
if(subdir != "."):
self.testdir = subdir+"/"+testtime
else:
self.testdir = testtime
self.teststamp = \
'# '+testtime+' '+self.prefix+' '+self.suspendmode+' '+kver
self.dmesgfile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_dmesg.txt'
self.ftracefile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_ftrace.txt'
self.htmlfile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'.html'
os.mkdir(self.testdir)
def setDeviceFilter(self, devnames):
self.devicefilter = string.split(devnames)
def rtcWakeAlarm(self):
os.system('echo 0 > '+self.rtcpath+'/wakealarm')
outD = open(self.rtcpath+'/date', 'r').read().strip()
outT = open(self.rtcpath+'/time', 'r').read().strip()
mD = re.match('^(?P<y>[0-9]*)-(?P<m>[0-9]*)-(?P<d>[0-9]*)', outD)
mT = re.match('^(?P<h>[0-9]*):(?P<m>[0-9]*):(?P<s>[0-9]*)', outT)
if(mD and mT):
# get the current time from hardware
utcoffset = int((datetime.now() - datetime.utcnow()).total_seconds())
dt = datetime(\
int(mD.group('y')), int(mD.group('m')), int(mD.group('d')),
int(mT.group('h')), int(mT.group('m')), int(mT.group('s')))
nowtime = int(dt.strftime('%s')) + utcoffset
else:
# if hardware time fails, use the software time
nowtime = int(datetime.now().strftime('%s'))
alarm = nowtime + self.rtcwaketime
os.system('echo %d > %s/wakealarm' % (alarm, self.rtcpath))
sysvals = SystemValues()
# Class: DeviceNode
# Description:
# A container used to create a device hierarchy, with a single root node
# and a tree of child nodes. Used by Data.deviceTopology()
class DeviceNode:
name = ''
children = 0
depth = 0
def __init__(self, nodename, nodedepth):
self.name = nodename
self.children = []
self.depth = nodedepth
# Class: Data
# Description:
# The primary container for suspend/resume test data. There is one for
# each test run. The data is organized into a chronological hierarchy:
# Data.dmesg {
# root structure, started as dmesg & ftrace, but now only ftrace
# contents: times for suspend start/end, resume start/end, fwdata
# phases {
# 10 sequential, non-overlapping phases of S/R
# contents: times for phase start/end, order/color data for html
# devlist {
# device callback or action list for this phase
# device {
# a single device callback or generic action
# contents: start/stop times, pid/cpu/driver info
# parents/children, html id for timeline/callgraph
# optionally includes an ftrace callgraph
# optionally includes intradev trace events
# }
# }
# }
# }
#
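# For example, the start time of a device callback in the suspend phase is
# reached via data.dmesg['suspend']['list'][devname]['start'], where devname
# is any key of that phase's device list.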
class Data:
dmesg = {} # root data structure
phases = [] # ordered list of phases
start = 0.0 # test start
end = 0.0 # test end
tSuspended = 0.0 # low-level suspend start
tResumed = 0.0 # low-level resume start
tLow = 0.0 # time spent in low-level suspend (standby/freeze)
fwValid = False # is firmware data available
fwSuspend = 0 # time spent in firmware suspend
fwResume = 0 # time spent in firmware resume
dmesgtext = [] # dmesg text file in memory
testnumber = 0
idstr = ''
html_device_id = 0
stamp = 0
outfile = ''
def __init__(self, num):
idchar = 'abcdefghijklmnopqrstuvwxyz'
self.testnumber = num
self.idstr = idchar[num]
self.dmesgtext = []
self.phases = []
self.dmesg = { # fixed list of 10 phases
'suspend_prepare': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#CCFFCC', 'order': 0},
'suspend': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#88FF88', 'order': 1},
'suspend_late': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#00AA00', 'order': 2},
'suspend_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#008888', 'order': 3},
'suspend_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#0000FF', 'order': 4},
'resume_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FF0000', 'order': 5},
'resume_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FF9900', 'order': 6},
'resume_early': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFCC00', 'order': 7},
'resume': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFFF88', 'order': 8},
'resume_complete': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFFFCC', 'order': 9}
}
self.phases = self.sortedPhases()
def getStart(self):
return self.dmesg[self.phases[0]]['start']
def setStart(self, time):
self.start = time
self.dmesg[self.phases[0]]['start'] = time
def getEnd(self):
return self.dmesg[self.phases[-1]]['end']
def setEnd(self, time):
self.end = time
self.dmesg[self.phases[-1]]['end'] = time
def isTraceEventOutsideDeviceCalls(self, pid, time):
for phase in self.phases:
list = self.dmesg[phase]['list']
for dev in list:
d = list[dev]
if(d['pid'] == pid and time >= d['start'] and
time <= d['end']):
return False
return True
def addIntraDevTraceEvent(self, action, name, pid, time):
if(action == 'mutex_lock_try'):
color = 'red'
elif(action == 'mutex_lock_pass'):
color = 'green'
elif(action == 'mutex_unlock'):
color = 'blue'
else:
# create separate colors based on the name
v1 = len(name)*10 % 256
v2 = string.count(name, 'e')*100 % 256
v3 = ord(name[0])*20 % 256
color = '#%06X' % ((v1*0x10000) + (v2*0x100) + v3)
for phase in self.phases:
list = self.dmesg[phase]['list']
for dev in list:
d = list[dev]
if(d['pid'] == pid and time >= d['start'] and
time <= d['end']):
e = TraceEvent(action, name, color, time)
if('traceevents' not in d):
d['traceevents'] = []
d['traceevents'].append(e)
return d
break
return 0
def capIntraDevTraceEvent(self, action, name, pid, time):
for phase in self.phases:
list = self.dmesg[phase]['list']
for dev in list:
d = list[dev]
if(d['pid'] == pid and time >= d['start'] and
time <= d['end']):
if('traceevents' not in d):
return
for e in d['traceevents']:
if(e.action == action and
e.name == name and not e.ready):
e.length = time - e.time
e.ready = True
break
return
def trimTimeVal(self, t, t0, dT, left):
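		# Remove a gap of length dT anchored at t0 from a single timestamp:
		# with left=True, times after t0 are pulled back by dT; with
		# left=False, times before the end of the gap are pushed forward by
		# dT; times that fall inside the gap are clamped to its edge.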
if left:
if(t > t0):
if(t - dT < t0):
return t0
return t - dT
else:
return t
else:
if(t < t0 + dT):
if(t > t0):
return t0 + dT
return t + dT
else:
return t
def trimTime(self, t0, dT, left):
self.tSuspended = self.trimTimeVal(self.tSuspended, t0, dT, left)
self.tResumed = self.trimTimeVal(self.tResumed, t0, dT, left)
self.start = self.trimTimeVal(self.start, t0, dT, left)
self.end = self.trimTimeVal(self.end, t0, dT, left)
for phase in self.phases:
p = self.dmesg[phase]
p['start'] = self.trimTimeVal(p['start'], t0, dT, left)
p['end'] = self.trimTimeVal(p['end'], t0, dT, left)
list = p['list']
for name in list:
d = list[name]
d['start'] = self.trimTimeVal(d['start'], t0, dT, left)
d['end'] = self.trimTimeVal(d['end'], t0, dT, left)
if('ftrace' in d):
cg = d['ftrace']
cg.start = self.trimTimeVal(cg.start, t0, dT, left)
cg.end = self.trimTimeVal(cg.end, t0, dT, left)
for line in cg.list:
line.time = self.trimTimeVal(line.time, t0, dT, left)
if('traceevents' in d):
for e in d['traceevents']:
e.time = self.trimTimeVal(e.time, t0, dT, left)
def normalizeTime(self, tZero):
# first trim out any standby or freeze clock time
if(self.tSuspended != self.tResumed):
if(self.tResumed > tZero):
self.trimTime(self.tSuspended, \
self.tResumed-self.tSuspended, True)
else:
self.trimTime(self.tSuspended, \
self.tResumed-self.tSuspended, False)
# shift the timeline so that tZero is the new 0
self.tSuspended -= tZero
self.tResumed -= tZero
self.start -= tZero
self.end -= tZero
for phase in self.phases:
p = self.dmesg[phase]
p['start'] -= tZero
p['end'] -= tZero
list = p['list']
for name in list:
d = list[name]
d['start'] -= tZero
d['end'] -= tZero
if('ftrace' in d):
cg = d['ftrace']
cg.start -= tZero
cg.end -= tZero
for line in cg.list:
line.time -= tZero
if('traceevents' in d):
for e in d['traceevents']:
e.time -= tZero
def newPhaseWithSingleAction(self, phasename, devname, start, end, color):
for phase in self.phases:
self.dmesg[phase]['order'] += 1
self.html_device_id += 1
devid = '%s%d' % (self.idstr, self.html_device_id)
list = dict()
list[devname] = \
{'start': start, 'end': end, 'pid': 0, 'par': '',
'length': (end-start), 'row': 0, 'id': devid, 'drv': '' };
self.dmesg[phasename] = \
{'list': list, 'start': start, 'end': end,
'row': 0, 'color': color, 'order': 0}
self.phases = self.sortedPhases()
def newPhase(self, phasename, start, end, color, order):
if(order < 0):
order = len(self.phases)
for phase in self.phases[order:]:
self.dmesg[phase]['order'] += 1
if(order > 0):
p = self.phases[order-1]
self.dmesg[p]['end'] = start
if(order < len(self.phases)):
p = self.phases[order]
self.dmesg[p]['start'] = end
list = dict()
self.dmesg[phasename] = \
{'list': list, 'start': start, 'end': end,
'row': 0, 'color': color, 'order': order}
self.phases = self.sortedPhases()
def setPhase(self, phase, ktime, isbegin):
if(isbegin):
self.dmesg[phase]['start'] = ktime
else:
self.dmesg[phase]['end'] = ktime
def dmesgSortVal(self, phase):
return self.dmesg[phase]['order']
def sortedPhases(self):
return sorted(self.dmesg, key=self.dmesgSortVal)
def sortedDevices(self, phase):
list = self.dmesg[phase]['list']
slist = []
tmp = dict()
for devname in list:
dev = list[devname]
tmp[dev['start']] = devname
for t in sorted(tmp):
slist.append(tmp[t])
return slist
def fixupInitcalls(self, phase, end):
# if any calls never returned, clip them at system resume end
phaselist = self.dmesg[phase]['list']
for devname in phaselist:
dev = phaselist[devname]
if(dev['end'] < 0):
dev['end'] = end
				vprint('%s (%s): callback didn\'t return' % (devname, phase))
def deviceFilter(self, devicefilter):
# remove all by the relatives of the filter devnames
filter = []
for phase in self.phases:
list = self.dmesg[phase]['list']
for name in devicefilter:
dev = name
while(dev in list):
if(dev not in filter):
filter.append(dev)
dev = list[dev]['par']
children = self.deviceDescendants(name, phase)
for dev in children:
if(dev not in filter):
filter.append(dev)
for phase in self.phases:
list = self.dmesg[phase]['list']
rmlist = []
for name in list:
pid = list[name]['pid']
if(name not in filter and pid >= 0):
rmlist.append(name)
for name in rmlist:
del list[name]
def fixupInitcallsThatDidntReturn(self):
# if any calls never returned, clip them at system resume end
for phase in self.phases:
self.fixupInitcalls(phase, self.getEnd())
def newActionGlobal(self, name, start, end):
# which phase is this device callback or action "in"
targetphase = "none"
overlap = 0.0
for phase in self.phases:
pstart = self.dmesg[phase]['start']
pend = self.dmesg[phase]['end']
o = max(0, min(end, pend) - max(start, pstart))
if(o > overlap):
targetphase = phase
overlap = o
if targetphase in self.phases:
self.newAction(targetphase, name, -1, '', start, end, '')
return True
return False
def newAction(self, phase, name, pid, parent, start, end, drv):
# new device callback for a specific phase
self.html_device_id += 1
devid = '%s%d' % (self.idstr, self.html_device_id)
list = self.dmesg[phase]['list']
length = -1.0
if(start >= 0 and end >= 0):
length = end - start
list[name] = {'start': start, 'end': end, 'pid': pid, 'par': parent,
'length': length, 'row': 0, 'id': devid, 'drv': drv }
def deviceIDs(self, devlist, phase):
idlist = []
list = self.dmesg[phase]['list']
for devname in list:
if devname in devlist:
idlist.append(list[devname]['id'])
return idlist
def deviceParentID(self, devname, phase):
pdev = ''
pdevid = ''
list = self.dmesg[phase]['list']
if devname in list:
pdev = list[devname]['par']
if pdev in list:
return list[pdev]['id']
return pdev
def deviceChildren(self, devname, phase):
devlist = []
list = self.dmesg[phase]['list']
for child in list:
if(list[child]['par'] == devname):
devlist.append(child)
return devlist
def deviceDescendants(self, devname, phase):
children = self.deviceChildren(devname, phase)
family = children
for child in children:
family += self.deviceDescendants(child, phase)
return family
def deviceChildrenIDs(self, devname, phase):
devlist = self.deviceChildren(devname, phase)
return self.deviceIDs(devlist, phase)
def printDetails(self):
vprint(' test start: %f' % self.start)
for phase in self.phases:
dc = len(self.dmesg[phase]['list'])
vprint(' %16s: %f - %f (%d devices)' % (phase, \
self.dmesg[phase]['start'], self.dmesg[phase]['end'], dc))
vprint(' test end: %f' % self.end)
def masterTopology(self, name, list, depth):
node = DeviceNode(name, depth)
for cname in list:
clist = self.deviceChildren(cname, 'resume')
cnode = self.masterTopology(cname, clist, depth+1)
node.children.append(cnode)
return node
def printTopology(self, node):
html = ''
if node.name:
info = ''
drv = ''
for phase in self.phases:
list = self.dmesg[phase]['list']
if node.name in list:
s = list[node.name]['start']
e = list[node.name]['end']
if list[node.name]['drv']:
drv = ' {'+list[node.name]['drv']+'}'
info += ('<li>%s: %.3fms</li>' % (phase, (e-s)*1000))
html += '<li><b>'+node.name+drv+'</b>'
if info:
html += '<ul>'+info+'</ul>'
html += '</li>'
if len(node.children) > 0:
html += '<ul>'
for cnode in node.children:
html += self.printTopology(cnode)
html += '</ul>'
return html
def rootDeviceList(self):
# list of devices graphed
real = []
for phase in self.dmesg:
list = self.dmesg[phase]['list']
for dev in list:
if list[dev]['pid'] >= 0 and dev not in real:
real.append(dev)
# list of top-most root devices
rootlist = []
for phase in self.dmesg:
list = self.dmesg[phase]['list']
for dev in list:
pdev = list[dev]['par']
if(re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
continue
if pdev and pdev not in real and pdev not in rootlist:
rootlist.append(pdev)
return rootlist
def deviceTopology(self):
rootlist = self.rootDeviceList()
master = self.masterTopology('', rootlist, 0)
return self.printTopology(master)
# Class: TraceEvent
# Description:
# A container for trace event data found in the ftrace file
class TraceEvent:
ready = False
name = ''
time = 0.0
color = '#FFFFFF'
length = 0.0
action = ''
def __init__(self, a, n, c, t):
self.action = a
self.name = n
self.color = c
self.time = t
# Class: FTraceLine
# Description:
# A container for a single line of ftrace data. There are six basic types:
# callgraph line:
# call: " dpm_run_callback() {"
# return: " }"
# leaf: " dpm_run_callback();"
# trace event:
# tracing_mark_write: SUSPEND START or RESUME COMPLETE
# suspend_resume: phase or custom exec block data
# device_pm_callback: device callback info
class FTraceLine:
time = 0.0
length = 0.0
fcall = False
freturn = False
fevent = False
depth = 0
name = ''
type = ''
def __init__(self, t, m, d):
self.time = float(t)
# is this a trace event
if(d == 'traceevent' or re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
if(d == 'traceevent'):
# nop format trace event
msg = m
else:
# function_graph format trace event
em = re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)
msg = em.group('msg')
emm = re.match('^(?P<call>.*?): (?P<msg>.*)', msg)
if(emm):
self.name = emm.group('msg')
self.type = emm.group('call')
else:
self.name = msg
self.fevent = True
return
# convert the duration to seconds
if(d):
self.length = float(d)/1000000
# the indentation determines the depth
match = re.match('^(?P<d> *)(?P<o>.*)$', m)
if(not match):
return
self.depth = self.getDepth(match.group('d'))
m = match.group('o')
# function return
if(m[0] == '}'):
self.freturn = True
if(len(m) > 1):
# includes comment with function name
match = re.match('^} *\/\* *(?P<n>.*) *\*\/$', m)
if(match):
self.name = match.group('n')
# function call
else:
self.fcall = True
# function call with children
if(m[-1] == '{'):
match = re.match('^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n')
# function call with no children (leaf)
elif(m[-1] == ';'):
self.freturn = True
match = re.match('^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n')
# something else (possibly a trace marker)
else:
self.name = m
def getDepth(self, str):
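		# function_graph output indents two spaces per level of call depth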
return len(str)/2
def debugPrint(self, dev):
if(self.freturn and self.fcall):
print('%s -- %f (%02d): %s(); (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
elif(self.freturn):
print('%s -- %f (%02d): %s} (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
else:
print('%s -- %f (%02d): %s() { (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
# Class: FTraceCallGraph
# Description:
# A container for the ftrace callgraph of a single recursive function.
# This can be a dpm_run_callback, dpm_prepare, or dpm_complete callgraph
# Each instance is tied to a single device in a single phase, and is
# comprised of an ordered list of FTraceLine objects
class FTraceCallGraph:
start = -1.0
end = -1.0
list = []
invalid = False
depth = 0
def __init__(self):
self.start = -1.0
self.end = -1.0
self.list = []
self.depth = 0
def setDepth(self, line):
if(line.fcall and not line.freturn):
line.depth = self.depth
self.depth += 1
elif(line.freturn and not line.fcall):
self.depth -= 1
line.depth = self.depth
else:
line.depth = self.depth
def addLine(self, line, match):
if(not self.invalid):
self.setDepth(line)
if(line.depth == 0 and line.freturn):
if(self.start < 0):
self.start = line.time
self.end = line.time
self.list.append(line)
return True
if(self.invalid):
return False
if(len(self.list) >= 1000000 or self.depth < 0):
if(len(self.list) > 0):
first = self.list[0]
self.list = []
self.list.append(first)
self.invalid = True
if(not match):
return False
id = 'task %s cpu %s' % (match.group('pid'), match.group('cpu'))
window = '(%f - %f)' % (self.start, line.time)
if(self.depth < 0):
print('Too much data for '+id+\
' (buffer overflow), ignoring this callback')
else:
print('Too much data for '+id+\
' '+window+', ignoring this callback')
return False
self.list.append(line)
if(self.start < 0):
self.start = line.time
return False
def slice(self, t0, tN):
minicg = FTraceCallGraph()
count = -1
firstdepth = 0
for l in self.list:
if(l.time < t0 or l.time > tN):
continue
if(count < 0):
if(not l.fcall or l.name == 'dev_driver_string'):
continue
firstdepth = l.depth
count = 0
l.depth -= firstdepth
minicg.addLine(l, 0)
if((count == 0 and l.freturn and l.fcall) or
(count > 0 and l.depth <= 0)):
break
count += 1
return minicg
def sanityCheck(self):
stack = dict()
cnt = 0
for l in self.list:
if(l.fcall and not l.freturn):
stack[l.depth] = l
cnt += 1
elif(l.freturn and not l.fcall):
if(l.depth not in stack):
return False
stack[l.depth].length = l.length
stack[l.depth] = 0
l.length = 0
cnt -= 1
if(cnt == 0):
return True
return False
def debugPrint(self, filename):
if(filename == 'stdout'):
			print('[%f - %f]' % (self.start, self.end))
for l in self.list:
if(l.freturn and l.fcall):
print('%f (%02d): %s(); (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
elif(l.freturn):
print('%f (%02d): %s} (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
else:
print('%f (%02d): %s() { (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
print(' ')
else:
fp = open(filename, 'w')
print(filename)
for l in self.list:
if(l.freturn and l.fcall):
fp.write('%f (%02d): %s(); (%.3f us)\n' % (l.time, \
l.depth, l.name, l.length*1000000))
elif(l.freturn):
fp.write('%f (%02d): %s} (%.3f us)\n' % (l.time, \
l.depth, l.name, l.length*1000000))
else:
fp.write('%f (%02d): %s() { (%.3f us)\n' % (l.time, \
l.depth, l.name, l.length*1000000))
fp.close()
# Class: Timeline
# Description:
# A container for a suspend/resume html timeline. In older versions
# of the script there were multiple timelines, but in the latest
# there is only one.
class Timeline:
html = {}
scaleH = 0.0 # height of the row as a percent of the timeline height
rowH = 0.0 # height of each row in percent of the timeline height
row_height_pixels = 30
maxrows = 0
height = 0
def __init__(self):
self.html = {
'timeline': '',
'legend': '',
'scale': ''
}
def setRows(self, rows):
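		# one row is reserved for the time scale; the remaining height is
		# split evenly among the device rows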
self.maxrows = int(rows)
self.scaleH = 100.0/float(self.maxrows)
self.height = self.maxrows*self.row_height_pixels
r = float(self.maxrows - 1)
if(r < 1.0):
r = 1.0
self.rowH = (100.0 - self.scaleH)/r
# Class: TestRun
# Description:
# A container for a suspend/resume test run. This is necessary as
# there could be more than one, and they need to be separate.
class TestRun:
ftrace_line_fmt_fg = \
'^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
'[ +!]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
ftrace_line_fmt_nop = \
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
'(?P<flags>.{4}) *(?P<time>[0-9\.]*): *'+\
'(?P<msg>.*)'
ftrace_line_fmt = ftrace_line_fmt_nop
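	# illustrative samples of lines these formats are meant to match
	# (hypothetical, for orientation only):
	#   fg:  ' 1234.5678 |  0)  kworker-57  |  1.234 us |  dpm_run_callback();'
	#   nop: ' kworker/0:1-57  [000] ....  1234.5678: suspend_resume: ...'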
cgformat = False
ftemp = dict()
ttemp = dict()
inthepipe = False
tracertype = ''
data = 0
def __init__(self, dataobj):
self.data = dataobj
self.ftemp = dict()
self.ttemp = dict()
def isReady(self):
		if(self.tracertype == '' or not self.data):
return False
return True
def setTracerType(self, tracer):
self.tracertype = tracer
if(tracer == 'function_graph'):
self.cgformat = True
self.ftrace_line_fmt = self.ftrace_line_fmt_fg
elif(tracer == 'nop'):
self.ftrace_line_fmt = self.ftrace_line_fmt_nop
else:
doError('Invalid tracer format: [%s]' % tracer, False)
# ----------------- FUNCTIONS --------------------
# Function: vprint
# Description:
# verbose print (prints only with -verbose option)
# Arguments:
# msg: the debug/log message to print
def vprint(msg):
global sysvals
if(sysvals.verbose):
print(msg)
# Function: initFtrace
# Description:
# Configure ftrace to use trace events and/or a callgraph
def initFtrace():
global sysvals
tp = sysvals.tpath
cf = 'dpm_run_callback'
if(sysvals.usetraceeventsonly):
cf = '-e dpm_prepare -e dpm_complete -e dpm_run_callback'
if(sysvals.usecallgraph or sysvals.usetraceevents):
print('INITIALIZING FTRACE...')
# turn trace off
os.system('echo 0 > '+tp+'tracing_on')
# set the trace clock to global
os.system('echo global > '+tp+'trace_clock')
# set trace buffer to a huge value
os.system('echo nop > '+tp+'current_tracer')
os.system('echo 100000 > '+tp+'buffer_size_kb')
# initialize the callgraph trace, unless this is an x2 run
if(sysvals.usecallgraph and sysvals.execcount == 1):
# set trace type
os.system('echo function_graph > '+tp+'current_tracer')
os.system('echo "" > '+tp+'set_ftrace_filter')
# set trace format options
os.system('echo funcgraph-abstime > '+tp+'trace_options')
os.system('echo funcgraph-proc > '+tp+'trace_options')
# focus only on device suspend and resume
os.system('cat '+tp+'available_filter_functions | grep '+\
cf+' > '+tp+'set_graph_function')
if(sysvals.usetraceevents):
# turn trace events on
events = iter(sysvals.traceevents)
for e in events:
os.system('echo 1 > '+sysvals.epath+e+'/enable')
# clear the trace buffer
os.system('echo "" > '+tp+'trace')
# Function: initFtraceAndroid
# Description:
# Configure ftrace to capture trace events
def initFtraceAndroid():
global sysvals
tp = sysvals.tpath
if(sysvals.usetraceevents):
print('INITIALIZING FTRACE...')
# turn trace off
os.system(sysvals.adb+" shell 'echo 0 > "+tp+"tracing_on'")
# set the trace clock to global
os.system(sysvals.adb+" shell 'echo global > "+tp+"trace_clock'")
# set trace buffer to a huge value
os.system(sysvals.adb+" shell 'echo nop > "+tp+"current_tracer'")
os.system(sysvals.adb+" shell 'echo 10000 > "+tp+"buffer_size_kb'")
# turn trace events on
events = iter(sysvals.traceevents)
for e in events:
os.system(sysvals.adb+" shell 'echo 1 > "+\
sysvals.epath+e+"/enable'")
# clear the trace buffer
os.system(sysvals.adb+" shell 'echo \"\" > "+tp+"trace'")
# Function: verifyFtrace
# Description:
# Check that ftrace is working on the system
# Output:
# True or False
def verifyFtrace():
global sysvals
# files needed for any trace data
files = ['buffer_size_kb', 'current_tracer', 'trace', 'trace_clock',
'trace_marker', 'trace_options', 'tracing_on']
# files needed for callgraph trace data
tp = sysvals.tpath
if(sysvals.usecallgraph):
files += [
'available_filter_functions',
'set_ftrace_filter',
'set_graph_function'
]
for f in files:
if(sysvals.android):
out = os.popen(sysvals.adb+' shell ls '+tp+f).read().strip()
if(out != tp+f):
return False
else:
if(os.path.exists(tp+f) == False):
return False
return True
# Function: parseStamp
# Description:
# Pull in the stamp comment line from the data file(s),
# create the stamp, and add it to the global sysvals object
# Arguments:
# m: the valid re.match output for the stamp line
def parseStamp(m, data):
global sysvals
data.stamp = {'time': '', 'host': '', 'mode': ''}
dt = datetime(int(m.group('y'))+2000, int(m.group('m')),
int(m.group('d')), int(m.group('H')), int(m.group('M')),
int(m.group('S')))
data.stamp['time'] = dt.strftime('%B %d %Y, %I:%M:%S %p')
data.stamp['host'] = m.group('host')
data.stamp['mode'] = m.group('mode')
data.stamp['kernel'] = m.group('kernel')
sysvals.suspendmode = data.stamp['mode']
if not sysvals.stamp:
sysvals.stamp = data.stamp
# Function: diffStamp
# Description:
# compare the host, kernel, and mode fields in two stamps
# Arguments:
# stamp1: string array with mode, kernel, and host
# stamp2: string array with mode, kernel, and host
# Return:
# True if stamps differ, False if they're the same
def diffStamp(stamp1, stamp2):
if 'host' in stamp1 and 'host' in stamp2:
if stamp1['host'] != stamp2['host']:
return True
if 'kernel' in stamp1 and 'kernel' in stamp2:
if stamp1['kernel'] != stamp2['kernel']:
return True
if 'mode' in stamp1 and 'mode' in stamp2:
if stamp1['mode'] != stamp2['mode']:
return True
return False
# Function: doesTraceLogHaveTraceEvents
# Description:
# Quickly determine if the ftrace log has some or all of the trace events
# required for primary parsing. Set the usetraceevents and/or
# usetraceeventsonly flags in the global sysvals object
def doesTraceLogHaveTraceEvents():
global sysvals
sysvals.usetraceeventsonly = True
sysvals.usetraceevents = False
for e in sysvals.traceevents:
out = os.popen('cat '+sysvals.ftracefile+' | grep "'+e+': "').read()
if(not out):
sysvals.usetraceeventsonly = False
if(e == 'suspend_resume' and out):
sysvals.usetraceevents = True
# Function: appendIncompleteTraceLog
# Description:
# [deprecated for kernel 3.15 or newer]
# Legacy support of ftrace outputs that lack the device_pm_callback
# and/or suspend_resume trace events. The primary data should be
# taken from dmesg, and this ftrace is used only for callgraph data
# or custom actions in the timeline. The data is appended to the Data
# objects provided.
# Arguments:
# testruns: the array of Data objects obtained from parseKernelLog
def appendIncompleteTraceLog(testruns):
global sysvals
# create TestRun vessels for ftrace parsing
testcnt = len(testruns)
testidx = -1
testrun = []
for data in testruns:
testrun.append(TestRun(data))
# extract the callgraph and traceevent data
vprint('Analyzing the ftrace data...')
tf = open(sysvals.ftracefile, 'r')
for line in tf:
# remove any latent carriage returns
line = line.replace('\r\n', '')
# grab the time stamp first (signifies the start of the test run)
m = re.match(sysvals.stampfmt, line)
if(m):
testidx += 1
parseStamp(m, testrun[testidx].data)
continue
# pull out any firmware data
if(re.match(sysvals.firmwarefmt, line)):
continue
		# if we haven't found a test time stamp yet, keep spinning till we do
if(testidx < 0):
continue
# determine the trace data type (required for further parsing)
m = re.match(sysvals.tracertypefmt, line)
if(m):
tracer = m.group('t')
testrun[testidx].setTracerType(tracer)
continue
		# parse only valid lines; if this isn't one, move on
m = re.match(testrun[testidx].ftrace_line_fmt, line)
if(not m):
continue
# gather the basic message data from the line
m_time = m.group('time')
m_pid = m.group('pid')
m_msg = m.group('msg')
if(testrun[testidx].cgformat):
m_param3 = m.group('dur')
else:
m_param3 = 'traceevent'
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_param3)
pid = int(m_pid)
else:
continue
# the line should be a call, return, or event
if(not t.fcall and not t.freturn and not t.fevent):
continue
# only parse the ftrace data during suspend/resume
data = testrun[testidx].data
if(not testrun[testidx].inthepipe):
# look for the suspend start marker
if(t.fevent):
if(t.name == 'SUSPEND START'):
testrun[testidx].inthepipe = True
data.setStart(t.time)
continue
else:
# trace event processing
if(t.fevent):
if(t.name == 'RESUME COMPLETE'):
testrun[testidx].inthepipe = False
data.setEnd(t.time)
if(testidx == testcnt - 1):
break
continue
# general trace events have two types, begin and end
if(re.match('(?P<name>.*) begin$', t.name)):
isbegin = True
elif(re.match('(?P<name>.*) end$', t.name)):
isbegin = False
else:
continue
m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
if(m):
val = m.group('val')
if val == '0':
name = m.group('name')
else:
name = m.group('name')+'['+val+']'
else:
m = re.match('(?P<name>.*) .*', t.name)
name = m.group('name')
# special processing for trace events
if re.match('dpm_prepare\[.*', name):
continue
elif re.match('machine_suspend.*', name):
continue
elif re.match('suspend_enter\[.*', name):
if(not isbegin):
data.dmesg['suspend_prepare']['end'] = t.time
continue
elif re.match('dpm_suspend\[.*', name):
if(not isbegin):
data.dmesg['suspend']['end'] = t.time
continue
elif re.match('dpm_suspend_late\[.*', name):
if(isbegin):
data.dmesg['suspend_late']['start'] = t.time
else:
data.dmesg['suspend_late']['end'] = t.time
continue
elif re.match('dpm_suspend_noirq\[.*', name):
if(isbegin):
data.dmesg['suspend_noirq']['start'] = t.time
else:
data.dmesg['suspend_noirq']['end'] = t.time
continue
elif re.match('dpm_resume_noirq\[.*', name):
if(isbegin):
data.dmesg['resume_machine']['end'] = t.time
data.dmesg['resume_noirq']['start'] = t.time
else:
data.dmesg['resume_noirq']['end'] = t.time
continue
elif re.match('dpm_resume_early\[.*', name):
if(isbegin):
data.dmesg['resume_early']['start'] = t.time
else:
data.dmesg['resume_early']['end'] = t.time
continue
elif re.match('dpm_resume\[.*', name):
if(isbegin):
data.dmesg['resume']['start'] = t.time
else:
data.dmesg['resume']['end'] = t.time
continue
elif re.match('dpm_complete\[.*', name):
if(isbegin):
data.dmesg['resume_complete']['start'] = t.time
else:
data.dmesg['resume_complete']['end'] = t.time
continue
# is this trace event outside of the device calls
if(data.isTraceEventOutsideDeviceCalls(pid, t.time)):
# global events (outside device calls) are simply graphed
if(isbegin):
# store each trace event in ttemp
if(name not in testrun[testidx].ttemp):
testrun[testidx].ttemp[name] = []
testrun[testidx].ttemp[name].append(\
{'begin': t.time, 'end': t.time})
else:
# finish off matching trace event in ttemp
if(name in testrun[testidx].ttemp):
testrun[testidx].ttemp[name][-1]['end'] = t.time
else:
if(isbegin):
data.addIntraDevTraceEvent('', name, pid, t.time)
else:
data.capIntraDevTraceEvent('', name, pid, t.time)
# call/return processing
elif sysvals.usecallgraph:
# create a callgraph object for the data
if(pid not in testrun[testidx].ftemp):
testrun[testidx].ftemp[pid] = []
testrun[testidx].ftemp[pid].append(FTraceCallGraph())
# when the call is finished, see which device matches it
cg = testrun[testidx].ftemp[pid][-1]
if(cg.addLine(t, m)):
testrun[testidx].ftemp[pid].append(FTraceCallGraph())
tf.close()
for test in testrun:
# add the traceevent data to the device hierarchy
if(sysvals.usetraceevents):
for name in test.ttemp:
for event in test.ttemp[name]:
begin = event['begin']
end = event['end']
# if event starts before timeline start, expand timeline
if(begin < test.data.start):
test.data.setStart(begin)
# if event ends after timeline end, expand the timeline
if(end > test.data.end):
test.data.setEnd(end)
test.data.newActionGlobal(name, begin, end)
# add the callgraph data to the device hierarchy
for pid in test.ftemp:
for cg in test.ftemp[pid]:
if(not cg.sanityCheck()):
id = 'task %s cpu %s' % (pid, m.group('cpu'))
vprint('Sanity check failed for '+\
id+', ignoring this callback')
continue
callstart = cg.start
callend = cg.end
for p in test.data.phases:
if(test.data.dmesg[p]['start'] <= callstart and
callstart <= test.data.dmesg[p]['end']):
list = test.data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
callstart <= dev['start'] and
callend >= dev['end']):
dev['ftrace'] = cg
break
if(sysvals.verbose):
test.data.printDetails()
# add the time in between the tests as a new phase so we can see it
if(len(testruns) > 1):
t1e = testruns[0].getEnd()
t2s = testruns[-1].getStart()
testruns[-1].newPhaseWithSingleAction('user mode', \
'user mode', t1e, t2s, '#FF9966')
# Function: parseTraceLog
# Description:
# Analyze an ftrace log output file generated from this app during
# the execution phase. Used when the ftrace log is the primary data source
# and includes the suspend_resume and device_pm_callback trace events.
# The ftrace filename is taken from sysvals
# Output:
# An array of Data objects
def parseTraceLog():
global sysvals
vprint('Analyzing the ftrace data...')
if(os.path.exists(sysvals.ftracefile) == False):
doError('%s does not exist' % sysvals.ftracefile, False)
# extract the callgraph and traceevent data
testruns = []
testdata = []
testrun = 0
data = 0
tf = open(sysvals.ftracefile, 'r')
phase = 'suspend_prepare'
for line in tf:
# remove any latent carriage returns
line = line.replace('\r\n', '')
# stamp line: each stamp means a new test run
m = re.match(sysvals.stampfmt, line)
if(m):
data = Data(len(testdata))
testdata.append(data)
testrun = TestRun(data)
testruns.append(testrun)
parseStamp(m, data)
continue
if(not data):
continue
# firmware line: pull out any firmware data
m = re.match(sysvals.firmwarefmt, line)
if(m):
data.fwSuspend = int(m.group('s'))
data.fwResume = int(m.group('r'))
if(data.fwSuspend > 0 or data.fwResume > 0):
data.fwValid = True
continue
# tracer type line: determine the trace data type
m = re.match(sysvals.tracertypefmt, line)
if(m):
tracer = m.group('t')
testrun.setTracerType(tracer)
continue
# post resume time line: did this test run include post-resume data
m = re.match(sysvals.postresumefmt, line)
if(m):
t = int(m.group('t'))
if(t > 0):
sysvals.postresumetime = t
continue
# ftrace line: parse only valid lines
m = re.match(testrun.ftrace_line_fmt, line)
if(not m):
continue
# gather the basic message data from the line
m_time = m.group('time')
m_pid = m.group('pid')
m_msg = m.group('msg')
if(testrun.cgformat):
m_param3 = m.group('dur')
else:
m_param3 = 'traceevent'
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_param3)
pid = int(m_pid)
else:
continue
# the line should be a call, return, or event
if(not t.fcall and not t.freturn and not t.fevent):
continue
# only parse the ftrace data during suspend/resume
if(not testrun.inthepipe):
# look for the suspend start marker
if(t.fevent):
if(t.name == 'SUSPEND START'):
testrun.inthepipe = True
data.setStart(t.time)
continue
# trace event processing
if(t.fevent):
if(t.name == 'RESUME COMPLETE'):
if(sysvals.postresumetime > 0):
phase = 'post_resume'
data.newPhase(phase, t.time, t.time, '#FF9966', -1)
else:
testrun.inthepipe = False
data.setEnd(t.time)
continue
if(phase == 'post_resume'):
data.setEnd(t.time)
if(t.type == 'suspend_resume'):
# suspend_resume trace events have two types, begin and end
if(re.match('(?P<name>.*) begin$', t.name)):
isbegin = True
elif(re.match('(?P<name>.*) end$', t.name)):
isbegin = False
else:
continue
m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
if(m):
val = m.group('val')
if val == '0':
name = m.group('name')
else:
name = m.group('name')+'['+val+']'
else:
m = re.match('(?P<name>.*) .*', t.name)
name = m.group('name')
# ignore these events
if(re.match('acpi_suspend\[.*', t.name) or
re.match('suspend_enter\[.*', name)):
continue
# -- phase changes --
# suspend_prepare start
if(re.match('dpm_prepare\[.*', t.name)):
phase = 'suspend_prepare'
if(not isbegin):
data.dmesg[phase]['end'] = t.time
continue
# suspend start
elif(re.match('dpm_suspend\[.*', t.name)):
phase = 'suspend'
data.setPhase(phase, t.time, isbegin)
continue
# suspend_late start
elif(re.match('dpm_suspend_late\[.*', t.name)):
phase = 'suspend_late'
data.setPhase(phase, t.time, isbegin)
continue
# suspend_noirq start
elif(re.match('dpm_suspend_noirq\[.*', t.name)):
phase = 'suspend_noirq'
data.setPhase(phase, t.time, isbegin)
if(not isbegin):
phase = 'suspend_machine'
data.dmesg[phase]['start'] = t.time
continue
# suspend_machine/resume_machine
elif(re.match('machine_suspend\[.*', t.name)):
if(isbegin):
phase = 'suspend_machine'
data.dmesg[phase]['end'] = t.time
data.tSuspended = t.time
else:
if(sysvals.suspendmode in ['mem', 'disk']):
data.dmesg['suspend_machine']['end'] = t.time
data.tSuspended = t.time
phase = 'resume_machine'
data.dmesg[phase]['start'] = t.time
data.tResumed = t.time
data.tLow = data.tResumed - data.tSuspended
continue
# resume_noirq start
elif(re.match('dpm_resume_noirq\[.*', t.name)):
phase = 'resume_noirq'
data.setPhase(phase, t.time, isbegin)
if(isbegin):
data.dmesg['resume_machine']['end'] = t.time
continue
# resume_early start
elif(re.match('dpm_resume_early\[.*', t.name)):
phase = 'resume_early'
data.setPhase(phase, t.time, isbegin)
continue
# resume start
elif(re.match('dpm_resume\[.*', t.name)):
phase = 'resume'
data.setPhase(phase, t.time, isbegin)
continue
# resume complete start
elif(re.match('dpm_complete\[.*', t.name)):
phase = 'resume_complete'
if(isbegin):
data.dmesg[phase]['start'] = t.time
continue
# is this trace event outside of the device calls
if(data.isTraceEventOutsideDeviceCalls(pid, t.time)):
# global events (outside device calls) are simply graphed
if(name not in testrun.ttemp):
testrun.ttemp[name] = []
if(isbegin):
# create a new list entry
testrun.ttemp[name].append(\
{'begin': t.time, 'end': t.time})
else:
if(len(testrun.ttemp[name]) > 0):
# if an entry exists, assume this is its end
testrun.ttemp[name][-1]['end'] = t.time
elif(phase == 'post_resume'):
# post resume events can just have ends
testrun.ttemp[name].append({
'begin': data.dmesg[phase]['start'],
'end': t.time})
else:
if(isbegin):
data.addIntraDevTraceEvent('', name, pid, t.time)
else:
data.capIntraDevTraceEvent('', name, pid, t.time)
# device callback start
elif(t.type == 'device_pm_callback_start'):
m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
t.name);
if(not m):
continue
drv = m.group('drv')
n = m.group('d')
p = m.group('p')
if(n and p):
data.newAction(phase, n, pid, p, t.time, -1, drv)
# device callback finish
elif(t.type == 'device_pm_callback_end'):
m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name);
if(not m):
continue
n = m.group('d')
list = data.dmesg[phase]['list']
if(n in list):
dev = list[n]
dev['length'] = t.time - dev['start']
dev['end'] = t.time
# callgraph processing
elif sysvals.usecallgraph:
# this shouldn't happen, but just in case, ignore post-resume callgraph data
if(phase == 'post_resume'):
continue
# create a callgraph object for the data
if(pid not in testrun.ftemp):
testrun.ftemp[pid] = []
testrun.ftemp[pid].append(FTraceCallGraph())
# when the call is finished, see which device matches it
cg = testrun.ftemp[pid][-1]
if(cg.addLine(t, m)):
testrun.ftemp[pid].append(FTraceCallGraph())
tf.close()
for test in testruns:
# add the traceevent data to the device hierarchy
if(sysvals.usetraceevents):
for name in test.ttemp:
for event in test.ttemp[name]:
begin = event['begin']
end = event['end']
# if event starts before timeline start, expand timeline
if(begin < test.data.start):
test.data.setStart(begin)
# if event ends after timeline end, expand the timeline
if(end > test.data.end):
test.data.setEnd(end)
test.data.newActionGlobal(name, begin, end)
# add the callgraph data to the device hierarchy
borderphase = {
'dpm_prepare': 'suspend_prepare',
'dpm_complete': 'resume_complete'
}
for pid in test.ftemp:
for cg in test.ftemp[pid]:
if len(cg.list) < 2:
continue
if(not cg.sanityCheck()):
id = 'task %s cpu %s' % (pid, m.group('cpu'))
vprint('Sanity check failed for '+\
id+', ignoring this callback')
continue
callstart = cg.start
callend = cg.end
if(cg.list[0].name in borderphase):
p = borderphase[cg.list[0].name]
list = test.data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
callstart <= dev['start'] and
callend >= dev['end']):
dev['ftrace'] = cg.slice(dev['start'], dev['end'])
continue
if(cg.list[0].name != 'dpm_run_callback'):
continue
for p in test.data.phases:
if(test.data.dmesg[p]['start'] <= callstart and
callstart <= test.data.dmesg[p]['end']):
list = test.data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
callstart <= dev['start'] and
callend >= dev['end']):
dev['ftrace'] = cg
break
# fill in any missing phases
for data in testdata:
lp = data.phases[0]
for p in data.phases:
if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
print('WARNING: phase "%s" is missing!' % p)
if(data.dmesg[p]['start'] < 0):
data.dmesg[p]['start'] = data.dmesg[lp]['end']
if(p == 'resume_machine'):
data.tSuspended = data.dmesg[lp]['end']
data.tResumed = data.dmesg[lp]['end']
data.tLow = 0
if(data.dmesg[p]['end'] < 0):
data.dmesg[p]['end'] = data.dmesg[p]['start']
lp = p
if(len(sysvals.devicefilter) > 0):
data.deviceFilter(sysvals.devicefilter)
data.fixupInitcallsThatDidntReturn()
if(sysvals.verbose):
data.printDetails()
# add the time in between the tests as a new phase so we can see it
if(len(testdata) > 1):
t1e = testdata[0].getEnd()
t2s = testdata[-1].getStart()
testdata[-1].newPhaseWithSingleAction('user mode', \
'user mode', t1e, t2s, '#FF9966')
return testdata
# Function: loadKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# load the dmesg file into memory and fix up any ordering issues
# The dmesg filename is taken from sysvals
# Output:
# An array of empty Data objects with only their dmesgtext attributes set
def loadKernelLog():
global sysvals
vprint('Analyzing the dmesg data...')
if(os.path.exists(sysvals.dmesgfile) == False):
doError('%s does not exist' % sysvals.dmesgfile, False)
# there can be multiple test runs in a single file delineated by stamps
testruns = []
data = 0
lf = open(sysvals.dmesgfile, 'r')
for line in lf:
line = line.replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
m = re.match(sysvals.stampfmt, line)
if(m):
if(data):
testruns.append(data)
data = Data(len(testruns))
parseStamp(m, data)
continue
if(not data):
continue
m = re.match(sysvals.firmwarefmt, line)
if(m):
data.fwSuspend = int(m.group('s'))
data.fwResume = int(m.group('r'))
if(data.fwSuspend > 0 or data.fwResume > 0):
data.fwValid = True
continue
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
data.dmesgtext.append(line)
if(re.match('ACPI: resume from mwait', m.group('msg'))):
print('NOTE: This suspend appears to be freeze rather than'+\
' %s; it will be treated as such' % sysvals.suspendmode)
sysvals.suspendmode = 'freeze'
else:
vprint('ignoring dmesg line: %s' % line.replace('\n', ''))
testruns.append(data)
lf.close()
if(not data):
print('ERROR: analyze_suspend header missing from dmesg log')
sys.exit()
# fix lines with same timestamp/function with the call and return swapped
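# Example (illustrative, hypothetical callback): a swapped pair such as
#   [  100.000001] call foo+ returned 0 after 10 usecs
#   [  100.000001] calling  foo+ @ 1, parent: bar
# is reordered below so the "calling" line precedes its return line.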
for data in testruns:
last = ''
for line in data.dmesgtext:
mc = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
'(?P<f>.*)\+ @ .*, parent: .*', line)
mr = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
'(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', last)
if(mc and mr and (mc.group('t') == mr.group('t')) and
(mc.group('f') == mr.group('f'))):
i = data.dmesgtext.index(last)
j = data.dmesgtext.index(line)
data.dmesgtext[i] = line
data.dmesgtext[j] = last
last = line
return testruns
# Function: parseKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# Analyze a dmesg log output file generated from this app during
# the execution phase. Create a set of device structures in memory
# for subsequent formatting in the html output file.
# This call is only for legacy support on kernels where the ftrace
# data lacks the suspend_resume or device_pm_callbacks trace events.
# Arguments:
# data: an empty Data object (with dmesgtext) obtained from loadKernelLog
# Output:
# The filled Data object
def parseKernelLog(data):
global sysvals
phase = 'suspend_runtime'
if(data.fwValid):
vprint('Firmware Suspend = %u ns, Firmware Resume = %u ns' % \
(data.fwSuspend, data.fwResume))
# dmesg phase match table
dm = {
'suspend_prepare': 'PM: Syncing filesystems.*',
'suspend': 'PM: Entering [a-z]* sleep.*',
'suspend_late': 'PM: suspend of devices complete after.*',
'suspend_noirq': 'PM: late suspend of devices complete after.*',
'suspend_machine': 'PM: noirq suspend of devices complete after.*',
'resume_machine': 'ACPI: Low-level resume complete.*',
'resume_noirq': 'ACPI: Waking up from system sleep state.*',
'resume_early': 'PM: noirq resume of devices complete after.*',
'resume': 'PM: early resume of devices complete after.*',
'resume_complete': 'PM: resume of devices complete after.*',
'post_resume': '.*Restarting tasks \.\.\..*',
}
if(sysvals.suspendmode == 'standby'):
dm['resume_machine'] = 'PM: Restoring platform NVS memory'
elif(sysvals.suspendmode == 'disk'):
dm['suspend_late'] = 'PM: freeze of devices complete after.*'
dm['suspend_noirq'] = 'PM: late freeze of devices complete after.*'
dm['suspend_machine'] = 'PM: noirq freeze of devices complete after.*'
dm['resume_machine'] = 'PM: Restoring platform NVS memory'
dm['resume_early'] = 'PM: noirq restore of devices complete after.*'
dm['resume'] = 'PM: early restore of devices complete after.*'
dm['resume_complete'] = 'PM: restore of devices complete after.*'
elif(sysvals.suspendmode == 'freeze'):
dm['resume_machine'] = 'ACPI: resume from mwait'
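# Example (illustrative): in the default 'mem' mode a dmesg line such as
#   PM: suspend of devices complete after 123.456 msecs
# matches dm['suspend_late'], which closes the 'suspend' phase and starts
# 'suspend_late'; each table entry marks one such phase boundary.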
# action table (expected events that occur and show up in dmesg)
at = {
'sync_filesystems': {
'smsg': 'PM: Syncing filesystems.*',
'emsg': 'PM: Preparing system for mem sleep.*' },
'freeze_user_processes': {
'smsg': 'Freezing user space processes .*',
'emsg': 'Freezing remaining freezable tasks.*' },
'freeze_tasks': {
'smsg': 'Freezing remaining freezable tasks.*',
'emsg': 'PM: Entering (?P<mode>[a-z,A-Z]*) sleep.*' },
'ACPI prepare': {
'smsg': 'ACPI: Preparing to enter system sleep state.*',
'emsg': 'PM: Saving platform NVS memory.*' },
'PM nvs': {
'smsg': 'PM: Saving platform NVS memory.*',
'emsg': 'Disabling non-boot CPUs .*' },
}
t0 = -1.0
cpu_start = -1.0
prevktime = -1.0
actions = dict()
for line in data.dmesgtext:
# -- preprocessing --
# parse each dmesg line into the time and message
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
val = m.group('ktime')
try:
ktime = float(val)
except:
doWarning('INVALID DMESG LINE: '+\
line.replace('\n', ''), 'dmesg')
continue
msg = m.group('msg')
# initialize data start to first line time
if t0 < 0:
data.setStart(ktime)
t0 = ktime
else:
continue
# hack for determining resume_machine end for freeze
if(not sysvals.usetraceevents and sysvals.suspendmode == 'freeze' \
and phase == 'resume_machine' and \
re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
data.dmesg['resume_machine']['end'] = ktime
phase = 'resume_noirq'
data.dmesg[phase]['start'] = ktime
# -- phase changes --
# suspend_prepare start
if(re.match(dm['suspend_prepare'], msg)):
phase = 'suspend_prepare'
data.dmesg[phase]['start'] = ktime
data.setStart(ktime)
# suspend start
elif(re.match(dm['suspend'], msg)):
data.dmesg['suspend_prepare']['end'] = ktime
phase = 'suspend'
data.dmesg[phase]['start'] = ktime
# suspend_late start
elif(re.match(dm['suspend_late'], msg)):
data.dmesg['suspend']['end'] = ktime
phase = 'suspend_late'
data.dmesg[phase]['start'] = ktime
# suspend_noirq start
elif(re.match(dm['suspend_noirq'], msg)):
data.dmesg['suspend_late']['end'] = ktime
phase = 'suspend_noirq'
data.dmesg[phase]['start'] = ktime
# suspend_machine start
elif(re.match(dm['suspend_machine'], msg)):
data.dmesg['suspend_noirq']['end'] = ktime
phase = 'suspend_machine'
data.dmesg[phase]['start'] = ktime
# resume_machine start
elif(re.match(dm['resume_machine'], msg)):
if(sysvals.suspendmode in ['freeze', 'standby']):
data.tSuspended = prevktime
data.dmesg['suspend_machine']['end'] = prevktime
else:
data.tSuspended = ktime
data.dmesg['suspend_machine']['end'] = ktime
phase = 'resume_machine'
data.tResumed = ktime
data.tLow = data.tResumed - data.tSuspended
data.dmesg[phase]['start'] = ktime
# resume_noirq start
elif(re.match(dm['resume_noirq'], msg)):
data.dmesg['resume_machine']['end'] = ktime
phase = 'resume_noirq'
data.dmesg[phase]['start'] = ktime
# resume_early start
elif(re.match(dm['resume_early'], msg)):
data.dmesg['resume_noirq']['end'] = ktime
phase = 'resume_early'
data.dmesg[phase]['start'] = ktime
# resume start
elif(re.match(dm['resume'], msg)):
data.dmesg['resume_early']['end'] = ktime
phase = 'resume'
data.dmesg[phase]['start'] = ktime
# resume complete start
elif(re.match(dm['resume_complete'], msg)):
data.dmesg['resume']['end'] = ktime
phase = 'resume_complete'
data.dmesg[phase]['start'] = ktime
# post resume start
elif(re.match(dm['post_resume'], msg)):
data.dmesg['resume_complete']['end'] = ktime
data.setEnd(ktime)
phase = 'post_resume'
break
# -- device callbacks --
if(phase in data.phases):
# device init call
if(re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
sm = re.match('calling (?P<f>.*)\+ @ '+\
'(?P<n>.*), parent: (?P<p>.*)', msg);
f = sm.group('f')
n = sm.group('n')
p = sm.group('p')
if(f and n and p):
data.newAction(phase, f, int(n), p, ktime, -1, '')
# device init return
elif(re.match('call (?P<f>.*)\+ returned .* after '+\
'(?P<t>.*) usecs', msg)):
sm = re.match('call (?P<f>.*)\+ returned .* after '+\
'(?P<t>.*) usecs(?P<a>.*)', msg);
f = sm.group('f')
t = sm.group('t')
list = data.dmesg[phase]['list']
if(f in list):
dev = list[f]
dev['length'] = int(t)
dev['end'] = ktime
# -- non-device-callback actions --
# if trace events are not available, these are better than nothing
if(not sysvals.usetraceevents):
# look for known actions
for a in at:
if(re.match(at[a]['smsg'], msg)):
if(a not in actions):
actions[a] = []
actions[a].append({'begin': ktime, 'end': ktime})
if(re.match(at[a]['emsg'], msg)):
actions[a][-1]['end'] = ktime
# now look for CPU on/off events
if(re.match('Disabling non-boot CPUs .*', msg)):
# start of first cpu suspend
cpu_start = ktime
elif(re.match('Enabling non-boot CPUs .*', msg)):
# start of first cpu resume
cpu_start = ktime
elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)):
# end of a cpu suspend, start of the next
m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
elif(re.match('CPU(?P<cpu>[0-9]*) is up', msg)):
# end of a cpu resume, start of the next
m = re.match('CPU(?P<cpu>[0-9]*) is up', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
prevktime = ktime
# fill in any missing phases
lp = data.phases[0]
for p in data.phases:
if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
print('WARNING: phase "%s" is missing, something went wrong!' % p)
print(' In %s, this dmesg line denotes the start of %s:' % \
(sysvals.suspendmode, p))
print(' "%s"' % dm[p])
if(data.dmesg[p]['start'] < 0):
data.dmesg[p]['start'] = data.dmesg[lp]['end']
if(p == 'resume_machine'):
data.tSuspended = data.dmesg[lp]['end']
data.tResumed = data.dmesg[lp]['end']
data.tLow = 0
if(data.dmesg[p]['end'] < 0):
data.dmesg[p]['end'] = data.dmesg[p]['start']
lp = p
# fill in any actions we've found
for name in actions:
for event in actions[name]:
begin = event['begin']
end = event['end']
# if event starts before timeline start, expand timeline
if(begin < data.start):
data.setStart(begin)
# if event ends after timeline end, expand the timeline
if(end > data.end):
data.setEnd(end)
data.newActionGlobal(name, begin, end)
if(sysvals.verbose):
data.printDetails()
if(len(sysvals.devicefilter) > 0):
data.deviceFilter(sysvals.devicefilter)
data.fixupInitcallsThatDidntReturn()
return True
# Function: setTimelineRows
# Description:
# Organize the timeline entries into the smallest
# number of rows possible, with no entry overlapping
# Arguments:
# list: the list of devices/actions for a single phase
# sortedkeys: chronologically sorted key list to use
# Output:
# The total number of rows needed to display this phase of the timeline
def setTimelineRows(list, sortedkeys):
# clear all rows and set them to undefined
remaining = len(list)
rowdata = dict()
row = 0
for item in list:
list[item]['row'] = -1
# try to pack each row with as many ranges as possible
while(remaining > 0):
if(row not in rowdata):
rowdata[row] = []
for item in sortedkeys:
if(list[item]['row'] < 0):
s = list[item]['start']
e = list[item]['end']
valid = True
for ritem in rowdata[row]:
rs = ritem['start']
re = ritem['end']
if(not (((s <= rs) and (e <= rs)) or
((s >= re) and (e >= re)))):
valid = False
break
if(valid):
rowdata[row].append(list[item])
list[item]['row'] = row
remaining -= 1
row += 1
return row
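# Example (illustrative): for three entries A(start=0,end=2), B(1,3), C(3,4),
# A and B overlap so B is pushed to a new row, while C packs after A:
# row 0 holds A and C, row 1 holds B, and the function returns 2.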
# Function: createTimeScale
# Description:
# Create the timescale header for the html timeline
# Arguments:
# t0: start time (suspend begin)
# tMax: end time (resume end)
# tSuspended: time when suspend occurs, i.e. the zero time
# Output:
# The html code needed to display the time scale
def createTimeScale(t0, tMax, tSuspended):
timescale = '<div class="t" style="right:{0}%">{1}</div>\n'
output = '<div id="timescale">\n'
# set scale for timeline
tTotal = tMax - t0
tS = 0.1
if(tTotal <= 0):
return output
if(tTotal > 4):
tS = 1
if(tSuspended < 0):
for i in range(int(tTotal/tS)+1):
pos = '%0.3f' % (100 - ((float(i)*tS*100)/tTotal))
if(i > 0):
val = '%0.fms' % (float(i)*tS*1000)
else:
val = ''
output += timescale.format(pos, val)
else:
tSuspend = tSuspended - t0
divTotal = int(tTotal/tS) + 1
divSuspend = int(tSuspend/tS)
s0 = (tSuspend - tS*divSuspend)*100/tTotal
for i in range(divTotal):
pos = '%0.3f' % (100 - ((float(i)*tS*100)/tTotal) - s0)
if((i == 0) and (s0 < 3)):
val = ''
elif(i == divSuspend):
val = 'S/R'
else:
val = '%0.fms' % (float(i-divSuspend)*tS*1000)
output += timescale.format(pos, val)
output += '</div>\n'
return output
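# Example (illustrative): for a 2 second timeline, e.g.
# createTimeScale(0.0, 2.0, 1.5), the tick size is 0.1s (totals under 4s
# use 100ms ticks), one right-positioned div is emitted per tick, and the
# tick at the suspend point is labeled "S/R" with the rest in ms from it.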
# Function: createHTMLSummarySimple
# Description:
# Create summary html file for a series of tests
# Arguments:
# testruns: array of Data objects from parseTraceLog
def createHTMLSummarySimple(testruns, htmlfile):
global sysvals
# print out the basic summary of all the tests
hf = open(htmlfile, 'w')
# write the html header first (html head, css code, up to body start)
html = '<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
<title>AnalyzeSuspend Summary</title>\n\
<style type=\'text/css\'>\n\
body {overflow-y: scroll;}\n\
.stamp {width: 100%;text-align:center;background-color:#495E09;line-height:30px;color:white;font: 25px Arial;}\n\
table {width:100%;border-collapse: collapse;}\n\
.summary {font: 22px Arial;border:1px solid;}\n\
th {border: 1px solid black;background-color:#A7C942;color:white;}\n\
td {text-align: center;}\n\
tr.alt td {background-color:#EAF2D3;}\n\
tr.avg td {background-color:#BDE34C;}\n\
a:link {color: #90B521;}\n\
a:visited {color: #495E09;}\n\
a:hover {color: #B1DF28;}\n\
a:active {color: #FFFFFF;}\n\
</style>\n</head>\n<body>\n'
# group test header
count = len(testruns)
headline_stamp = '<div class="stamp">{0} {1} {2} {3} ({4} tests)</div>\n'
html += headline_stamp.format(sysvals.stamp['host'],
sysvals.stamp['kernel'], sysvals.stamp['mode'],
sysvals.stamp['time'], count)
# check to see if all the tests have the same stamp values
stampcolumns = False
for data in testruns:
if diffStamp(sysvals.stamp, data.stamp):
stampcolumns = True
break
th = '\t<th>{0}</th>\n'
td = '\t<td>{0}</td>\n'
tdlink = '\t<td><a href="{0}">Click Here</a></td>\n'
# table header
html += '<table class="summary">\n<tr>\n'
html += th.format("Test #")
if stampcolumns:
html += th.format("Hostname")
html += th.format("Kernel Version")
html += th.format("Suspend Mode")
html += th.format("Test Time")
html += th.format("Suspend Time")
html += th.format("Resume Time")
html += th.format("Detail")
html += '</tr>\n'
# test data, 1 row per test
sTimeAvg = 0.0
rTimeAvg = 0.0
num = 1
for data in testruns:
# data.end is the end of post_resume
resumeEnd = data.dmesg['resume_complete']['end']
if num % 2 == 1:
html += '<tr class="alt">\n'
else:
html += '<tr>\n'
# test num
html += td.format("test %d" % num)
num += 1
if stampcolumns:
# host name
val = "unknown"
if('host' in data.stamp):
val = data.stamp['host']
html += td.format(val)
# host kernel
val = "unknown"
if('kernel' in data.stamp):
val = data.stamp['kernel']
html += td.format(val)
# suspend mode
val = "unknown"
if('mode' in data.stamp):
val = data.stamp['mode']
html += td.format(val)
# test time
val = "unknown"
if('time' in data.stamp):
val = data.stamp['time']
html += td.format(val)
# suspend time
sTime = (data.tSuspended - data.start)*1000
sTimeAvg += sTime
html += td.format("%3.3f ms" % sTime)
# resume time
rTime = (resumeEnd - data.tResumed)*1000
rTimeAvg += rTime
html += td.format("%3.3f ms" % rTime)
# link to the output html
html += tdlink.format(data.outfile)
html += '</tr>\n'
# last line: test average
if(count > 0):
sTimeAvg /= count
rTimeAvg /= count
html += '<tr class="avg">\n'
html += td.format('Average') # name
if stampcolumns:
html += td.format('') # host
html += td.format('') # kernel
html += td.format('') # mode
html += td.format('') # time
html += td.format("%3.3f ms" % sTimeAvg) # suspend time
html += td.format("%3.3f ms" % rTimeAvg) # resume time
html += td.format('') # output link
html += '</tr>\n'
# flush the data to file
hf.write(html+'</table>\n')
hf.write('</body>\n</html>\n')
hf.close()
# Function: createHTML
# Description:
# Create the output html file from the resident test data
# Arguments:
# testruns: array of Data objects from parseKernelLog or parseTraceLog
# Output:
# True if the html file was created, false if it failed
def createHTML(testruns):
global sysvals
for data in testruns:
data.normalizeTime(testruns[-1].tSuspended)
x2changes = ['', 'absolute']
if len(testruns) > 1:
x2changes = ['1', 'relative']
# html function templates
headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n'
html_devlist1 = '<button id="devlist1" class="devlist" style="float:left;">Device Detail%s</button>' % x2changes[0]
html_zoombox = '<center><button id="zoomin">ZOOM IN</button><button id="zoomout">ZOOM OUT</button><button id="zoomdef">ZOOM 1:1</button></center>\n'
html_devlist2 = '<button id="devlist2" class="devlist" style="float:right;">Device Detail2</button>\n'
html_timeline = '<div id="dmesgzoombox" class="zoombox">\n<div id="{0}" class="timeline" style="height:{1}px">\n'
html_device = '<div id="{0}" title="{1}" class="thread" style="left:{2}%;top:{3}%;height:{4}%;width:{5}%;">{6}</div>\n'
html_traceevent = '<div title="{0}" class="traceevent" style="left:{1}%;top:{2}%;height:{3}%;width:{4}%;border:1px solid {5};background-color:{5}">{6}</div>\n'
html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}%;height:{3}%;background-color:{4}">{5}</div>\n'
html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background-color:{3}"></div>\n'
html_legend = '<div class="square" style="left:{0}%;background-color:{1}"> {2}</div>\n'
html_timetotal = '<table class="time1">\n<tr>'\
'<td class="green">{2} Suspend Time: <b>{0} ms</b></td>'\
'<td class="yellow">{2} Resume Time: <b>{1} ms</b></td>'\
'</tr>\n</table>\n'
html_timetotal2 = '<table class="time1">\n<tr>'\
'<td class="green">{3} Suspend Time: <b>{0} ms</b></td>'\
'<td class="gray">'+sysvals.suspendmode+' time: <b>{1} ms</b></td>'\
'<td class="yellow">{3} Resume Time: <b>{2} ms</b></td>'\
'</tr>\n</table>\n'
html_timegroups = '<table class="time2">\n<tr>'\
'<td class="green">{4}Kernel Suspend: {0} ms</td>'\
'<td class="purple">{4}Firmware Suspend: {1} ms</td>'\
'<td class="purple">{4}Firmware Resume: {2} ms</td>'\
'<td class="yellow">{4}Kernel Resume: {3} ms</td>'\
'</tr>\n</table>\n'
# device timeline
vprint('Creating Device Timeline...')
devtl = Timeline()
# Generate the header for this timeline
textnum = ['First', 'Second']
for data in testruns:
tTotal = data.end - data.start
tEnd = data.dmesg['resume_complete']['end']
if(tTotal == 0):
print('ERROR: No timeline data')
sys.exit()
if(data.tLow > 0):
low_time = '%.0f'%(data.tLow*1000)
if data.fwValid:
suspend_time = '%.0f'%((data.tSuspended-data.start)*1000 + \
(data.fwSuspend/1000000.0))
resume_time = '%.0f'%((tEnd-data.tSuspended)*1000 + \
(data.fwResume/1000000.0))
testdesc1 = 'Total'
testdesc2 = ''
if(len(testruns) > 1):
testdesc1 = testdesc2 = textnum[data.testnumber]
testdesc2 += ' '
if(data.tLow == 0):
thtml = html_timetotal.format(suspend_time, \
resume_time, testdesc1)
else:
thtml = html_timetotal2.format(suspend_time, low_time, \
resume_time, testdesc1)
devtl.html['timeline'] += thtml
sktime = '%.3f'%((data.dmesg['suspend_machine']['end'] - \
data.getStart())*1000)
sftime = '%.3f'%(data.fwSuspend / 1000000.0)
rftime = '%.3f'%(data.fwResume / 1000000.0)
rktime = '%.3f'%((data.getEnd() - \
data.dmesg['resume_machine']['start'])*1000)
devtl.html['timeline'] += html_timegroups.format(sktime, \
sftime, rftime, rktime, testdesc2)
else:
suspend_time = '%.0f'%((data.tSuspended-data.start)*1000)
resume_time = '%.0f'%((tEnd-data.tSuspended)*1000)
testdesc = 'Kernel'
if(len(testruns) > 1):
testdesc = textnum[data.testnumber]+' '+testdesc
if(data.tLow == 0):
thtml = html_timetotal.format(suspend_time, \
resume_time, testdesc)
else:
thtml = html_timetotal2.format(suspend_time, low_time, \
resume_time, testdesc)
devtl.html['timeline'] += thtml
# time scale for potentially multiple datasets
t0 = testruns[0].start
tMax = testruns[-1].end
tSuspended = testruns[-1].tSuspended
tTotal = tMax - t0
# determine the maximum number of rows we need to draw
timelinerows = 0
for data in testruns:
for phase in data.dmesg:
list = data.dmesg[phase]['list']
rows = setTimelineRows(list, list)
data.dmesg[phase]['row'] = rows
if(rows > timelinerows):
timelinerows = rows
# calculate the timeline height and create bounding box, add buttons
devtl.setRows(timelinerows + 1)
devtl.html['timeline'] += html_devlist1
if len(testruns) > 1:
devtl.html['timeline'] += html_devlist2
devtl.html['timeline'] += html_zoombox
devtl.html['timeline'] += html_timeline.format('dmesg', devtl.height)
# draw the colored boxes for each of the phases
for data in testruns:
for b in data.dmesg:
phase = data.dmesg[b]
length = phase['end']-phase['start']
left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
width = '%.3f' % ((length*100.0)/tTotal)
devtl.html['timeline'] += html_phase.format(left, width, \
'%.3f'%devtl.scaleH, '%.3f'%(100-devtl.scaleH), \
data.dmesg[b]['color'], '')
# draw the time scale, try to make the number of labels readable
devtl.html['scale'] = createTimeScale(t0, tMax, tSuspended)
devtl.html['timeline'] += devtl.html['scale']
for data in testruns:
for b in data.dmesg:
phaselist = data.dmesg[b]['list']
for d in phaselist:
name = d
drv = ''
dev = phaselist[d]
if(d in sysvals.altdevname):
name = sysvals.altdevname[d]
if('drv' in dev and dev['drv']):
drv = ' {%s}' % dev['drv']
height = (100.0 - devtl.scaleH)/data.dmesg[b]['row']
top = '%.3f' % ((dev['row']*height) + devtl.scaleH)
left = '%.3f' % (((dev['start']-t0)*100)/tTotal)
width = '%.3f' % (((dev['end']-dev['start'])*100)/tTotal)
length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000)
color = 'rgba(204,204,204,0.5)'
devtl.html['timeline'] += html_device.format(dev['id'], \
d+drv+length+b, left, top, '%.3f'%height, width, name+drv)
# draw any trace events found
for data in testruns:
for b in data.dmesg:
phaselist = data.dmesg[b]['list']
for name in phaselist:
dev = phaselist[name]
if('traceevents' in dev):
vprint('Debug trace events found for device %s' % name)
vprint('%20s %20s %10s %8s' % ('action', \
'name', 'time(ms)', 'length(ms)'))
for e in dev['traceevents']:
vprint('%20s %20s %10.3f %8.3f' % (e.action, \
e.name, e.time*1000, e.length*1000))
height = (100.0 - devtl.scaleH)/data.dmesg[b]['row']
top = '%.3f' % ((dev['row']*height) + devtl.scaleH)
left = '%.3f' % (((e.time-t0)*100)/tTotal)
width = '%.3f' % (e.length*100/tTotal)
color = 'rgba(204,204,204,0.5)'
devtl.html['timeline'] += \
html_traceevent.format(e.action+' '+e.name, \
left, top, '%.3f'%height, \
width, e.color, '')
# timeline is finished
devtl.html['timeline'] += '</div>\n</div>\n'
# draw a legend which describes the phases by color
data = testruns[-1]
devtl.html['legend'] = '<div class="legend">\n'
pdelta = 100.0/len(data.phases)
pmargin = pdelta / 4.0
for phase in data.phases:
order = '%.2f' % ((data.dmesg[phase]['order'] * pdelta) + pmargin)
name = string.replace(phase, '_', ' ')
devtl.html['legend'] += html_legend.format(order, \
data.dmesg[phase]['color'], name)
devtl.html['legend'] += '</div>\n'
hf = open(sysvals.htmlfile, 'w')
thread_height = 0
# write the html header first (html head, css code, up to body start)
html_header = '<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
<title>AnalyzeSuspend</title>\n\
<style type=\'text/css\'>\n\
body {overflow-y: scroll;}\n\
.stamp {width: 100%;text-align:center;background-color:gray;line-height:30px;color:white;font: 25px Arial;}\n\
.callgraph {margin-top: 30px;box-shadow: 5px 5px 20px black;}\n\
.callgraph article * {padding-left: 28px;}\n\
h1 {color:black;font: bold 30px Times;}\n\
t0 {color:black;font: bold 30px Times;}\n\
t1 {color:black;font: 30px Times;}\n\
t2 {color:black;font: 25px Times;}\n\
t3 {color:black;font: 20px Times;white-space:nowrap;}\n\
t4 {color:black;font: bold 30px Times;line-height:60px;white-space:nowrap;}\n\
table {width:100%;}\n\
.gray {background-color:rgba(80,80,80,0.1);}\n\
.green {background-color:rgba(204,255,204,0.4);}\n\
.purple {background-color:rgba(128,0,128,0.2);}\n\
.yellow {background-color:rgba(255,255,204,0.4);}\n\
.time1 {font: 22px Arial;border:1px solid;}\n\
.time2 {font: 15px Arial;border-bottom:1px solid;border-left:1px solid;border-right:1px solid;}\n\
td {text-align: center;}\n\
r {color:#500000;font:15px Tahoma;}\n\
n {color:#505050;font:15px Tahoma;}\n\
.tdhl {color: red;}\n\
.hide {display: none;}\n\
.pf {display: none;}\n\
.pf:checked + label {background: url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/><rect x="8" y="4" width="2" height="10" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
.pf:not(:checked) ~ label {background: url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
.pf:checked ~ *:not(:nth-child(2)) {display: none;}\n\
.zoombox {position: relative; width: 100%; overflow-x: scroll;}\n\
.timeline {position: relative; font-size: 14px;cursor: pointer;width: 100%; overflow: hidden; background-color:#dddddd;}\n\
.thread {position: absolute; height: '+'%.3f'%thread_height+'%; overflow: hidden; line-height: 30px; border:1px solid;text-align:center;white-space:nowrap;background-color:rgba(204,204,204,0.5);}\n\
.thread:hover {background-color:white;border:1px solid red;z-index:10;}\n\
.hover {background-color:white;border:1px solid red;z-index:10;}\n\
.traceevent {position: absolute;opacity: 0.3;height: '+'%.3f'%thread_height+'%;width:0;overflow:hidden;line-height:30px;text-align:center;white-space:nowrap;}\n\
.phase {position: absolute;overflow: hidden;border:0px;text-align:center;}\n\
.phaselet {position:absolute;overflow:hidden;border:0px;text-align:center;height:100px;font-size:24px;}\n\
.t {position:absolute;top:0%;height:100%;border-right:1px solid black;}\n\
.legend {position: relative; width: 100%; height: 40px; text-align: center;margin-bottom:20px}\n\
.legend .square {position:absolute;top:10px; width: 0px;height: 20px;border:1px solid;padding-left:20px;}\n\
button {height:40px;width:200px;margin-bottom:20px;margin-top:20px;font-size:24px;}\n\
.devlist {position:'+x2changes[1]+';width:190px;}\n\
#devicedetail {height:100px;box-shadow: 5px 5px 20px black;}\n\
</style>\n</head>\n<body>\n'
hf.write(html_header)
# write the test title and general info header
if(sysvals.stamp['time'] != ""):
hf.write(headline_stamp.format(sysvals.stamp['host'],
sysvals.stamp['kernel'], sysvals.stamp['mode'], \
sysvals.stamp['time']))
# write the device timeline
hf.write(devtl.html['timeline'])
hf.write(devtl.html['legend'])
hf.write('<div id="devicedetailtitle"></div>\n')
hf.write('<div id="devicedetail" style="display:none;">\n')
# draw the colored boxes for the device detail section
for data in testruns:
hf.write('<div id="devicedetail%d">\n' % data.testnumber)
for b in data.phases:
phase = data.dmesg[b]
length = phase['end']-phase['start']
left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
width = '%.3f' % ((length*100.0)/tTotal)
hf.write(html_phaselet.format(b, left, width, \
data.dmesg[b]['color']))
hf.write('</div>\n')
hf.write('</div>\n')
# write the ftrace data (callgraph)
data = testruns[-1]
if(sysvals.usecallgraph):
hf.write('<section id="callgraphs" class="callgraph">\n')
# write out the ftrace data converted to html
html_func_top = '<article id="{0}" class="atop" style="background-color:{1}">\n<input type="checkbox" class="pf" id="f{2}" checked/><label for="f{2}">{3} {4}</label>\n'
html_func_start = '<article>\n<input type="checkbox" class="pf" id="f{0}" checked/><label for="f{0}">{1} {2}</label>\n'
html_func_end = '</article>\n'
html_func_leaf = '<article>{0} {1}</article>\n'
num = 0
for p in data.phases:
list = data.dmesg[p]['list']
for devname in data.sortedDevices(p):
if('ftrace' not in list[devname]):
continue
name = devname
if(devname in sysvals.altdevname):
name = sysvals.altdevname[devname]
devid = list[devname]['id']
cg = list[devname]['ftrace']
flen = '<r>(%.3f ms @ %.3f to %.3f)</r>' % \
((cg.end - cg.start)*1000, cg.start*1000, cg.end*1000)
hf.write(html_func_top.format(devid, data.dmesg[p]['color'], \
num, name+' '+p, flen))
num += 1
for line in cg.list:
if(line.length < 0.000000001):
flen = ''
else:
flen = '<n>(%.3f ms @ %.3f)</n>' % (line.length*1000, \
line.time*1000)
if(line.freturn and line.fcall):
hf.write(html_func_leaf.format(line.name, flen))
elif(line.freturn):
hf.write(html_func_end)
else:
hf.write(html_func_start.format(num, line.name, flen))
num += 1
hf.write(html_func_end)
hf.write('\n\n </section>\n')
# write the footer and close
addScriptCode(hf, testruns)
hf.write('</body>\n</html>\n')
hf.close()
return True
# Function: addScriptCode
# Description:
# Adds the javascript code to the output html
# Arguments:
# hf: the open html file pointer
# testruns: array of Data objects from parseKernelLog or parseTraceLog
def addScriptCode(hf, testruns):
t0 = (testruns[0].start - testruns[-1].tSuspended) * 1000
tMax = (testruns[-1].end - testruns[-1].tSuspended) * 1000
# create an array in javascript memory with the device details
detail = ' var devtable = [];\n'
for data in testruns:
topo = data.deviceTopology()
detail += ' devtable[%d] = "%s";\n' % (data.testnumber, topo)
detail += ' var bounds = [%f,%f];\n' % (t0, tMax)
# add the code which will manipulate the data in the browser
script_code = \
'<script type="text/javascript">\n'+detail+\
' function zoomTimeline() {\n'\
' var timescale = document.getElementById("timescale");\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var zoombox = document.getElementById("dmesgzoombox");\n'\
' var val = parseFloat(dmesg.style.width);\n'\
' var newval = 100;\n'\
' var sh = window.outerWidth / 2;\n'\
' if(this.id == "zoomin") {\n'\
' newval = val * 1.2;\n'\
' if(newval > 40000) newval = 40000;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
' } else if (this.id == "zoomout") {\n'\
' newval = val / 1.2;\n'\
' if(newval < 100) newval = 100;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
' } else {\n'\
' zoombox.scrollLeft = 0;\n'\
' dmesg.style.width = "100%";\n'\
' }\n'\
' var html = "";\n'\
' var t0 = bounds[0];\n'\
' var tMax = bounds[1];\n'\
' var tTotal = tMax - t0;\n'\
' var wTotal = tTotal * 100.0 / newval;\n'\
' for(var tS = 1000; (wTotal / tS) < 3; tS /= 10);\n'\
' if(tS < 1) tS = 1;\n'\
' for(var s = ((t0 / tS)|0) * tS; s < tMax; s += tS) {\n'\
' var pos = (tMax - s) * 100.0 / tTotal;\n'\
' var name = (s == 0)?"S/R":(s+"ms");\n'\
' html += "<div class=\\"t\\" style=\\"right:"+pos+"%\\">"+name+"</div>";\n'\
' }\n'\
' timescale.innerHTML = html;\n'\
' }\n'\
' function deviceHover() {\n'\
' var name = this.title.slice(0, this.title.indexOf(" ("));\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' var cpu = -1;\n'\
' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(7));\n'\
' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(8));\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dname = dev[i].title.slice(0, dev[i].title.indexOf(" ("));\n'\
' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
' (name == dname))\n'\
' {\n'\
' dev[i].className = "thread hover";\n'\
' } else {\n'\
' dev[i].className = "thread";\n'\
' }\n'\
' }\n'\
' }\n'\
' function deviceUnhover() {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dev[i].className = "thread";\n'\
' }\n'\
' }\n'\
' function deviceTitle(title, total, cpu) {\n'\
' var prefix = "Total";\n'\
' if(total.length > 3) {\n'\
' prefix = "Average";\n'\
' total[1] = (total[1]+total[3])/2;\n'\
' total[2] = (total[2]+total[4])/2;\n'\
' }\n'\
' var devtitle = document.getElementById("devicedetailtitle");\n'\
' var name = title.slice(0, title.indexOf(" "));\n'\
' if(cpu >= 0) name = "CPU"+cpu;\n'\
' var driver = "";\n'\
' var tS = "<t2>(</t2>";\n'\
' var tR = "<t2>)</t2>";\n'\
' if(total[1] > 0)\n'\
' tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n'\
' if(total[2] > 0)\n'\
' tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n'\
' var s = title.indexOf("{");\n'\
' var e = title.indexOf("}");\n'\
' if((s >= 0) && (e >= 0))\n'\
' driver = title.slice(s+1, e) + " <t1>@</t1> ";\n'\
' if(total[1] > 0 && total[2] > 0)\n'\
' devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n'\
' else\n'\
' devtitle.innerHTML = "<t0>"+title+"</t0>";\n'\
' return name;\n'\
' }\n'\
' function deviceDetail() {\n'\
' var devinfo = document.getElementById("devicedetail");\n'\
' devinfo.style.display = "block";\n'\
' var name = this.title.slice(0, this.title.indexOf(" ("));\n'\
' var cpu = -1;\n'\
' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(7));\n'\
' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(8));\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' var idlist = [];\n'\
' var pdata = [[]];\n'\
' var pd = pdata[0];\n'\
' var total = [0.0, 0.0, 0.0];\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dname = dev[i].title.slice(0, dev[i].title.indexOf(" ("));\n'\
' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
' (name == dname))\n'\
' {\n'\
' idlist[idlist.length] = dev[i].id;\n'\
' var tidx = 1;\n'\
' if(dev[i].id[0] == "a") {\n'\
' pd = pdata[0];\n'\
' } else {\n'\
' if(pdata.length == 1) pdata[1] = [];\n'\
' if(total.length == 3) total[3]=total[4]=0.0;\n'\
' pd = pdata[1];\n'\
' tidx = 3;\n'\
' }\n'\
' var info = dev[i].title.split(" ");\n'\
' var pname = info[info.length-1];\n'\
' pd[pname] = parseFloat(info[info.length-3].slice(1));\n'\
' total[0] += pd[pname];\n'\
' if(pname.indexOf("suspend") >= 0)\n'\
' total[tidx] += pd[pname];\n'\
' else\n'\
' total[tidx+1] += pd[pname];\n'\
' }\n'\
' }\n'\
' var devname = deviceTitle(this.title, total, cpu);\n'\
' var left = 0.0;\n'\
' for (var t = 0; t < pdata.length; t++) {\n'\
' pd = pdata[t];\n'\
' devinfo = document.getElementById("devicedetail"+t);\n'\
' var phases = devinfo.getElementsByClassName("phaselet");\n'\
' for (var i = 0; i < phases.length; i++) {\n'\
' if(phases[i].id in pd) {\n'\
' var w = 100.0*pd[phases[i].id]/total[0];\n'\
' var fs = 32;\n'\
' if(w < 8) fs = 4*w | 0;\n'\
' var fs2 = fs*3/4;\n'\
' phases[i].style.width = w+"%";\n'\
' phases[i].style.left = left+"%";\n'\
' phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\
' left += w;\n'\
' var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\
' var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace("_", " ")+"</t3>";\n'\
' phases[i].innerHTML = time+pname;\n'\
' } else {\n'\
' phases[i].style.width = "0%";\n'\
' phases[i].style.left = left+"%";\n'\
' }\n'\
' }\n'\
' }\n'\
' var cglist = document.getElementById("callgraphs");\n'\
' if(!cglist) return;\n'\
' var cg = cglist.getElementsByClassName("atop");\n'\
' for (var i = 0; i < cg.length; i++) {\n'\
' if(idlist.indexOf(cg[i].id) >= 0) {\n'\
' cg[i].style.display = "block";\n'\
' } else {\n'\
' cg[i].style.display = "none";\n'\
' }\n'\
' }\n'\
' }\n'\
' function devListWindow(e) {\n'\
' var sx = e.clientX;\n'\
' if(sx > window.innerWidth - 440)\n'\
' sx = window.innerWidth - 440;\n'\
' var cfg="top="+e.screenY+", left="+sx+", width=440, height=720, scrollbars=yes";\n'\
' var win = window.open("", "_blank", cfg);\n'\
' if(window.chrome) win.moveBy(sx, 0);\n'\
' var html = "<title>"+e.target.innerHTML+"</title>"+\n'\
' "<style type=\\"text/css\\">"+\n'\
' " ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\
' "</style>"\n'\
' var dt = devtable[0];\n'\
' if(e.target.id != "devlist1")\n'\
' dt = devtable[1];\n'\
' win.document.write(html+dt);\n'\
' }\n'\
' window.addEventListener("load", function () {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' dmesg.style.width = "100%"\n'\
' document.getElementById("zoomin").onclick = zoomTimeline;\n'\
' document.getElementById("zoomout").onclick = zoomTimeline;\n'\
' document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
' var devlist = document.getElementsByClassName("devlist");\n'\
' for (var i = 0; i < devlist.length; i++)\n'\
' devlist[i].onclick = devListWindow;\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dev[i].onclick = deviceDetail;\n'\
' dev[i].onmouseover = deviceHover;\n'\
' dev[i].onmouseout = deviceUnhover;\n'\
' }\n'\
' zoomTimeline();\n'\
' });\n'\
'</script>\n'
hf.write(script_code);
# Function: executeSuspend
# Description:
# Execute system suspend through the sysfs interface, then copy the output
# dmesg and ftrace files to the test output directory.
def executeSuspend():
global sysvals
detectUSB(False)
t0 = time.time()*1000
tp = sysvals.tpath
# execute however many s/r runs requested
for count in range(1,sysvals.execcount+1):
# clear the kernel ring buffer just as we start
os.system('dmesg -C')
# enable callgraph ftrace only for the second run
if(sysvals.usecallgraph and count == 2):
# set trace type
os.system('echo function_graph > '+tp+'current_tracer')
os.system('echo "" > '+tp+'set_ftrace_filter')
# set trace format options
os.system('echo funcgraph-abstime > '+tp+'trace_options')
os.system('echo funcgraph-proc > '+tp+'trace_options')
# focus only on device suspend and resume
os.system('cat '+tp+'available_filter_functions | '+\
'grep dpm_run_callback > '+tp+'set_graph_function')
# if this is test2 and there's a delay, start here
if(count > 1 and sysvals.x2delay > 0):
tN = time.time()*1000
while (tN - t0) < sysvals.x2delay:
tN = time.time()*1000
time.sleep(0.001)
# start ftrace
if(sysvals.usecallgraph or sysvals.usetraceevents):
print('START TRACING')
os.system('echo 1 > '+tp+'tracing_on')
# initiate suspend
if(sysvals.usecallgraph or sysvals.usetraceevents):
os.system('echo SUSPEND START > '+tp+'trace_marker')
if(sysvals.rtcwake):
print('SUSPEND START')
print('will autoresume in %d seconds' % sysvals.rtcwaketime)
sysvals.rtcWakeAlarm()
else:
print('SUSPEND START (press a key to resume)')
pf = open(sysvals.powerfile, 'w')
pf.write(sysvals.suspendmode)
# execution will pause here
pf.close()
t0 = time.time()*1000
# return from suspend
print('RESUME COMPLETE')
if(sysvals.usecallgraph or sysvals.usetraceevents):
os.system('echo RESUME COMPLETE > '+tp+'trace_marker')
# see if there's firmware timing data to be had
t = sysvals.postresumetime
if(t > 0):
print('Waiting %d seconds for POST-RESUME trace events...' % t)
time.sleep(t)
# stop ftrace
if(sysvals.usecallgraph or sysvals.usetraceevents):
os.system('echo 0 > '+tp+'tracing_on')
print('CAPTURING TRACE')
writeDatafileHeader(sysvals.ftracefile)
os.system('cat '+tp+'trace >> '+sysvals.ftracefile)
os.system('echo "" > '+tp+'trace')
# grab a copy of the dmesg output
print('CAPTURING DMESG')
writeDatafileHeader(sysvals.dmesgfile)
os.system('dmesg -c >> '+sysvals.dmesgfile)
def writeDatafileHeader(filename):
global sysvals
fw = getFPDT(False)
prt = sysvals.postresumetime
fp = open(filename, 'a')
fp.write(sysvals.teststamp+'\n')
if(fw):
fp.write('# fwsuspend %u fwresume %u\n' % (fw[0], fw[1]))
if(prt > 0):
fp.write('# post resume time %u\n' % prt)
fp.close()
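# Example (illustrative, values hypothetical): with FPDT firmware data and a
# 5 second post resume time, each output file gains a header like:
#   <teststamp line>
#   # fwsuspend 123456 fwresume 654321
#   # post resume time 5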
# Function: executeAndroidSuspend
# Description:
# Execute system suspend through the sysfs interface
# on a remote android device, then transfer the output
# dmesg and ftrace files to the local output directory.
def executeAndroidSuspend():
global sysvals
# check to see if the display is currently off
tp = sysvals.tpath
out = os.popen(sysvals.adb+\
' shell dumpsys power | grep mScreenOn').read().strip()
# if so we need to turn it on so we can issue a new suspend
if(out.endswith('false')):
print('Waking the device up for the test...')
# send the KEYPAD_POWER keyevent to wake it up
os.system(sysvals.adb+' shell input keyevent 26')
# wait a few seconds so the user can see the device wake up
time.sleep(3)
# execute however many s/r runs requested
for count in range(1,sysvals.execcount+1):
# clear the kernel ring buffer just as we start
os.system(sysvals.adb+' shell dmesg -c > /dev/null 2>&1')
# start ftrace
if(sysvals.usetraceevents):
print('START TRACING')
os.system(sysvals.adb+" shell 'echo 1 > "+tp+"tracing_on'")
# initiate suspend
if(sysvals.usetraceevents):
os.system(sysvals.adb+\
" shell 'echo SUSPEND START > "+tp+"trace_marker'")
print('SUSPEND START (press a key on the device to resume)')
os.system(sysvals.adb+" shell 'echo "+sysvals.suspendmode+\
" > "+sysvals.powerfile+"'")
# execution will pause here, then adb will exit
while(True):
check = os.popen(sysvals.adb+\
' shell pwd 2>/dev/null').read().strip()
if(len(check) > 0):
break
time.sleep(1)
if(sysvals.usetraceevents):
os.system(sysvals.adb+" shell 'echo RESUME COMPLETE > "+tp+\
"trace_marker'")
# return from suspend
print('RESUME COMPLETE')
# stop ftrace
if(sysvals.usetraceevents):
os.system(sysvals.adb+" shell 'echo 0 > "+tp+"tracing_on'")
print('CAPTURING TRACE')
os.system('echo "'+sysvals.teststamp+'" > '+sysvals.ftracefile)
os.system(sysvals.adb+' shell cat '+tp+\
'trace >> '+sysvals.ftracefile)
# grab a copy of the dmesg output
print('CAPTURING DMESG')
os.system('echo "'+sysvals.teststamp+'" > '+sysvals.dmesgfile)
os.system(sysvals.adb+' shell dmesg >> '+sysvals.dmesgfile)
# Function: setUSBDevicesAuto
# Description:
# Set the autosuspend control parameter of all USB devices to auto
# This can be dangerous, so use at your own risk; most devices are set
# to always-on since the kernel can't determine whether the device can
# properly autosuspend
def setUSBDevicesAuto():
global sysvals
rootCheck()
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/usb[0-9]*.*', dirname) and
'idVendor' in filenames and 'idProduct' in filenames):
os.system('echo auto > %s/power/control' % dirname)
name = dirname.split('/')[-1]
desc = os.popen('cat %s/product 2>/dev/null' % \
dirname).read().replace('\n', '')
ctrl = os.popen('cat %s/power/control 2>/dev/null' % \
dirname).read().replace('\n', '')
print('control is %s for %6s: %s' % (ctrl, name, desc))
# Function: yesno
# Description:
# Print out an equivalent Y or N for a set of known parameter values
# Output:
# 'Y', 'N', or ' ' if the value is unknown
def yesno(val):
yesvals = ['auto', 'enabled', 'active', '1']
novals = ['on', 'disabled', 'suspended', 'forbidden', 'unsupported']
if val in yesvals:
return 'Y'
elif val in novals:
return 'N'
return ' '
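# Usage sketch for yesno() (doctest-style, not executed). Note that 'on'
# maps to 'N': for sysfs power/control, 'on' means autosuspend is
# disabled, so the parameter is effectively off.
#	>>> yesno('auto')
#	'Y'
#	>>> yesno('on')
#	'N'
#	>>> yesno('bogus')
#	' '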
# Function: ms2nice
# Description:
# Print out a very concise time string in minutes and seconds
# Output:
# The time string, e.g. "1901m16s"
def ms2nice(val):
	ms = 0
	try:
		ms = int(val)
	except:
		# return a string even on bad input so callers can always
		# format the result with %s
		return '%3dm%2ds' % (0, 0)
	m = ms / 60000
	s = (ms / 1000) - (m * 60)
	return '%3dm%2ds' % (m, s)
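# Usage sketch for ms2nice() (doctest-style, not executed):
#	>>> ms2nice(76000)
#	'  1m16s'
#	>>> ms2nice(114076000)
#	'1901m16s'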
# Function: detectUSB
# Description:
# Detect all the USB hosts and devices currently connected and add
# a list of USB device names to sysvals for better timeline readability
# Arguments:
# output: True to output the info to stdout, False otherwise
def detectUSB(output):
global sysvals
field = {'idVendor':'', 'idProduct':'', 'product':'', 'speed':''}
power = {'async':'', 'autosuspend':'', 'autosuspend_delay_ms':'',
'control':'', 'persist':'', 'runtime_enabled':'',
'runtime_status':'', 'runtime_usage':'',
'runtime_active_time':'',
'runtime_suspended_time':'',
'active_duration':'',
'connected_duration':''}
if(output):
print('LEGEND')
print('---------------------------------------------------------------------------------------------')
print(' A = async/sync PM queue Y/N D = autosuspend delay (seconds)')
print(' S = autosuspend Y/N rACTIVE = runtime active (min/sec)')
print(' P = persist across suspend Y/N rSUSPEN = runtime suspend (min/sec)')
print(' E = runtime suspend enabled/forbidden Y/N ACTIVE = active duration (min/sec)')
print(' R = runtime status active/suspended Y/N CONNECT = connected duration (min/sec)')
print(' U = runtime usage count')
print('---------------------------------------------------------------------------------------------')
print(' NAME ID DESCRIPTION SPEED A S P E R U D rACTIVE rSUSPEN ACTIVE CONNECT')
print('---------------------------------------------------------------------------------------------')
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/usb[0-9]*.*', dirname) and
'idVendor' in filenames and 'idProduct' in filenames):
for i in field:
field[i] = os.popen('cat %s/%s 2>/dev/null' % \
(dirname, i)).read().replace('\n', '')
name = dirname.split('/')[-1]
if(len(field['product']) > 0):
sysvals.altdevname[name] = \
'%s [%s]' % (field['product'], name)
else:
sysvals.altdevname[name] = \
'%s:%s [%s]' % (field['idVendor'], \
field['idProduct'], name)
if(output):
for i in power:
power[i] = os.popen('cat %s/power/%s 2>/dev/null' % \
(dirname, i)).read().replace('\n', '')
if(re.match('usb[0-9]*', name)):
first = '%-8s' % name
else:
first = '%8s' % name
print('%s [%s:%s] %-20s %-4s %1s %1s %1s %1s %1s %1s %1s %s %s %s %s' % \
(first, field['idVendor'], field['idProduct'], \
field['product'][0:20], field['speed'], \
yesno(power['async']), \
yesno(power['control']), \
yesno(power['persist']), \
yesno(power['runtime_enabled']), \
yesno(power['runtime_status']), \
power['runtime_usage'], \
power['autosuspend'], \
ms2nice(power['runtime_active_time']), \
ms2nice(power['runtime_suspended_time']), \
ms2nice(power['active_duration']), \
ms2nice(power['connected_duration'])))
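# Illustrative altdevname entries created by detectUSB() (the device
# names and ids below are hypothetical):
#	sysvals.altdevname['1-1.2'] == 'USB Optical Mouse [1-1.2]'
#	sysvals.altdevname['1-1.3'] == '8087:0024 [1-1.3]'  # no product string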
# Function: getModes
# Description:
# Determine the supported power modes on this system
# Output:
# A string list of the available modes
def getModes():
global sysvals
modes = ''
if(not sysvals.android):
if(os.path.exists(sysvals.powerfile)):
fp = open(sysvals.powerfile, 'r')
modes = string.split(fp.read())
fp.close()
else:
line = os.popen(sysvals.adb+' shell cat '+\
sysvals.powerfile).read().strip()
modes = string.split(line)
return modes
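# Illustrative result of getModes() on a typical local system, assuming
# sysvals.powerfile points at /sys/power/state (the exact modes vary by
# kernel and hardware):
#	['freeze', 'standby', 'mem', 'disk']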
# Function: getFPDT
# Description:
# Read the acpi bios tables and pull out FPDT, the firmware data
# Arguments:
# output: True to output the info to stdout, False otherwise
def getFPDT(output):
global sysvals
rectype = {}
rectype[0] = 'Firmware Basic Boot Performance Record'
rectype[1] = 'S3 Performance Table Record'
prectype = {}
prectype[0] = 'Basic S3 Resume Performance Record'
prectype[1] = 'Basic S3 Suspend Performance Record'
rootCheck()
if(not os.path.exists(sysvals.fpdtpath)):
if(output):
doError('file doesnt exist: %s' % sysvals.fpdtpath, False)
return False
if(not os.access(sysvals.fpdtpath, os.R_OK)):
if(output):
doError('file isnt readable: %s' % sysvals.fpdtpath, False)
return False
if(not os.path.exists(sysvals.mempath)):
if(output):
doError('file doesnt exist: %s' % sysvals.mempath, False)
return False
if(not os.access(sysvals.mempath, os.R_OK)):
if(output):
doError('file isnt readable: %s' % sysvals.mempath, False)
return False
fp = open(sysvals.fpdtpath, 'rb')
buf = fp.read()
fp.close()
if(len(buf) < 36):
if(output):
doError('Invalid FPDT table data, should '+\
'be at least 36 bytes', False)
return False
table = struct.unpack('4sIBB6s8sI4sI', buf[0:36])
if(output):
print('')
print('Firmware Performance Data Table (%s)' % table[0])
print(' Signature : %s' % table[0])
print(' Table Length : %u' % table[1])
print(' Revision : %u' % table[2])
print(' Checksum : 0x%x' % table[3])
print(' OEM ID : %s' % table[4])
print(' OEM Table ID : %s' % table[5])
print(' OEM Revision : %u' % table[6])
print(' Creator ID : %s' % table[7])
print(' Creator Revision : 0x%x' % table[8])
print('')
if(table[0] != 'FPDT'):
if(output):
doError('Invalid FPDT table')
return False
if(len(buf) <= 36):
return False
i = 0
fwData = [0, 0]
records = buf[36:]
fp = open(sysvals.mempath, 'rb')
while(i < len(records)):
		header = struct.unpack('HBB', records[i:i+4])
		# skip unrecognized or oddly-sized records; a bare 'continue'
		# here would never advance i and could loop forever
		if(header[1] == 0):
			break
		if(header[0] not in rectype or header[1] != 16):
			i += header[1]
			continue
addr = struct.unpack('Q', records[i+8:i+16])[0]
try:
fp.seek(addr)
first = fp.read(8)
except:
doError('Bad address 0x%x in %s' % (addr, sysvals.mempath), False)
rechead = struct.unpack('4sI', first)
recdata = fp.read(rechead[1]-8)
if(rechead[0] == 'FBPT'):
record = struct.unpack('HBBIQQQQQ', recdata)
if(output):
print('%s (%s)' % (rectype[header[0]], rechead[0]))
print(' Reset END : %u ns' % record[4])
print(' OS Loader LoadImage Start : %u ns' % record[5])
print(' OS Loader StartImage Start : %u ns' % record[6])
print(' ExitBootServices Entry : %u ns' % record[7])
print(' ExitBootServices Exit : %u ns' % record[8])
elif(rechead[0] == 'S3PT'):
if(output):
print('%s (%s)' % (rectype[header[0]], rechead[0]))
j = 0
while(j < len(recdata)):
				prechead = struct.unpack('HBB', recdata[j:j+4])
				if(prechead[0] not in prectype or prechead[1] == 0):
					# unknown or malformed sub-record: bail out instead
					# of spinning forever on an unchanged index
					break
if(prechead[0] == 0):
record = struct.unpack('IIQQ', recdata[j:j+prechead[1]])
fwData[1] = record[2]
if(output):
print(' %s' % prectype[prechead[0]])
print(' Resume Count : %u' % \
record[1])
print(' FullResume : %u ns' % \
record[2])
print(' AverageResume : %u ns' % \
record[3])
elif(prechead[0] == 1):
record = struct.unpack('QQ', recdata[j+4:j+prechead[1]])
fwData[0] = record[1] - record[0]
if(output):
print(' %s' % prectype[prechead[0]])
print(' SuspendStart : %u ns' % \
record[0])
print(' SuspendEnd : %u ns' % \
record[1])
print(' SuspendTime : %u ns' % \
fwData[0])
j += prechead[1]
if(output):
print('')
i += header[1]
fp.close()
return fwData
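# Layout of the 36-byte ACPI table header unpacked above with
# struct.unpack('4sIBB6s8sI4sI', buf[0:36]), matching the fields printed
# by getFPDT():
#	4s Signature ('FPDT')    I  Table Length    B  Revision
#	B  Checksum              6s OEM ID          8s OEM Table ID
#	I  OEM Revision          4s Creator ID      I  Creator Revision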
# Function: statusCheck
# Description:
# Verify that the requested command and options will work, and
# print the results to the terminal
# Output:
# True if the test will work, False if not
def statusCheck():
global sysvals
status = True
if(sysvals.android):
print('Checking the android system ...')
else:
print('Checking this system (%s)...' % platform.node())
# check if adb is connected to a device
if(sysvals.android):
res = 'NO'
out = os.popen(sysvals.adb+' get-state').read().strip()
if(out == 'device'):
res = 'YES'
print(' is android device connected: %s' % res)
if(res != 'YES'):
print(' Please connect the device before using this tool')
return False
# check we have root access
res = 'NO (No features of this tool will work!)'
if(sysvals.android):
out = os.popen(sysvals.adb+' shell id').read().strip()
if('root' in out):
res = 'YES'
else:
if(os.environ['USER'] == 'root'):
res = 'YES'
print(' have root access: %s' % res)
if(res != 'YES'):
if(sysvals.android):
print(' Try running "adb root" to restart the daemon as root')
else:
print(' Try running this script with sudo')
return False
# check sysfs is mounted
res = 'NO (No features of this tool will work!)'
if(sysvals.android):
out = os.popen(sysvals.adb+' shell ls '+\
sysvals.powerfile).read().strip()
if(out == sysvals.powerfile):
res = 'YES'
else:
if(os.path.exists(sysvals.powerfile)):
res = 'YES'
print(' is sysfs mounted: %s' % res)
if(res != 'YES'):
return False
# check target mode is a valid mode
res = 'NO'
modes = getModes()
if(sysvals.suspendmode in modes):
res = 'YES'
else:
status = False
print(' is "%s" a valid power mode: %s' % (sysvals.suspendmode, res))
if(res == 'NO'):
print(' valid power modes are: %s' % modes)
print(' please choose one with -m')
# check if the tool can unlock the device
if(sysvals.android):
res = 'YES'
out1 = os.popen(sysvals.adb+\
' shell dumpsys power | grep mScreenOn').read().strip()
out2 = os.popen(sysvals.adb+\
' shell input').read().strip()
if(not out1.startswith('mScreenOn') or not out2.startswith('usage')):
res = 'NO (wake the android device up before running the test)'
print(' can I unlock the screen: %s' % res)
# check if ftrace is available
res = 'NO'
ftgood = verifyFtrace()
if(ftgood):
res = 'YES'
elif(sysvals.usecallgraph):
status = False
print(' is ftrace supported: %s' % res)
# what data source are we using
res = 'DMESG'
if(ftgood):
sysvals.usetraceeventsonly = True
sysvals.usetraceevents = False
for e in sysvals.traceevents:
check = False
if(sysvals.android):
out = os.popen(sysvals.adb+' shell ls -d '+\
sysvals.epath+e).read().strip()
if(out == sysvals.epath+e):
check = True
else:
if(os.path.exists(sysvals.epath+e)):
check = True
if(not check):
sysvals.usetraceeventsonly = False
if(e == 'suspend_resume' and check):
sysvals.usetraceevents = True
if(sysvals.usetraceevents and sysvals.usetraceeventsonly):
res = 'FTRACE (all trace events found)'
elif(sysvals.usetraceevents):
res = 'DMESG and FTRACE (suspend_resume trace event found)'
print(' timeline data source: %s' % res)
# check if rtcwake
res = 'NO'
if(sysvals.rtcpath != ''):
res = 'YES'
elif(sysvals.rtcwake):
status = False
print(' is rtcwake supported: %s' % res)
return status
# Function: doError
# Description:
#	 generic error function for catastrophic failures
# Arguments:
# msg: the error message to print
# help: True if printHelp should be called after, False otherwise
def doError(msg, help):
if(help == True):
printHelp()
	print('ERROR: %s\n' % msg)
sys.exit()
# Function: doWarning
# Description:
# generic warning function for non-catastrophic anomalies
# Arguments:
# msg: the warning message to print
# file: If not empty, a filename to request be sent to the owner for debug
def doWarning(msg, file):
	print('/* %s */' % msg)
if(file):
print('/* For a fix, please send this'+\
' %s file to <todd.e.brandt@intel.com> */' % file)
# Function: rootCheck
# Description:
# quick check to see if we have root access
def rootCheck():
if(os.environ['USER'] != 'root'):
doError('This script must be run as root', False)
# Function: getArgInt
# Description:
# pull out an integer argument from the command line with checks
def getArgInt(name, args, min, max):
try:
arg = args.next()
except:
doError(name+': no argument supplied', True)
try:
val = int(arg)
except:
doError(name+': non-integer value given', True)
if(val < min or val > max):
doError(name+': value should be between %d and %d' % (min, max), True)
return val
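# Usage sketch for getArgInt() (hypothetical argument stream). Note that
# the min/max parameters intentionally shadow the builtins inside this
# function:
#	args = iter(['100'])
#	getArgInt('-x2delay', args, 0, 60000)  # returns 100
#	# a missing, non-integer, or out-of-range value calls doError()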
# Function: rerunTest
# Description:
# generate an output from an existing set of ftrace/dmesg logs
def rerunTest():
global sysvals
if(sysvals.ftracefile != ''):
doesTraceLogHaveTraceEvents()
if(sysvals.dmesgfile == '' and not sysvals.usetraceeventsonly):
doError('recreating this html output '+\
'requires a dmesg file', False)
sysvals.setOutputFile()
vprint('Output file: %s' % sysvals.htmlfile)
print('PROCESSING DATA')
if(sysvals.usetraceeventsonly):
testruns = parseTraceLog()
else:
testruns = loadKernelLog()
for data in testruns:
parseKernelLog(data)
if(sysvals.ftracefile != ''):
appendIncompleteTraceLog(testruns)
createHTML(testruns)
# Function: runTest
# Description:
# execute a suspend/resume, gather the logs, and generate the output
def runTest(subdir):
global sysvals
# prepare for the test
if(not sysvals.android):
initFtrace()
else:
initFtraceAndroid()
sysvals.initTestOutput(subdir)
vprint('Output files:\n %s' % sysvals.dmesgfile)
if(sysvals.usecallgraph or
sysvals.usetraceevents or
sysvals.usetraceeventsonly):
vprint(' %s' % sysvals.ftracefile)
vprint(' %s' % sysvals.htmlfile)
# execute the test
if(not sysvals.android):
executeSuspend()
else:
executeAndroidSuspend()
# analyze the data and create the html output
print('PROCESSING DATA')
if(sysvals.usetraceeventsonly):
# data for kernels 3.15 or newer is entirely in ftrace
testruns = parseTraceLog()
else:
# data for kernels older than 3.15 is primarily in dmesg
testruns = loadKernelLog()
for data in testruns:
parseKernelLog(data)
if(sysvals.usecallgraph or sysvals.usetraceevents):
appendIncompleteTraceLog(testruns)
createHTML(testruns)
# Function: runSummary
# Description:
# create a summary of tests in a sub-directory
def runSummary(subdir, output):
global sysvals
# get a list of ftrace output files
files = []
for dirname, dirnames, filenames in os.walk(subdir):
for filename in filenames:
if(re.match('.*_ftrace.txt', filename)):
files.append("%s/%s" % (dirname, filename))
# process the files in order and get an array of data objects
testruns = []
for file in sorted(files):
if output:
print("Test found in %s" % os.path.dirname(file))
sysvals.ftracefile = file
sysvals.dmesgfile = file.replace('_ftrace.txt', '_dmesg.txt')
doesTraceLogHaveTraceEvents()
sysvals.usecallgraph = False
if not sysvals.usetraceeventsonly:
if(not os.path.exists(sysvals.dmesgfile)):
print("Skipping %s: not a valid test input" % file)
continue
else:
if output:
f = os.path.basename(sysvals.ftracefile)
d = os.path.basename(sysvals.dmesgfile)
print("\tInput files: %s and %s" % (f, d))
testdata = loadKernelLog()
data = testdata[0]
parseKernelLog(data)
testdata = [data]
appendIncompleteTraceLog(testdata)
else:
if output:
print("\tInput file: %s" % os.path.basename(sysvals.ftracefile))
testdata = parseTraceLog()
data = testdata[0]
data.normalizeTime(data.tSuspended)
link = file.replace(subdir+'/', '').replace('_ftrace.txt', '.html')
data.outfile = link
testruns.append(data)
createHTMLSummarySimple(testruns, subdir+'/summary.html')
# Function: printHelp
# Description:
# print out the help text
def printHelp():
global sysvals
modes = getModes()
print('')
print('AnalyzeSuspend v%.1f' % sysvals.version)
print('Usage: sudo analyze_suspend.py <options>')
print('')
print('Description:')
print(' This tool is designed to assist kernel and OS developers in optimizing')
print(' their linux stack\'s suspend/resume time. Using a kernel image built')
print(' with a few extra options enabled, the tool will execute a suspend and')
print(' capture dmesg and ftrace data until resume is complete. This data is')
print(' transformed into a device timeline and an optional callgraph to give')
print(' a detailed view of which devices/subsystems are taking the most')
print(' time in suspend/resume.')
print('')
print(' Generates output files in subdirectory: suspend-mmddyy-HHMMSS')
print(' HTML output: <hostname>_<mode>.html')
print(' raw dmesg output: <hostname>_<mode>_dmesg.txt')
print(' raw ftrace output: <hostname>_<mode>_ftrace.txt')
print('')
print('Options:')
print(' [general]')
print(' -h Print this help text')
print(' -v Print the current tool version')
print(' -verbose Print extra information during execution and analysis')
print(' -status Test to see if the system is enabled to run this tool')
print(' -modes List available suspend modes')
	print(' -m mode Mode to initiate for suspend %s (default: %s)' % (modes, sysvals.suspendmode))
print(' -rtcwake t Use rtcwake to autoresume after <t> seconds (default: disabled)')
print(' [advanced]')
print(' -f Use ftrace to create device callgraphs (default: disabled)')
print(' -filter "d1 d2 ..." Filter out all but this list of dev names')
print(' -x2 Run two suspend/resumes back to back (default: disabled)')
print(' -x2delay t Minimum millisecond delay <t> between the two test runs (default: 0 ms)')
	print(' -postres t Time after resume completion to wait for post-resume events (default: 0 seconds)')
print(' -multi n d Execute <n> consecutive tests at <d> seconds intervals. The outputs will')
print(' be created in a new subdirectory with a summary page.')
print(' [utilities]')
print(' -fpdt Print out the contents of the ACPI Firmware Performance Data Table')
print(' -usbtopo Print out the current USB topology with power info')
print(' -usbauto Enable autosuspend for all connected USB devices')
print(' [android testing]')
print(' -adb binary Use the given adb binary to run the test on an android device.')
print(' The device should already be connected and with root access.')
print(' Commands will be executed on the device using "adb shell"')
print(' [re-analyze data from previous runs]')
print(' -ftrace ftracefile Create HTML output using ftrace input')
print(' -dmesg dmesgfile Create HTML output using dmesg (not needed for kernel >= 3.15)')
	print(' -summary directory Create a summary of all tests in this directory')
print('')
return True
# ----------------- MAIN --------------------
# exec start (skipped if script is loaded as library)
if __name__ == '__main__':
cmd = ''
cmdarg = ''
multitest = {'run': False, 'count': 0, 'delay': 0}
# loop through the command line arguments
args = iter(sys.argv[1:])
for arg in args:
if(arg == '-m'):
try:
val = args.next()
except:
doError('No mode supplied', True)
sysvals.suspendmode = val
elif(arg == '-adb'):
try:
val = args.next()
except:
doError('No adb binary supplied', True)
if(not os.path.exists(val)):
doError('file doesnt exist: %s' % val, False)
if(not os.access(val, os.X_OK)):
doError('file isnt executable: %s' % val, False)
try:
check = os.popen(val+' version').read().strip()
except:
doError('adb version failed to execute', False)
if(not re.match('Android Debug Bridge .*', check)):
doError('adb version failed to execute', False)
sysvals.adb = val
sysvals.android = True
elif(arg == '-x2'):
if(sysvals.postresumetime > 0):
doError('-x2 is not compatible with -postres', False)
sysvals.execcount = 2
elif(arg == '-x2delay'):
sysvals.x2delay = getArgInt('-x2delay', args, 0, 60000)
elif(arg == '-postres'):
if(sysvals.execcount != 1):
doError('-x2 is not compatible with -postres', False)
sysvals.postresumetime = getArgInt('-postres', args, 0, 3600)
elif(arg == '-f'):
sysvals.usecallgraph = True
elif(arg == '-modes'):
cmd = 'modes'
elif(arg == '-fpdt'):
cmd = 'fpdt'
elif(arg == '-usbtopo'):
cmd = 'usbtopo'
elif(arg == '-usbauto'):
cmd = 'usbauto'
elif(arg == '-status'):
cmd = 'status'
elif(arg == '-verbose'):
sysvals.verbose = True
elif(arg == '-v'):
print("Version %.1f" % sysvals.version)
sys.exit()
elif(arg == '-rtcwake'):
sysvals.rtcwake = True
sysvals.rtcwaketime = getArgInt('-rtcwake', args, 0, 3600)
elif(arg == '-multi'):
multitest['run'] = True
multitest['count'] = getArgInt('-multi n (exec count)', args, 2, 1000000)
multitest['delay'] = getArgInt('-multi d (delay between tests)', args, 0, 3600)
elif(arg == '-dmesg'):
try:
val = args.next()
except:
doError('No dmesg file supplied', True)
sysvals.notestrun = True
sysvals.dmesgfile = val
if(os.path.exists(sysvals.dmesgfile) == False):
doError('%s doesnt exist' % sysvals.dmesgfile, False)
elif(arg == '-ftrace'):
try:
val = args.next()
except:
doError('No ftrace file supplied', True)
sysvals.notestrun = True
sysvals.usecallgraph = True
sysvals.ftracefile = val
if(os.path.exists(sysvals.ftracefile) == False):
doError('%s doesnt exist' % sysvals.ftracefile, False)
elif(arg == '-summary'):
try:
val = args.next()
except:
doError('No directory supplied', True)
cmd = 'summary'
cmdarg = val
sysvals.notestrun = True
if(os.path.isdir(val) == False):
			doError('%s is not accessible' % val, False)
elif(arg == '-filter'):
try:
val = args.next()
except:
doError('No devnames supplied', True)
sysvals.setDeviceFilter(val)
elif(arg == '-h'):
printHelp()
sys.exit()
else:
doError('Invalid argument: '+arg, True)
# just run a utility command and exit
if(cmd != ''):
if(cmd == 'status'):
statusCheck()
elif(cmd == 'fpdt'):
if(sysvals.android):
doError('cannot read FPDT on android device', False)
getFPDT(True)
elif(cmd == 'usbtopo'):
if(sysvals.android):
doError('cannot read USB topology '+\
'on an android device', False)
detectUSB(True)
elif(cmd == 'modes'):
modes = getModes()
			print(modes)
elif(cmd == 'usbauto'):
setUSBDevicesAuto()
elif(cmd == 'summary'):
print("Generating a summary of folder \"%s\"" % cmdarg)
runSummary(cmdarg, True)
sys.exit()
# run test on android device
if(sysvals.android):
if(sysvals.usecallgraph):
doError('ftrace (-f) is not yet supported '+\
'in the android kernel', False)
if(sysvals.notestrun):
doError('cannot analyze test files on the '+\
'android device', False)
# if instructed, re-analyze existing data files
if(sysvals.notestrun):
rerunTest()
sys.exit()
# verify that we can run a test
if(not statusCheck()):
print('Check FAILED, aborting the test run!')
sys.exit()
if multitest['run']:
		# run multiple tests in a separate subdirectory
s = 'x%d' % multitest['count']
subdir = datetime.now().strftime('suspend-'+s+'-%m%d%y-%H%M%S')
os.mkdir(subdir)
for i in range(multitest['count']):
if(i != 0):
print('Waiting %d seconds...' % (multitest['delay']))
time.sleep(multitest['delay'])
print('TEST (%d/%d) START' % (i+1, multitest['count']))
runTest(subdir)
print('TEST (%d/%d) COMPLETE' % (i+1, multitest['count']))
runSummary(subdir, False)
else:
# run the test in the current directory
runTest(".")
| gpl-2.0 |
retomerz/intellij-community | python/lib/Lib/UserDict.py | 117 | 5729 | """A more or less complete user-defined wrapper around dictionary objects."""
class UserDict:
def __init__(self, dict=None, **kwargs):
self.data = {}
if dict is not None:
self.update(dict)
if len(kwargs):
self.update(kwargs)
def __repr__(self): return repr(self.data)
def __cmp__(self, dict):
if isinstance(dict, UserDict):
return cmp(self.data, dict.data)
else:
return cmp(self.data, dict)
def __len__(self): return len(self.data)
def __getitem__(self, key):
if key in self.data:
return self.data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
raise KeyError(key)
def __setitem__(self, key, item): self.data[key] = item
def __delitem__(self, key): del self.data[key]
def clear(self): self.data.clear()
def copy(self):
if self.__class__ is UserDict:
return UserDict(self.data.copy())
import copy
data = self.data
try:
self.data = {}
c = copy.copy(self)
finally:
self.data = data
c.update(self)
return c
def keys(self): return self.data.keys()
def items(self): return self.data.items()
def iteritems(self): return self.data.iteritems()
def iterkeys(self): return self.data.iterkeys()
def itervalues(self): return self.data.itervalues()
def values(self): return self.data.values()
def has_key(self, key): return self.data.has_key(key)
def update(self, dict=None, **kwargs):
if dict is None:
pass
elif isinstance(dict, UserDict):
self.data.update(dict.data)
elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
self.data.update(dict)
else:
for k, v in dict.items():
self[k] = v
if len(kwargs):
self.data.update(kwargs)
def get(self, key, failobj=None):
if not self.has_key(key):
return failobj
return self[key]
def setdefault(self, key, failobj=None):
if not self.has_key(key):
self[key] = failobj
return self[key]
def pop(self, key, *args):
return self.data.pop(key, *args)
def popitem(self):
return self.data.popitem()
def __contains__(self, key):
return key in self.data
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
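# Usage sketch: UserDict keeps its state in self.data, so subclasses can
# override item access without touching a real dict's C internals (the
# class name below is illustrative):
#
#   class LowerKeyDict(UserDict):
#       def __setitem__(self, key, item):
#           self.data[key.lower()] = item
#
#   d = LowerKeyDict()
#   d['KEY'] = 1    # stored as d.data['key']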
class IterableUserDict(UserDict):
def __iter__(self):
return iter(self.data)
class DictMixin:
# Mixin defining all dictionary methods for classes that already have
# a minimum dictionary interface including getitem, setitem, delitem,
# and keys. Without knowledge of the subclass constructor, the mixin
# does not define __init__() or copy(). In addition to the four base
# methods, progressively more efficiency comes with defining
# __contains__(), __iter__(), and iteritems().
# second level definitions support higher levels
def __iter__(self):
for k in self.keys():
yield k
def has_key(self, key):
try:
value = self[key]
except KeyError:
return False
return True
def __contains__(self, key):
return self.has_key(key)
# third level takes advantage of second level definitions
def iteritems(self):
for k in self:
yield (k, self[k])
def iterkeys(self):
return self.__iter__()
# fourth level uses definitions from lower levels
def itervalues(self):
for _, v in self.iteritems():
yield v
def values(self):
return [v for _, v in self.iteritems()]
def items(self):
return list(self.iteritems())
def clear(self):
for key in self.keys():
del self[key]
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def pop(self, key, *args):
if len(args) > 1:
raise TypeError, "pop expected at most 2 arguments, got "\
+ repr(1 + len(args))
try:
value = self[key]
except KeyError:
if args:
return args[0]
raise
del self[key]
return value
def popitem(self):
try:
k, v = self.iteritems().next()
except StopIteration:
raise KeyError, 'container is empty'
del self[k]
return (k, v)
def update(self, other=None, **kwargs):
# Make progressively weaker assumptions about "other"
if other is None:
pass
elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups
for k, v in other.iteritems():
self[k] = v
elif hasattr(other, 'keys'):
for k in other.keys():
self[k] = other[k]
else:
for k, v in other:
self[k] = v
if kwargs:
self.update(kwargs)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __repr__(self):
return repr(dict(self.iteritems()))
def __cmp__(self, other):
if other is None:
return 1
if isinstance(other, DictMixin):
other = dict(other.iteritems())
return cmp(dict(self.iteritems()), other)
def __len__(self):
return len(self.keys())
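# Usage sketch: a minimal mapping built on DictMixin. Only the four base
# methods are defined; the mixin supplies the rest (illustrative only):
#
#   class SimpleMapping(DictMixin):
#       def __init__(self):
#           self._store = {}
#       def __getitem__(self, key):
#           return self._store[key]
#       def __setitem__(self, key, value):
#           self._store[key] = value
#       def __delitem__(self, key):
#           del self._store[key]
#       def keys(self):
#           return self._store.keys()
#
#   m = SimpleMapping()
#   m['a'] = 1
#   m.items()       # -> [('a', 1)], provided by the mixin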
| apache-2.0 |
fhaoquan/kbengine | kbe/src/lib/python/Lib/test/test_collections.py | 60 | 63093 | """Unit tests for collections.py."""
import unittest, doctest, operator
from test.support import TESTFN, forget, unlink
import inspect
from test import support
from collections import namedtuple, Counter, OrderedDict, _count_elements
from test import mapping_tests
import pickle, copy
from random import randrange, shuffle
import keyword
import re
import sys
from collections import UserDict
from collections import ChainMap
from collections.abc import Hashable, Iterable, Iterator
from collections.abc import Sized, Container, Callable
from collections.abc import Set, MutableSet
from collections.abc import Mapping, MutableMapping, KeysView, ItemsView
from collections.abc import Sequence, MutableSequence
from collections.abc import ByteString
################################################################################
### ChainMap (helper class for configparser and the string module)
################################################################################
class TestChainMap(unittest.TestCase):
def test_basics(self):
c = ChainMap()
c['a'] = 1
c['b'] = 2
d = c.new_child()
d['b'] = 20
d['c'] = 30
self.assertEqual(d.maps, [{'b':20, 'c':30}, {'a':1, 'b':2}]) # check internal state
self.assertEqual(d.items(), dict(a=1, b=20, c=30).items()) # check items/iter/getitem
self.assertEqual(len(d), 3) # check len
for key in 'abc': # check contains
self.assertIn(key, d)
for k, v in dict(a=1, b=20, c=30, z=100).items(): # check get
self.assertEqual(d.get(k, 100), v)
del d['b'] # unmask a value
self.assertEqual(d.maps, [{'c':30}, {'a':1, 'b':2}]) # check internal state
self.assertEqual(d.items(), dict(a=1, b=2, c=30).items()) # check items/iter/getitem
self.assertEqual(len(d), 3) # check len
for key in 'abc': # check contains
self.assertIn(key, d)
for k, v in dict(a=1, b=2, c=30, z=100).items(): # check get
self.assertEqual(d.get(k, 100), v)
self.assertIn(repr(d), [ # check repr
type(d).__name__ + "({'c': 30}, {'a': 1, 'b': 2})",
type(d).__name__ + "({'c': 30}, {'b': 2, 'a': 1})"
])
for e in d.copy(), copy.copy(d): # check shallow copies
self.assertEqual(d, e)
self.assertEqual(d.maps, e.maps)
self.assertIsNot(d, e)
self.assertIsNot(d.maps[0], e.maps[0])
for m1, m2 in zip(d.maps[1:], e.maps[1:]):
self.assertIs(m1, m2)
for e in [pickle.loads(pickle.dumps(d)),
copy.deepcopy(d),
eval(repr(d))
]: # check deep copies
self.assertEqual(d, e)
self.assertEqual(d.maps, e.maps)
self.assertIsNot(d, e)
for m1, m2 in zip(d.maps, e.maps):
self.assertIsNot(m1, m2, e)
f = d.new_child()
f['b'] = 5
self.assertEqual(f.maps, [{'b': 5}, {'c':30}, {'a':1, 'b':2}])
self.assertEqual(f.parents.maps, [{'c':30}, {'a':1, 'b':2}]) # check parents
self.assertEqual(f['b'], 5) # find first in chain
self.assertEqual(f.parents['b'], 2) # look beyond maps[0]
    def test_constructor(self):
self.assertEqual(ChainMap().maps, [{}]) # no-args --> one new dict
self.assertEqual(ChainMap({1:2}).maps, [{1:2}]) # 1 arg --> list
def test_bool(self):
self.assertFalse(ChainMap())
self.assertFalse(ChainMap({}, {}))
self.assertTrue(ChainMap({1:2}, {}))
self.assertTrue(ChainMap({}, {1:2}))
def test_missing(self):
class DefaultChainMap(ChainMap):
def __missing__(self, key):
return 999
d = DefaultChainMap(dict(a=1, b=2), dict(b=20, c=30))
for k, v in dict(a=1, b=2, c=30, d=999).items():
self.assertEqual(d[k], v) # check __getitem__ w/missing
for k, v in dict(a=1, b=2, c=30, d=77).items():
self.assertEqual(d.get(k, 77), v) # check get() w/ missing
for k, v in dict(a=True, b=True, c=True, d=False).items():
self.assertEqual(k in d, v) # check __contains__ w/missing
self.assertEqual(d.pop('a', 1001), 1, d)
self.assertEqual(d.pop('a', 1002), 1002) # check pop() w/missing
self.assertEqual(d.popitem(), ('b', 2)) # check popitem() w/missing
with self.assertRaises(KeyError):
d.popitem()
def test_dict_coercion(self):
d = ChainMap(dict(a=1, b=2), dict(b=20, c=30))
self.assertEqual(dict(d), dict(a=1, b=2, c=30))
self.assertEqual(dict(d.items()), dict(a=1, b=2, c=30))
def test_new_child(self):
'Tests for changes for issue #16613.'
c = ChainMap()
c['a'] = 1
c['b'] = 2
m = {'b':20, 'c': 30}
d = c.new_child(m)
self.assertEqual(d.maps, [{'b':20, 'c':30}, {'a':1, 'b':2}]) # check internal state
self.assertIs(m, d.maps[0])
# Use a different map than a dict
class lowerdict(dict):
def __getitem__(self, key):
if isinstance(key, str):
key = key.lower()
return dict.__getitem__(self, key)
def __contains__(self, key):
if isinstance(key, str):
key = key.lower()
return dict.__contains__(self, key)
c = ChainMap()
c['a'] = 1
c['b'] = 2
m = lowerdict(b=20, c=30)
d = c.new_child(m)
self.assertIs(m, d.maps[0])
for key in 'abc': # check contains
self.assertIn(key, d)
for k, v in dict(a=1, B=20, C=30, z=100).items(): # check get
self.assertEqual(d.get(k, 100), v)
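# Quick ChainMap reference (illustrative, mirroring the assertions above):
#   c = ChainMap({'b': 20}, {'a': 1, 'b': 2})
#   c['b']    # -> 20, the first map in the chain wins
#   c['a']    # -> 1, found in a later map
#   c.maps    # -> [{'b': 20}, {'a': 1, 'b': 2}]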
################################################################################
### Named Tuples
################################################################################
TestNT = namedtuple('TestNT', 'x y z') # type used for pickle tests
class TestNamedTuple(unittest.TestCase):
def test_factory(self):
Point = namedtuple('Point', 'x y')
self.assertEqual(Point.__name__, 'Point')
self.assertEqual(Point.__slots__, ())
self.assertEqual(Point.__module__, __name__)
self.assertEqual(Point.__getitem__, tuple.__getitem__)
self.assertEqual(Point._fields, ('x', 'y'))
self.assertIn('class Point(tuple)', Point._source)
self.assertRaises(ValueError, namedtuple, 'abc%', 'efg ghi') # type has non-alpha char
self.assertRaises(ValueError, namedtuple, 'class', 'efg ghi') # type has keyword
self.assertRaises(ValueError, namedtuple, '9abc', 'efg ghi') # type starts with digit
self.assertRaises(ValueError, namedtuple, 'abc', 'efg g%hi') # field with non-alpha char
self.assertRaises(ValueError, namedtuple, 'abc', 'abc class') # field has keyword
self.assertRaises(ValueError, namedtuple, 'abc', '8efg 9ghi') # field starts with digit
self.assertRaises(ValueError, namedtuple, 'abc', '_efg ghi') # field with leading underscore
self.assertRaises(ValueError, namedtuple, 'abc', 'efg efg ghi') # duplicate field
namedtuple('Point0', 'x1 y2') # Verify that numbers are allowed in names
namedtuple('_', 'a b c') # Test leading underscores in a typename
nt = namedtuple('nt', 'the quick brown fox') # check unicode input
self.assertNotIn("u'", repr(nt._fields))
nt = namedtuple('nt', ('the', 'quick')) # check unicode input
self.assertNotIn("u'", repr(nt._fields))
self.assertRaises(TypeError, Point._make, [11]) # catch too few args
self.assertRaises(TypeError, Point._make, [11, 22, 33]) # catch too many args
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_factory_doc_attr(self):
Point = namedtuple('Point', 'x y')
self.assertEqual(Point.__doc__, 'Point(x, y)')
def test_name_fixer(self):
for spec, renamed in [
[('efg', 'g%hi'), ('efg', '_1')], # field with non-alpha char
[('abc', 'class'), ('abc', '_1')], # field has keyword
[('8efg', '9ghi'), ('_0', '_1')], # field starts with digit
[('abc', '_efg'), ('abc', '_1')], # field with leading underscore
[('abc', 'efg', 'efg', 'ghi'), ('abc', 'efg', '_2', 'ghi')], # duplicate field
[('abc', '', 'x'), ('abc', '_1', 'x')], # fieldname is a space
]:
self.assertEqual(namedtuple('NT', spec, rename=True)._fields, renamed)
def test_instance(self):
Point = namedtuple('Point', 'x y')
p = Point(11, 22)
self.assertEqual(p, Point(x=11, y=22))
self.assertEqual(p, Point(11, y=22))
self.assertEqual(p, Point(y=22, x=11))
self.assertEqual(p, Point(*(11, 22)))
self.assertEqual(p, Point(**dict(x=11, y=22)))
self.assertRaises(TypeError, Point, 1) # too few args
self.assertRaises(TypeError, Point, 1, 2, 3) # too many args
self.assertRaises(TypeError, eval, 'Point(XXX=1, y=2)', locals()) # wrong keyword argument
self.assertRaises(TypeError, eval, 'Point(x=1)', locals()) # missing keyword argument
self.assertEqual(repr(p), 'Point(x=11, y=22)')
self.assertNotIn('__weakref__', dir(p))
self.assertEqual(p, Point._make([11, 22])) # test _make classmethod
self.assertEqual(p._fields, ('x', 'y')) # test _fields attribute
self.assertEqual(p._replace(x=1), (1, 22)) # test _replace method
self.assertEqual(p._asdict(), dict(x=11, y=22)) # test _asdict method
self.assertEqual(vars(p), p._asdict()) # verify that vars() works
try:
p._replace(x=1, error=2)
except ValueError:
pass
else:
self._fail('Did not detect an incorrect fieldname')
# verify that field string can have commas
Point = namedtuple('Point', 'x, y')
p = Point(x=11, y=22)
self.assertEqual(repr(p), 'Point(x=11, y=22)')
# verify that fieldspec can be a non-string sequence
Point = namedtuple('Point', ('x', 'y'))
p = Point(x=11, y=22)
self.assertEqual(repr(p), 'Point(x=11, y=22)')
def test_tupleness(self):
Point = namedtuple('Point', 'x y')
p = Point(11, 22)
self.assertIsInstance(p, tuple)
self.assertEqual(p, (11, 22)) # matches a real tuple
self.assertEqual(tuple(p), (11, 22)) # coercable to a real tuple
self.assertEqual(list(p), [11, 22]) # coercable to a list
self.assertEqual(max(p), 22) # iterable
self.assertEqual(max(*p), 22) # star-able
x, y = p
self.assertEqual(p, (x, y)) # unpacks like a tuple
self.assertEqual((p[0], p[1]), (11, 22)) # indexable like a tuple
self.assertRaises(IndexError, p.__getitem__, 3)
self.assertEqual(p.x, x)
self.assertEqual(p.y, y)
self.assertRaises(AttributeError, eval, 'p.z', locals())
def test_odd_sizes(self):
Zero = namedtuple('Zero', '')
self.assertEqual(Zero(), ())
self.assertEqual(Zero._make([]), ())
self.assertEqual(repr(Zero()), 'Zero()')
self.assertEqual(Zero()._asdict(), {})
self.assertEqual(Zero()._fields, ())
Dot = namedtuple('Dot', 'd')
self.assertEqual(Dot(1), (1,))
self.assertEqual(Dot._make([1]), (1,))
self.assertEqual(Dot(1).d, 1)
self.assertEqual(repr(Dot(1)), 'Dot(d=1)')
self.assertEqual(Dot(1)._asdict(), {'d':1})
self.assertEqual(Dot(1)._replace(d=999), (999,))
self.assertEqual(Dot(1)._fields, ('d',))
# n = 5000
n = 254 # SyntaxError: more than 255 arguments:
import string, random
names = list(set(''.join([random.choice(string.ascii_letters)
for j in range(10)]) for i in range(n)))
n = len(names)
Big = namedtuple('Big', names)
b = Big(*range(n))
self.assertEqual(b, tuple(range(n)))
self.assertEqual(Big._make(range(n)), tuple(range(n)))
for pos, name in enumerate(names):
self.assertEqual(getattr(b, name), pos)
repr(b) # make sure repr() doesn't blow-up
d = b._asdict()
d_expected = dict(zip(names, range(n)))
self.assertEqual(d, d_expected)
b2 = b._replace(**dict([(names[1], 999),(names[-5], 42)]))
b2_expected = list(range(n))
b2_expected[1] = 999
b2_expected[-5] = 42
self.assertEqual(b2, tuple(b2_expected))
self.assertEqual(b._fields, tuple(names))
def test_pickle(self):
p = TestNT(x=10, y=20, z=30)
for module in (pickle,):
loads = getattr(module, 'loads')
dumps = getattr(module, 'dumps')
for protocol in range(-1, module.HIGHEST_PROTOCOL + 1):
q = loads(dumps(p, protocol))
self.assertEqual(p, q)
self.assertEqual(p._fields, q._fields)
self.assertNotIn(b'OrderedDict', dumps(p, protocol))
def test_copy(self):
p = TestNT(x=10, y=20, z=30)
for copier in copy.copy, copy.deepcopy:
q = copier(p)
self.assertEqual(p, q)
self.assertEqual(p._fields, q._fields)
def test_name_conflicts(self):
# Some names like "self", "cls", "tuple", "itemgetter", and "property"
# failed when used as field names. Test to make sure these now work.
T = namedtuple('T', 'itemgetter property self cls tuple')
t = T(1, 2, 3, 4, 5)
self.assertEqual(t, (1,2,3,4,5))
newt = t._replace(itemgetter=10, property=20, self=30, cls=40, tuple=50)
self.assertEqual(newt, (10,20,30,40,50))
# Broader test of all interesting names in a template
with support.captured_stdout() as template:
T = namedtuple('T', 'x', verbose=True)
words = set(re.findall('[A-Za-z]+', template.getvalue()))
words -= set(keyword.kwlist)
T = namedtuple('T', words)
# test __new__
values = tuple(range(len(words)))
t = T(*values)
self.assertEqual(t, values)
t = T(**dict(zip(T._fields, values)))
self.assertEqual(t, values)
# test _make
t = T._make(values)
self.assertEqual(t, values)
# exercise __repr__
repr(t)
# test _asdict
self.assertEqual(t._asdict(), dict(zip(T._fields, values)))
# test _replace
t = T._make(values)
newvalues = tuple(v*10 for v in values)
newt = t._replace(**dict(zip(T._fields, newvalues)))
self.assertEqual(newt, newvalues)
# test _fields
self.assertEqual(T._fields, tuple(words))
# test __getnewargs__
self.assertEqual(t.__getnewargs__(), values)
def test_repr(self):
with support.captured_stdout() as template:
A = namedtuple('A', 'x', verbose=True)
self.assertEqual(repr(A(1)), 'A(x=1)')
# repr should show the name of the subclass
class B(A):
pass
self.assertEqual(repr(B(1)), 'B(x=1)')
def test_source(self):
# verify that _source can be run through exec()
tmp = namedtuple('NTColor', 'red green blue')
globals().pop('NTColor', None) # remove artifacts from other tests
exec(tmp._source, globals())
self.assertIn('NTColor', globals())
c = NTColor(10, 20, 30)
self.assertEqual((c.red, c.green, c.blue), (10, 20, 30))
self.assertEqual(NTColor._fields, ('red', 'green', 'blue'))
globals().pop('NTColor', None) # clean-up after this test
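# Quick namedtuple reference (illustrative, mirroring the assertions above):
#   Point = namedtuple('Point', 'x y')
#   p = Point(11, y=22)
#   (p.x, p[1])       # -> (11, 22); attribute and index access both work
#   p._replace(x=1)   # -> Point(x=1, y=22), a new tuple
#   p._asdict()       # -> mapping equal to {'x': 11, 'y': 22}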
################################################################################
### Abstract Base Classes
################################################################################
class ABCTestCase(unittest.TestCase):
def validate_abstract_methods(self, abc, *names):
methodstubs = dict.fromkeys(names, lambda s, *args: 0)
        # everything should work when all required methods are present
C = type('C', (abc,), methodstubs)
C()
# instantiation should fail if a required method is missing
for name in names:
stubs = methodstubs.copy()
del stubs[name]
C = type('C', (abc,), stubs)
self.assertRaises(TypeError, C, name)
def validate_isinstance(self, abc, name):
stub = lambda s, *args: 0
C = type('C', (object,), {'__hash__': None})
setattr(C, name, stub)
self.assertIsInstance(C(), abc)
self.assertTrue(issubclass(C, abc))
C = type('C', (object,), {'__hash__': None})
self.assertNotIsInstance(C(), abc)
self.assertFalse(issubclass(C, abc))
def validate_comparison(self, instance):
ops = ['lt', 'gt', 'le', 'ge', 'ne', 'or', 'and', 'xor', 'sub']
operators = {}
for op in ops:
name = '__' + op + '__'
operators[name] = getattr(operator, name)
class Other:
def __init__(self):
self.right_side = False
def __eq__(self, other):
self.right_side = True
return True
__lt__ = __eq__
__gt__ = __eq__
__le__ = __eq__
__ge__ = __eq__
__ne__ = __eq__
__ror__ = __eq__
__rand__ = __eq__
__rxor__ = __eq__
__rsub__ = __eq__
for name, op in operators.items():
if not hasattr(instance, name):
continue
other = Other()
op(instance, other)
self.assertTrue(other.right_side,'Right side not called for %s.%s'
% (type(instance), name))
class TestOneTrickPonyABCs(ABCTestCase):
def test_Hashable(self):
# Check some non-hashables
non_samples = [bytearray(), list(), set(), dict()]
for x in non_samples:
self.assertNotIsInstance(x, Hashable)
self.assertFalse(issubclass(type(x), Hashable), repr(type(x)))
# Check some hashables
samples = [None,
int(), float(), complex(),
str(),
tuple(), frozenset(),
int, list, object, type, bytes()
]
for x in samples:
self.assertIsInstance(x, Hashable)
self.assertTrue(issubclass(type(x), Hashable), repr(type(x)))
self.assertRaises(TypeError, Hashable)
# Check direct subclassing
class H(Hashable):
def __hash__(self):
return super().__hash__()
self.assertEqual(hash(H()), 0)
self.assertFalse(issubclass(int, H))
self.validate_abstract_methods(Hashable, '__hash__')
self.validate_isinstance(Hashable, '__hash__')
def test_Iterable(self):
# Check some non-iterables
non_samples = [None, 42, 3.14, 1j]
for x in non_samples:
self.assertNotIsInstance(x, Iterable)
self.assertFalse(issubclass(type(x), Iterable), repr(type(x)))
# Check some iterables
samples = [bytes(), str(),
tuple(), list(), set(), frozenset(), dict(),
dict().keys(), dict().items(), dict().values(),
(lambda: (yield))(),
(x for x in []),
]
for x in samples:
self.assertIsInstance(x, Iterable)
self.assertTrue(issubclass(type(x), Iterable), repr(type(x)))
# Check direct subclassing
class I(Iterable):
def __iter__(self):
return super().__iter__()
self.assertEqual(list(I()), [])
self.assertFalse(issubclass(str, I))
self.validate_abstract_methods(Iterable, '__iter__')
self.validate_isinstance(Iterable, '__iter__')
def test_Iterator(self):
non_samples = [None, 42, 3.14, 1j, b"", "", (), [], {}, set()]
for x in non_samples:
self.assertNotIsInstance(x, Iterator)
self.assertFalse(issubclass(type(x), Iterator), repr(type(x)))
samples = [iter(bytes()), iter(str()),
iter(tuple()), iter(list()), iter(dict()),
iter(set()), iter(frozenset()),
iter(dict().keys()), iter(dict().items()),
iter(dict().values()),
(lambda: (yield))(),
(x for x in []),
]
for x in samples:
self.assertIsInstance(x, Iterator)
self.assertTrue(issubclass(type(x), Iterator), repr(type(x)))
self.validate_abstract_methods(Iterator, '__next__', '__iter__')
# Issue 10565
class NextOnly:
def __next__(self):
yield 1
raise StopIteration
self.assertNotIsInstance(NextOnly(), Iterator)
def test_Sized(self):
non_samples = [None, 42, 3.14, 1j,
(lambda: (yield))(),
(x for x in []),
]
for x in non_samples:
self.assertNotIsInstance(x, Sized)
self.assertFalse(issubclass(type(x), Sized), repr(type(x)))
samples = [bytes(), str(),
tuple(), list(), set(), frozenset(), dict(),
dict().keys(), dict().items(), dict().values(),
]
for x in samples:
self.assertIsInstance(x, Sized)
self.assertTrue(issubclass(type(x), Sized), repr(type(x)))
self.validate_abstract_methods(Sized, '__len__')
self.validate_isinstance(Sized, '__len__')
def test_Container(self):
non_samples = [None, 42, 3.14, 1j,
(lambda: (yield))(),
(x for x in []),
]
for x in non_samples:
self.assertNotIsInstance(x, Container)
self.assertFalse(issubclass(type(x), Container), repr(type(x)))
samples = [bytes(), str(),
tuple(), list(), set(), frozenset(), dict(),
dict().keys(), dict().items(),
]
for x in samples:
self.assertIsInstance(x, Container)
self.assertTrue(issubclass(type(x), Container), repr(type(x)))
self.validate_abstract_methods(Container, '__contains__')
self.validate_isinstance(Container, '__contains__')
def test_Callable(self):
non_samples = [None, 42, 3.14, 1j,
"", b"", (), [], {}, set(),
(lambda: (yield))(),
(x for x in []),
]
for x in non_samples:
self.assertNotIsInstance(x, Callable)
self.assertFalse(issubclass(type(x), Callable), repr(type(x)))
samples = [lambda: None,
type, int, object,
len,
list.append, [].append,
]
for x in samples:
self.assertIsInstance(x, Callable)
self.assertTrue(issubclass(type(x), Callable), repr(type(x)))
self.validate_abstract_methods(Callable, '__call__')
self.validate_isinstance(Callable, '__call__')
def test_direct_subclassing(self):
for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
class C(B):
pass
self.assertTrue(issubclass(C, B))
self.assertFalse(issubclass(int, C))
def test_registration(self):
for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
class C:
__hash__ = None # Make sure it isn't hashable by default
self.assertFalse(issubclass(C, B), B.__name__)
B.register(C)
self.assertTrue(issubclass(C, B))
class WithSet(MutableSet):
def __init__(self, it=()):
self.data = set(it)
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
def __contains__(self, item):
return item in self.data
def add(self, item):
self.data.add(item)
def discard(self, item):
self.data.discard(item)
class TestCollectionABCs(ABCTestCase):
# XXX For now, we only test some virtual inheritance properties.
# We should also test the proper behavior of the collection ABCs
# as real base classes or mix-in classes.
def test_Set(self):
for sample in [set, frozenset]:
self.assertIsInstance(sample(), Set)
self.assertTrue(issubclass(sample, Set))
self.validate_abstract_methods(Set, '__contains__', '__iter__', '__len__')
class MySet(Set):
def __contains__(self, x):
return False
def __len__(self):
return 0
def __iter__(self):
return iter([])
self.validate_comparison(MySet())
def test_hash_Set(self):
class OneTwoThreeSet(Set):
def __init__(self):
self.contents = [1, 2, 3]
def __contains__(self, x):
return x in self.contents
def __len__(self):
return len(self.contents)
def __iter__(self):
return iter(self.contents)
def __hash__(self):
return self._hash()
a, b = OneTwoThreeSet(), OneTwoThreeSet()
self.assertTrue(hash(a) == hash(b))
def test_MutableSet(self):
self.assertIsInstance(set(), MutableSet)
self.assertTrue(issubclass(set, MutableSet))
self.assertNotIsInstance(frozenset(), MutableSet)
self.assertFalse(issubclass(frozenset, MutableSet))
self.validate_abstract_methods(MutableSet, '__contains__', '__iter__', '__len__',
'add', 'discard')
def test_issue_5647(self):
# MutableSet.__iand__ mutated the set during iteration
s = WithSet('abcd')
s &= WithSet('cdef') # This used to fail
self.assertEqual(set(s), set('cd'))
def test_issue_4920(self):
# MutableSet.pop() method did not work
class MySet(MutableSet):
__slots__=['__s']
def __init__(self,items=None):
if items is None:
items=[]
self.__s=set(items)
def __contains__(self,v):
return v in self.__s
def __iter__(self):
return iter(self.__s)
def __len__(self):
return len(self.__s)
def add(self,v):
result=v not in self.__s
self.__s.add(v)
return result
def discard(self,v):
result=v in self.__s
self.__s.discard(v)
return result
def __repr__(self):
return "MySet(%s)" % repr(list(self))
s = MySet([5,43,2,1])
self.assertEqual(s.pop(), 1)
def test_issue8750(self):
empty = WithSet()
full = WithSet(range(10))
s = WithSet(full)
s -= s
self.assertEqual(s, empty)
s = WithSet(full)
s ^= s
self.assertEqual(s, empty)
s = WithSet(full)
s &= s
self.assertEqual(s, full)
s |= s
self.assertEqual(s, full)
def test_issue16373(self):
# Recursion error comparing comparable and noncomparable
# Set instances
class MyComparableSet(Set):
def __contains__(self, x):
return False
def __len__(self):
return 0
def __iter__(self):
return iter([])
class MyNonComparableSet(Set):
def __contains__(self, x):
return False
def __len__(self):
return 0
def __iter__(self):
return iter([])
def __le__(self, x):
return NotImplemented
def __lt__(self, x):
return NotImplemented
cs = MyComparableSet()
ncs = MyNonComparableSet()
self.assertFalse(ncs < cs)
self.assertTrue(ncs <= cs)
self.assertFalse(ncs > cs)
self.assertTrue(ncs >= cs)
def assertSameSet(self, s1, s2):
# coerce both to a real set then check equality
self.assertSetEqual(set(s1), set(s2))
def test_Set_interoperability_with_real_sets(self):
# Issue: 8743
class ListSet(Set):
def __init__(self, elements=()):
self.data = []
for elem in elements:
if elem not in self.data:
self.data.append(elem)
def __contains__(self, elem):
return elem in self.data
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def __repr__(self):
return 'Set({!r})'.format(self.data)
r1 = set('abc')
r2 = set('bcd')
r3 = set('abcde')
f1 = ListSet('abc')
f2 = ListSet('bcd')
f3 = ListSet('abcde')
l1 = list('abccba')
l2 = list('bcddcb')
l3 = list('abcdeedcba')
target = r1 & r2
self.assertSameSet(f1 & f2, target)
self.assertSameSet(f1 & r2, target)
self.assertSameSet(r2 & f1, target)
self.assertSameSet(f1 & l2, target)
target = r1 | r2
self.assertSameSet(f1 | f2, target)
self.assertSameSet(f1 | r2, target)
self.assertSameSet(r2 | f1, target)
self.assertSameSet(f1 | l2, target)
fwd_target = r1 - r2
rev_target = r2 - r1
self.assertSameSet(f1 - f2, fwd_target)
self.assertSameSet(f2 - f1, rev_target)
self.assertSameSet(f1 - r2, fwd_target)
self.assertSameSet(f2 - r1, rev_target)
self.assertSameSet(r1 - f2, fwd_target)
self.assertSameSet(r2 - f1, rev_target)
self.assertSameSet(f1 - l2, fwd_target)
self.assertSameSet(f2 - l1, rev_target)
target = r1 ^ r2
self.assertSameSet(f1 ^ f2, target)
self.assertSameSet(f1 ^ r2, target)
self.assertSameSet(r2 ^ f1, target)
self.assertSameSet(f1 ^ l2, target)
# Don't change the following to use assertLess or other
# "more specific" unittest assertions. The current
# assertTrue/assertFalse style makes the pattern of test
# case combinations clear and allows us to know for sure
# the exact operator being invoked.
# proper subset
self.assertTrue(f1 < f3)
self.assertFalse(f1 < f1)
self.assertFalse(f1 < f2)
self.assertTrue(r1 < f3)
self.assertFalse(r1 < f1)
self.assertFalse(r1 < f2)
self.assertTrue(r1 < r3)
self.assertFalse(r1 < r1)
self.assertFalse(r1 < r2)
with self.assertRaises(TypeError):
f1 < l3
with self.assertRaises(TypeError):
f1 < l1
with self.assertRaises(TypeError):
f1 < l2
# any subset
self.assertTrue(f1 <= f3)
self.assertTrue(f1 <= f1)
self.assertFalse(f1 <= f2)
self.assertTrue(r1 <= f3)
self.assertTrue(r1 <= f1)
self.assertFalse(r1 <= f2)
self.assertTrue(r1 <= r3)
self.assertTrue(r1 <= r1)
self.assertFalse(r1 <= r2)
with self.assertRaises(TypeError):
f1 <= l3
with self.assertRaises(TypeError):
f1 <= l1
with self.assertRaises(TypeError):
f1 <= l2
# proper superset
self.assertTrue(f3 > f1)
self.assertFalse(f1 > f1)
self.assertFalse(f2 > f1)
self.assertTrue(r3 > r1)
self.assertFalse(f1 > r1)
self.assertFalse(f2 > r1)
self.assertTrue(r3 > r1)
self.assertFalse(r1 > r1)
self.assertFalse(r2 > r1)
with self.assertRaises(TypeError):
f1 > l3
with self.assertRaises(TypeError):
f1 > l1
with self.assertRaises(TypeError):
f1 > l2
# any superset
self.assertTrue(f3 >= f1)
self.assertTrue(f1 >= f1)
self.assertFalse(f2 >= f1)
self.assertTrue(r3 >= r1)
self.assertTrue(f1 >= r1)
self.assertFalse(f2 >= r1)
self.assertTrue(r3 >= r1)
self.assertTrue(r1 >= r1)
self.assertFalse(r2 >= r1)
with self.assertRaises(TypeError):
f1 >= l3
with self.assertRaises(TypeError):
f1 >=l1
with self.assertRaises(TypeError):
f1 >= l2
# equality
self.assertTrue(f1 == f1)
self.assertTrue(r1 == f1)
self.assertTrue(f1 == r1)
self.assertFalse(f1 == f3)
self.assertFalse(r1 == f3)
self.assertFalse(f1 == r3)
self.assertFalse(f1 == l3)
self.assertFalse(f1 == l1)
self.assertFalse(f1 == l2)
# inequality
self.assertFalse(f1 != f1)
self.assertFalse(r1 != f1)
self.assertFalse(f1 != r1)
self.assertTrue(f1 != f3)
self.assertTrue(r1 != f3)
self.assertTrue(f1 != r3)
self.assertTrue(f1 != l3)
self.assertTrue(f1 != l1)
self.assertTrue(f1 != l2)
def test_Mapping(self):
for sample in [dict]:
self.assertIsInstance(sample(), Mapping)
self.assertTrue(issubclass(sample, Mapping))
self.validate_abstract_methods(Mapping, '__contains__', '__iter__', '__len__',
'__getitem__')
class MyMapping(Mapping):
def __len__(self):
return 0
def __getitem__(self, i):
raise IndexError
def __iter__(self):
return iter(())
self.validate_comparison(MyMapping())
def test_MutableMapping(self):
for sample in [dict]:
self.assertIsInstance(sample(), MutableMapping)
self.assertTrue(issubclass(sample, MutableMapping))
self.validate_abstract_methods(MutableMapping, '__contains__', '__iter__', '__len__',
'__getitem__', '__setitem__', '__delitem__')
def test_MutableMapping_subclass(self):
# Test issue 9214
mymap = UserDict()
mymap['red'] = 5
self.assertIsInstance(mymap.keys(), Set)
self.assertIsInstance(mymap.keys(), KeysView)
self.assertIsInstance(mymap.items(), Set)
self.assertIsInstance(mymap.items(), ItemsView)
mymap = UserDict()
mymap['red'] = 5
z = mymap.keys() | {'orange'}
self.assertIsInstance(z, set)
list(z)
mymap['blue'] = 7 # Shouldn't affect 'z'
self.assertEqual(sorted(z), ['orange', 'red'])
mymap = UserDict()
mymap['red'] = 5
z = mymap.items() | {('orange', 3)}
self.assertIsInstance(z, set)
list(z)
mymap['blue'] = 7 # Shouldn't affect 'z'
self.assertEqual(sorted(z), [('orange', 3), ('red', 5)])
def test_Sequence(self):
for sample in [tuple, list, bytes, str]:
self.assertIsInstance(sample(), Sequence)
self.assertTrue(issubclass(sample, Sequence))
self.assertIsInstance(range(10), Sequence)
self.assertTrue(issubclass(range, Sequence))
self.assertIsInstance(memoryview(b""), Sequence)
self.assertTrue(issubclass(memoryview, Sequence))
self.assertTrue(issubclass(str, Sequence))
self.validate_abstract_methods(Sequence, '__contains__', '__iter__', '__len__',
'__getitem__')
def test_ByteString(self):
for sample in [bytes, bytearray]:
self.assertIsInstance(sample(), ByteString)
self.assertTrue(issubclass(sample, ByteString))
for sample in [str, list, tuple]:
self.assertNotIsInstance(sample(), ByteString)
self.assertFalse(issubclass(sample, ByteString))
self.assertNotIsInstance(memoryview(b""), ByteString)
self.assertFalse(issubclass(memoryview, ByteString))
def test_MutableSequence(self):
for sample in [tuple, str, bytes]:
self.assertNotIsInstance(sample(), MutableSequence)
self.assertFalse(issubclass(sample, MutableSequence))
for sample in [list, bytearray]:
self.assertIsInstance(sample(), MutableSequence)
self.assertTrue(issubclass(sample, MutableSequence))
self.assertFalse(issubclass(str, MutableSequence))
self.validate_abstract_methods(MutableSequence, '__contains__', '__iter__',
'__len__', '__getitem__', '__setitem__', '__delitem__', 'insert')
def test_MutableSequence_mixins(self):
        # Test the mixins of MutableSequence by creating a minimal concrete
# class inherited from it.
class MutableSequenceSubclass(MutableSequence):
def __init__(self):
self.lst = []
def __setitem__(self, index, value):
self.lst[index] = value
def __getitem__(self, index):
return self.lst[index]
def __len__(self):
return len(self.lst)
def __delitem__(self, index):
del self.lst[index]
def insert(self, index, value):
self.lst.insert(index, value)
mss = MutableSequenceSubclass()
mss.append(0)
mss.extend((1, 2, 3, 4))
self.assertEqual(len(mss), 5)
self.assertEqual(mss[3], 3)
mss.reverse()
self.assertEqual(mss[3], 1)
mss.pop()
self.assertEqual(len(mss), 4)
mss.remove(3)
self.assertEqual(len(mss), 3)
mss += (10, 20, 30)
self.assertEqual(len(mss), 6)
self.assertEqual(mss[-1], 30)
mss.clear()
self.assertEqual(len(mss), 0)
################################################################################
### Counter
################################################################################
class CounterSubclassWithSetItem(Counter):
# Test a counter subclass that overrides __setitem__
def __init__(self, *args, **kwds):
self.called = False
Counter.__init__(self, *args, **kwds)
def __setitem__(self, key, value):
self.called = True
Counter.__setitem__(self, key, value)
class CounterSubclassWithGet(Counter):
# Test a counter subclass that overrides get()
def __init__(self, *args, **kwds):
self.called = False
Counter.__init__(self, *args, **kwds)
def get(self, key, default):
self.called = True
return Counter.get(self, key, default)
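# Quick Counter reference (illustrative, mirroring the assertions below):
#   c = Counter('abcaba')
#   (c['a'], c['z'])      # -> (3, 0); missing elements count as zero
#   c.most_common(2)      # -> [('a', 3), ('b', 2)]
#   sorted(c.elements())  # -> ['a', 'a', 'a', 'b', 'b', 'c']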
class TestCounter(unittest.TestCase):
def test_basics(self):
c = Counter('abcaba')
self.assertEqual(c, Counter({'a':3 , 'b': 2, 'c': 1}))
self.assertEqual(c, Counter(a=3, b=2, c=1))
self.assertIsInstance(c, dict)
self.assertIsInstance(c, Mapping)
self.assertTrue(issubclass(Counter, dict))
self.assertTrue(issubclass(Counter, Mapping))
self.assertEqual(len(c), 3)
self.assertEqual(sum(c.values()), 6)
self.assertEqual(sorted(c.values()), [1, 2, 3])
self.assertEqual(sorted(c.keys()), ['a', 'b', 'c'])
self.assertEqual(sorted(c), ['a', 'b', 'c'])
self.assertEqual(sorted(c.items()),
[('a', 3), ('b', 2), ('c', 1)])
self.assertEqual(c['b'], 2)
self.assertEqual(c['z'], 0)
self.assertEqual(c.__contains__('c'), True)
self.assertEqual(c.__contains__('z'), False)
self.assertEqual(c.get('b', 10), 2)
self.assertEqual(c.get('z', 10), 10)
self.assertEqual(c, dict(a=3, b=2, c=1))
self.assertEqual(repr(c), "Counter({'a': 3, 'b': 2, 'c': 1})")
self.assertEqual(c.most_common(), [('a', 3), ('b', 2), ('c', 1)])
for i in range(5):
self.assertEqual(c.most_common(i),
[('a', 3), ('b', 2), ('c', 1)][:i])
self.assertEqual(''.join(sorted(c.elements())), 'aaabbc')
c['a'] += 1 # increment an existing value
c['b'] -= 2  # subtract an existing value down to zero
del c['c'] # remove an entry
del c['c'] # make sure that del doesn't raise KeyError
c['d'] -= 2  # subtract from a missing value
c['e'] = -5 # directly assign a missing value
c['f'] += 4 # add to a missing value
self.assertEqual(c, dict(a=4, b=0, d=-2, e=-5, f=4))
self.assertEqual(''.join(sorted(c.elements())), 'aaaaffff')
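# elements() silently skips entries whose count is zero or negative,
# which is why b, d, and e are absent from the expansion above.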
self.assertEqual(c.pop('f'), 4)
self.assertNotIn('f', c)
for i in range(3):
elem, cnt = c.popitem()
self.assertNotIn(elem, c)
c.clear()
self.assertEqual(c, {})
self.assertEqual(repr(c), 'Counter()')
self.assertRaises(NotImplementedError, Counter.fromkeys, 'abc')
self.assertRaises(TypeError, hash, c)
c.update(dict(a=5, b=3))
c.update(c=1)
c.update(Counter('a' * 50 + 'b' * 30))
c.update() # test case with no args
c.__init__('a' * 500 + 'b' * 300)
c.__init__('cdc')
c.__init__()
self.assertEqual(c, dict(a=555, b=333, c=3, d=1))
self.assertEqual(c.setdefault('d', 5), 1)
self.assertEqual(c['d'], 1)
self.assertEqual(c.setdefault('e', 5), 5)
self.assertEqual(c['e'], 5)
def test_copying(self):
# Check that counters are copyable, deepcopyable, picklable, and
# have a repr/eval round-trip
words = Counter('which witch had which witches wrist watch'.split())
update_test = Counter()
update_test.update(words)
for label, dup in [
('words.copy()', words.copy()),
('copy.copy(words)', copy.copy(words)),
('copy.deepcopy(words)', copy.deepcopy(words)),
('pickle.loads(pickle.dumps(words, 0))',
pickle.loads(pickle.dumps(words, 0))),
('pickle.loads(pickle.dumps(words, 1))',
pickle.loads(pickle.dumps(words, 1))),
('pickle.loads(pickle.dumps(words, 2))',
pickle.loads(pickle.dumps(words, 2))),
('pickle.loads(pickle.dumps(words, -1))',
pickle.loads(pickle.dumps(words, -1))),
('eval(repr(words))', eval(repr(words))),
('update_test', update_test),
('Counter(words)', Counter(words)),
]:
with self.subTest(label=label):
msg = "\ncopy: %s\nwords: %s" % (dup, words)
self.assertIsNot(dup, words, msg)
self.assertEqual(dup, words)
def test_copy_subclass(self):
class MyCounter(Counter):
pass
c = MyCounter('slartibartfast')
d = c.copy()
self.assertEqual(d, c)
self.assertEqual(len(d), len(c))
self.assertEqual(type(d), type(c))
def test_conversions(self):
# Convert to: set, list, dict
s = 'she sells sea shells by the sea shore'
self.assertEqual(sorted(Counter(s).elements()), sorted(s))
self.assertEqual(sorted(Counter(s)), sorted(set(s)))
self.assertEqual(dict(Counter(s)), dict(Counter(s).items()))
self.assertEqual(set(Counter(s)), set(s))
def test_invariant_for_the_in_operator(self):
c = Counter(a=10, b=-2, c=0)
for elem in c:
self.assertTrue(elem in c)
self.assertIn(elem, c)
def test_multiset_operations(self):
# Verify that adding an empty counter strips zeros and negatives
c = Counter(a=10, b=-2, c=0) + Counter()
self.assertEqual(dict(c), dict(a=10))
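# Unary plus performs the same normalization without the extra counter:
# +Counter(a=10, b=-2, c=0) == Counter(a=10) (see test_unary below).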
elements = 'abcd'
for i in range(1000):
# test random pairs of multisets
p = Counter(dict((elem, randrange(-2, 4)) for elem in elements))
p.update(e=1, f=-1, g=0)
q = Counter(dict((elem, randrange(-2, 4)) for elem in elements))
q.update(h=1, i=-1, j=0)
for counterop, numberop in [
(Counter.__add__, lambda x, y: max(0, x+y)),
(Counter.__sub__, lambda x, y: max(0, x-y)),
(Counter.__or__, lambda x, y: max(0, x, y)),
(Counter.__and__, lambda x, y: max(0, min(x, y))),
]:
result = counterop(p, q)
for x in elements:
self.assertEqual(numberop(p[x], q[x]), result[x],
(counterop, x, p, q))
# verify that results exclude non-positive counts
self.assertTrue(all(x > 0 for x in result.values()))
elements = 'abcdef'
for i in range(100):
# verify that random multisets with no repeats are exactly like sets
p = Counter(dict((elem, randrange(0, 2)) for elem in elements))
q = Counter(dict((elem, randrange(0, 2)) for elem in elements))
for counterop, setop in [
(Counter.__sub__, set.__sub__),
(Counter.__or__, set.__or__),
(Counter.__and__, set.__and__),
]:
counter_result = counterop(p, q)
set_result = setop(set(p.elements()), set(q.elements()))
self.assertEqual(counter_result, dict.fromkeys(set_result, 1))
def test_inplace_operations(self):
elements = 'abcd'
for i in range(1000):
# test random pairs of multisets
p = Counter(dict((elem, randrange(-2, 4)) for elem in elements))
p.update(e=1, f=-1, g=0)
q = Counter(dict((elem, randrange(-2, 4)) for elem in elements))
q.update(h=1, i=-1, j=0)
for inplace_op, regular_op in [
(Counter.__iadd__, Counter.__add__),
(Counter.__isub__, Counter.__sub__),
(Counter.__ior__, Counter.__or__),
(Counter.__iand__, Counter.__and__),
]:
c = p.copy()
c_id = id(c)
regular_result = regular_op(c, q)
inplace_result = inplace_op(c, q)
self.assertEqual(inplace_result, regular_result)
self.assertEqual(id(inplace_result), c_id)
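# The identity check matters: the in-place operators are expected to
# mutate and return the same Counter, not hand back a fresh object.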
def test_subtract(self):
c = Counter(a=-5, b=0, c=5, d=10, e=15, g=40)
c.subtract(a=1, b=2, c=-3, d=10, e=20, f=30, h=-50)
self.assertEqual(c, Counter(a=-6, b=-2, c=8, d=0, e=-5, f=-30, g=40, h=50))
c = Counter(a=-5, b=0, c=5, d=10, e=15, g=40)
c.subtract(Counter(a=1, b=2, c=-3, d=10, e=20, f=30, h=-50))
self.assertEqual(c, Counter(a=-6, b=-2, c=8, d=0, e=-5, f=-30, g=40, h=50))
c = Counter('aaabbcd')
c.subtract('aaaabbcce')
self.assertEqual(c, Counter(a=-1, b=0, c=-1, d=1, e=-1))
def test_unary(self):
c = Counter(a=-5, b=0, c=5, d=10, e=15, g=40)
self.assertEqual(dict(+c), dict(c=5, d=10, e=15, g=40))
self.assertEqual(dict(-c), dict(a=5))
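# In short: +c keeps only strictly positive counts, while -c negates
# everything first and then keeps what became positive.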
def test_repr_nonsortable(self):
c = Counter(a=2, b=None)
r = repr(c)
self.assertIn("'a': 2", r)
self.assertIn("'b': None", r)
def test_helper_function(self):
# two paths, one for real dicts and one for other mappings
elems = list('abracadabra')
d = dict()
_count_elements(d, elems)
self.assertEqual(d, {'a': 5, 'r': 2, 'b': 2, 'c': 1, 'd': 1})
m = OrderedDict()
_count_elements(m, elems)
self.assertEqual(m,
OrderedDict([('a', 5), ('b', 2), ('r', 2), ('c', 1), ('d', 1)]))
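# The counting helper is expected to fast-path only plain dicts; other
# mappings such as the OrderedDict above must go through the generic
# get()/__setitem__ protocol so their semantics are preserved.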
# test fidelity to the pure Python version
c = CounterSubclassWithSetItem('abracadabra')
self.assertTrue(c.called)
self.assertEqual(dict(c), {'a': 5, 'b': 2, 'c': 1, 'd': 1, 'r': 2})
c = CounterSubclassWithGet('abracadabra')
self.assertTrue(c.called)
self.assertEqual(dict(c), {'a': 5, 'b': 2, 'c': 1, 'd': 1, 'r': 2})
################################################################################
### OrderedDict
################################################################################
class TestOrderedDict(unittest.TestCase):
def test_init(self):
with self.assertRaises(TypeError):
OrderedDict([('a', 1), ('b', 2)], None) # too many args
pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
self.assertEqual(sorted(OrderedDict(dict(pairs)).items()), pairs) # dict input
self.assertEqual(sorted(OrderedDict(**dict(pairs)).items()), pairs) # kwds input
self.assertEqual(list(OrderedDict(pairs).items()), pairs) # pairs input
self.assertEqual(list(OrderedDict([('a', 1), ('b', 2), ('c', 9), ('d', 4)],
c=3, e=5).items()), pairs) # mixed input
# make sure no positional args conflict with possible keyword arguments
self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__']).args,
['self'])
# Make sure that direct calls to __init__ do not clear previous contents
d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
d.__init__([('e', 5), ('f', 6)], g=7, d=4)
self.assertEqual(list(d.items()),
[('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
def test_update(self):
with self.assertRaises(TypeError):
OrderedDict().update([('a', 1), ('b', 2)], None) # too many args
pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
od = OrderedDict()
od.update(dict(pairs))
self.assertEqual(sorted(od.items()), pairs) # dict input
od = OrderedDict()
od.update(**dict(pairs))
self.assertEqual(sorted(od.items()), pairs) # kwds input
od = OrderedDict()
od.update(pairs)
self.assertEqual(list(od.items()), pairs) # pairs input
od = OrderedDict()
od.update([('a', 1), ('b', 2), ('c', 9), ('d', 4)], c=3, e=5)
self.assertEqual(list(od.items()), pairs) # mixed input
# Issue 9137: Named argument called 'other' or 'self'
# shouldn't be treated specially.
od = OrderedDict()
od.update(self=23)
self.assertEqual(list(od.items()), [('self', 23)])
od = OrderedDict()
od.update(other={})
self.assertEqual(list(od.items()), [('other', {})])
od = OrderedDict()
od.update(red=5, blue=6, other=7, self=8)
self.assertEqual(sorted(list(od.items())),
[('blue', 6), ('other', 7), ('red', 5), ('self', 8)])
# Make sure that direct calls to update do not clear previous contents
# and that updated items are not moved to the end
d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
d.update([('e', 5), ('f', 6)], g=7, d=4)
self.assertEqual(list(d.items()),
[('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
def test_abc(self):
self.assertIsInstance(OrderedDict(), MutableMapping)
self.assertTrue(issubclass(OrderedDict, MutableMapping))
def test_clear(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
self.assertEqual(len(od), len(pairs))
od.clear()
self.assertEqual(len(od), 0)
def test_delitem(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
od = OrderedDict(pairs)
del od['a']
self.assertNotIn('a', od)
with self.assertRaises(KeyError):
del od['a']
self.assertEqual(list(od.items()), pairs[:2] + pairs[3:])
def test_setitem(self):
od = OrderedDict([('d', 1), ('b', 2), ('c', 3), ('a', 4), ('e', 5)])
od['c'] = 10 # existing element
od['f'] = 20 # new element
self.assertEqual(list(od.items()),
[('d', 1), ('b', 2), ('c', 10), ('a', 4), ('e', 5), ('f', 20)])
def test_iterators(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
self.assertEqual(list(od), [t[0] for t in pairs])
self.assertEqual(list(od.keys()), [t[0] for t in pairs])
self.assertEqual(list(od.values()), [t[1] for t in pairs])
self.assertEqual(list(od.items()), pairs)
self.assertEqual(list(reversed(od)),
[t[0] for t in reversed(pairs)])
def test_popitem(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
while pairs:
self.assertEqual(od.popitem(), pairs.pop())
with self.assertRaises(KeyError):
od.popitem()
self.assertEqual(len(od), 0)
def test_pop(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
shuffle(pairs)
while pairs:
k, v = pairs.pop()
self.assertEqual(od.pop(k), v)
with self.assertRaises(KeyError):
od.pop('xyz')
self.assertEqual(len(od), 0)
self.assertEqual(od.pop(k, 12345), 12345)
# make sure pop still works when __missing__ is defined
class Missing(OrderedDict):
def __missing__(self, key):
return 0
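# pop() must not be fooled by __missing__: the default wins for absent
# keys, and a bare pop of a missing key still raises KeyError.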
m = Missing(a=1)
self.assertEqual(m.pop('b', 5), 5)
self.assertEqual(m.pop('a', 6), 1)
self.assertEqual(m.pop('a', 6), 6)
with self.assertRaises(KeyError):
m.pop('a')
def test_equality(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od1 = OrderedDict(pairs)
od2 = OrderedDict(pairs)
self.assertEqual(od1, od2) # same order implies equality
pairs = pairs[2:] + pairs[:2]
od2 = OrderedDict(pairs)
self.assertNotEqual(od1, od2) # different order implies inequality
# comparison to regular dict is not order sensitive
self.assertEqual(od1, dict(od2))
self.assertEqual(dict(od2), od1)
# different length implies inequality
self.assertNotEqual(od1, OrderedDict(pairs[:-1]))
def test_copying(self):
# Check that ordered dicts are copyable, deepcopyable, picklable,
# and have a repr/eval round-trip
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
od = OrderedDict(pairs)
update_test = OrderedDict()
update_test.update(od)
for label, dup in [
('od.copy()', od.copy()),
('copy.copy(od)', copy.copy(od)),
('copy.deepcopy(od)', copy.deepcopy(od)),
('pickle.loads(pickle.dumps(od, 0))',
pickle.loads(pickle.dumps(od, 0))),
('pickle.loads(pickle.dumps(od, 1))',
pickle.loads(pickle.dumps(od, 1))),
('pickle.loads(pickle.dumps(od, 2))',
pickle.loads(pickle.dumps(od, 2))),
('pickle.loads(pickle.dumps(od, 3))',
pickle.loads(pickle.dumps(od, 3))),
('pickle.loads(pickle.dumps(od, -1))',
pickle.loads(pickle.dumps(od, -1))),
('eval(repr(od))', eval(repr(od))),
('update_test', update_test),
('OrderedDict(od)', OrderedDict(od)),
]:
with self.subTest(label=label):
msg = "\ncopy: %s\nod: %s" % (dup, od)
self.assertIsNot(dup, od, msg)
self.assertEqual(dup, od)
def test_yaml_linkage(self):
# Verify that __reduce__ is set up in a way that supports PyYAML's dump() feature.
# In yaml, lists are native but tuples are not.
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
od = OrderedDict(pairs)
# yaml.dump(od) -->
# '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n - [b, 2]\n'
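# __reduce__()[1] is the argument tuple handed to the constructor on
# reconstruction; each pair being a list (not a tuple) is what keeps the
# YAML output native, as the assertion below checks.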
self.assertTrue(all(type(pair) == list for pair in od.__reduce__()[1]))
def test_reduce_not_too_fat(self):
# do not save instance dictionary if not needed
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
od = OrderedDict(pairs)
self.assertIsNone(od.__reduce__()[2])
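# __reduce__()[2] carries the instance __dict__ as state; it stays None
# until an attribute is actually added below.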
od.x = 10
self.assertIsNotNone(od.__reduce__()[2])
def test_pickle_recursive(self):
od = OrderedDict()
od[1] = od
for proto in range(-1, pickle.HIGHEST_PROTOCOL + 1):
dup = pickle.loads(pickle.dumps(od, proto))
self.assertIsNot(dup, od)
self.assertEqual(list(dup.keys()), [1])
self.assertIs(dup[1], dup)
def test_repr(self):
od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])
self.assertEqual(repr(od),
"OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])")
self.assertEqual(eval(repr(od)), od)
self.assertEqual(repr(OrderedDict()), "OrderedDict()")
def test_repr_recursive(self):
# See issue #9826
od = OrderedDict.fromkeys('abc')
od['x'] = od
self.assertEqual(repr(od),
"OrderedDict([('a', None), ('b', None), ('c', None), ('x', ...)])")
def test_setdefault(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
pair_order = list(od.items())
self.assertEqual(od.setdefault('a', 10), 3)
# make sure order didn't change
self.assertEqual(list(od.items()), pair_order)
self.assertEqual(od.setdefault('x', 10), 10)
# make sure 'x' is added to the end
self.assertEqual(list(od.items())[-1], ('x', 10))
# make sure setdefault still works when __missing__ is defined
class Missing(OrderedDict):
def __missing__(self, key):
return 0
self.assertEqual(Missing().setdefault(5, 9), 9)
def test_reinsert(self):
# Given the sequence: insert a, insert b, delete a, re-insert a,
# verify that a now comes after b.
od = OrderedDict()
od['a'] = 1
od['b'] = 2
del od['a']
od['a'] = 1
self.assertEqual(list(od.items()), [('b', 2), ('a', 1)])
def test_move_to_end(self):
od = OrderedDict.fromkeys('abcde')
self.assertEqual(list(od), list('abcde'))
od.move_to_end('c')
self.assertEqual(list(od), list('abdec'))
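# A false 'last' argument moves the key to the front instead of the end;
# moving a key that is already at an endpoint is a no-op.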
od.move_to_end('c', 0)
self.assertEqual(list(od), list('cabde'))
od.move_to_end('c', 0)
self.assertEqual(list(od), list('cabde'))
od.move_to_end('e')
self.assertEqual(list(od), list('cabde'))
with self.assertRaises(KeyError):
od.move_to_end('x')
def test_sizeof(self):
# Wimpy test: Just verify the reported size is larger than a regular dict
d = dict(a=1)
od = OrderedDict(**d)
self.assertGreater(sys.getsizeof(od), sys.getsizeof(d))
def test_override_update(self):
# Verify that subclasses can override update() without breaking __init__()
class MyOD(OrderedDict):
def update(self, *args, **kwds):
raise Exception()
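# If __init__ dispatched through self.update, constructing MyOD would
# raise here; evidently it reaches update() through a private internal
# alias (the pure-Python OrderedDict binds one explicitly), so subclass
# overrides cannot break construction.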
items = [('a', 1), ('c', 3), ('b', 2)]
self.assertEqual(list(MyOD(items).items()), items)
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
type2test = OrderedDict
def test_popitem(self):
d = self._empty_mapping()
self.assertRaises(KeyError, d.popitem)
class MyOrderedDict(OrderedDict):
pass
class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
type2test = MyOrderedDict
def test_popitem(self):
d = self._empty_mapping()
self.assertRaises(KeyError, d.popitem)
################################################################################
### Run tests
################################################################################
import doctest, collections
def test_main(verbose=None):
NamedTupleDocs = doctest.DocTestSuite(module=collections)
test_classes = [TestNamedTuple, NamedTupleDocs, TestOneTrickPonyABCs,
TestCollectionABCs, TestCounter, TestChainMap,
TestOrderedDict, GeneralMappingTests, SubclassMappingTests]
support.run_unittest(*test_classes)
support.run_doctest(collections, verbose)
if __name__ == "__main__":
test_main(verbose=True)
| lgpl-3.0 |