| path | source_code |
|---|---|
data/VisTrails/VisTrails/vistrails/core/vistrail/plugin_data.py
|
from __future__ import division
from vistrails.db.domain import DBPluginData
import unittest
import copy
import random
from vistrails.db.domain import IdScope
import vistrails.core
class PluginData(DBPluginData):
def __init__(self, *args, **kwargs):
DBPluginData.__init__(self, *args, **kwargs)
if self.id is None:
self.id = -1
def __copy__(self):
return PluginData.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = DBPluginData.do_copy(self, new_ids, id_scope, id_remap)
cp.__class__ = PluginData
return cp
@staticmethod
def convert(_plugin_data):
_plugin_data.__class__ = PluginData
id = DBPluginData.db_id
data = DBPluginData.db_data
def __eq__(self, other):
if type(other) != type(self):
return False
return self.data == other.data
class TestPluginData(unittest.TestCase):
def create_data(self, id=1, data=""):
return PluginData(id=id, data=data)
def test_create(self):
self.create_data(2, "testing the data field")
def test_serialization(self):
import vistrails.core.db.io
p_data1 = self.create_data()
xml_str = vistrails.core.db.io.serialize(p_data1)
p_data2 = vistrails.core.db.io.unserialize(xml_str, PluginData)
self.assertEquals(p_data1, p_data2)
self.assertEquals(p_data1.id, p_data2.id)
|
data/Lawouach/WebSocket-for-Python/ws4py/messaging.py
|
import os
import struct
from ws4py.framing import Frame, OPCODE_CONTINUATION, OPCODE_TEXT, \
OPCODE_BINARY, OPCODE_CLOSE, OPCODE_PING, OPCODE_PONG
from ws4py.compat import unicode, py3k
__all__ = ['Message', 'TextMessage', 'BinaryMessage', 'CloseControlMessage',
'PingControlMessage', 'PongControlMessage']
class Message(object):
def __init__(self, opcode, data=b'', encoding='utf-8'):
"""
        A message is an application-level entity. It's usually built
        from one or many frames. The protocol defines several kinds
        of messages which are grouped into two sets:
* data messages which can be text or binary typed
* control messages which provide a mechanism to perform
in-band control communication between peers
The ``opcode`` indicates the message type and ``data`` is
the possible message payload.
        The payload is held internally as a :func:`bytearray` as it is
        faster than a pure string for append operations.
Unicode data will be encoded using the provided ``encoding``.
"""
self.opcode = opcode
self._completed = False
self.encoding = encoding
if isinstance(data, unicode):
if not encoding:
raise TypeError("unicode data without an encoding")
data = data.encode(encoding)
elif isinstance(data, bytearray):
data = bytes(data)
elif not isinstance(data, bytes):
raise TypeError("%s is not a supported data type" % type(data))
self.data = data
def single(self, mask=False):
"""
        Returns the message as the bytes of a single frame, with the
        ``fin`` bit set. If ``mask`` is set, the frame is automatically
        masked using a generated 4-byte token.
"""
mask = os.urandom(4) if mask else None
return Frame(body=self.data, opcode=self.opcode,
masking_key=mask, fin=1).build()
def fragment(self, first=False, last=False, mask=False):
"""
Returns a :class:`ws4py.framing.Frame` bytes.
The behavior depends on the given flags:
        * ``first``: if set, the frame uses ``self.opcode``, otherwise a continuation opcode
        * ``last``: if set, the frame has its ``fin`` bit set
        * ``mask``: if set, the frame is masked using an automatically generated 4-byte token
"""
fin = 1 if last is True else 0
opcode = self.opcode if first is True else OPCODE_CONTINUATION
mask = os.urandom(4) if mask else None
return Frame(body=self.data,
opcode=opcode, masking_key=mask,
fin=fin).build()
@property
def completed(self):
"""
        Indicates that the message is complete, meaning
        the frame's ``fin`` bit was set.
"""
return self._completed
@completed.setter
def completed(self, state):
"""
Sets the state for this message. Usually
set by the stream's parser.
"""
self._completed = state
def extend(self, data):
"""
Add more ``data`` to the message.
"""
if isinstance(data, bytes):
self.data += data
elif isinstance(data, bytearray):
self.data += bytes(data)
elif isinstance(data, unicode):
self.data += data.encode(self.encoding)
else:
raise TypeError("%s is not a supported data type" % type(data))
def __len__(self):
return len(self.__unicode__())
def __str__(self):
if py3k:
return self.data.decode(self.encoding)
return self.data
def __unicode__(self):
return self.data.decode(self.encoding)
class TextMessage(Message):
def __init__(self, text=None):
Message.__init__(self, OPCODE_TEXT, text)
@property
def is_binary(self):
return False
@property
def is_text(self):
return True
class BinaryMessage(Message):
def __init__(self, bytes=None):
Message.__init__(self, OPCODE_BINARY, bytes, encoding=None)
@property
def is_binary(self):
return True
@property
def is_text(self):
return False
def __len__(self):
return len(self.data)
class CloseControlMessage(Message):
def __init__(self, code=1000, reason=''):
data = b""
if code:
data += struct.pack("!H", code)
if reason is not None:
if isinstance(reason, unicode):
reason = reason.encode('utf-8')
data += reason
Message.__init__(self, OPCODE_CLOSE, data, 'utf-8')
self.code = code
self.reason = reason
def __str__(self):
if py3k:
return self.reason.decode('utf-8')
return self.reason
def __unicode__(self):
return self.reason.decode(self.encoding)
class PingControlMessage(Message):
def __init__(self, data=None):
Message.__init__(self, OPCODE_PING, data)
class PongControlMessage(Message):
def __init__(self, data):
Message.__init__(self, OPCODE_PONG, data)
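# --- Editor's usage sketch (not part of ws4py): how the classes above
# compose frames. single() emits one fin-terminated frame; fragment()
# wraps the message's current data as a first/continuation/last frame.
if __name__ == '__main__':
    msg = TextMessage(u'hello world')
    whole = msg.single(mask=True)      # one masked frame, fin=1
    first = msg.fragment(first=True)   # opcode=OPCODE_TEXT, fin=0
    last = msg.fragment(last=True)     # opcode=OPCODE_CONTINUATION, fin=1
    print(len(whole), len(first), len(last))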
|
data/Kozea/brigit/setup.py
|
"""
briGit - Very simple git wrapper module
"""
from setuptools import setup, find_packages
VERSION = '1.2'
options = dict(
name="brigit",
version=VERSION,
description="Very simple git wrapper module",
long_description=__doc__,
author="Florian Mounier - Kozea",
author_email="florian.mounier@kozea.fr",
license="BSD",
platforms="Any",
install_requires=['log_colorizer'],
provides=['brigit'],
packages=find_packages(),
use_2to3=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules"])
setup(**options)
|
data/IanLewis/kay/kay/tests/models.py
|
"""
Models for Kay tests.
:Copyright: (c) 2009 Accense Technology, Inc. All rights reserved.
:license: BSD, see LICENSE for more details.
"""
from google.appengine.ext import db
from kay.utils.forms import ValidationError
from kay.utils.forms.modelform import ModelForm
class MaxLengthValidator(object):
def __init__(self, length):
self.length = length
def __call__(self, val):
if len(val) > self.length:
raise ValidationError("Too long")
return True
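# Editor's note: e.g. MaxLengthValidator(20)('x' * 21) raises
# ValidationError("Too long"), while a 20-character value returns True.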
class TestModel(db.Model):
number = db.IntegerProperty(required=True)
data_field = db.StringProperty(required=True,
validator=MaxLengthValidator(20))
is_active = db.BooleanProperty(required=True)
string_list_field = db.StringListProperty(required=True)
class TestModel2(db.Model):
number = db.IntegerProperty(required=True)
data_field = db.StringProperty(required=True,
validator=MaxLengthValidator(20))
is_active = db.BooleanProperty(required=True)
string_list_field = db.StringListProperty(required=True)
class TestModelForm(ModelForm):
csrf_protected = False
class Meta():
model = TestModel
def __init__(self, instance=None, initial=None):
super(TestModelForm, self).__init__(instance, initial)
self.string_list_field.min_size = 1
class JsonTestModel(db.Model):
s = db.StringProperty()
i = db.IntegerProperty()
b = db.BooleanProperty()
l = db.StringListProperty()
r = db.ReferenceProperty()
class ModelFormTestModel(db.Model):
s_name = db.StringProperty()
zip_code = db.StringProperty()
addr = db.StringProperty()
class ModelFormTestForm(ModelForm):
csrf_protected = False
class Meta:
model = ModelFormTestModel
        fields = ('s_name',)
class ValidationTestModel(db.Model):
slist = db.StringListProperty()
class ValidationTestForm(ModelForm):
csrf_protected = False
class Meta:
model = ValidationTestModel
def context_validate(self, data):
raise ValidationError("Error!")
|
data/adblockplus/gyp/test/win/gyptest-link-tsaware.py
|
"""
Make sure tsaware setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('tsaware.gyp', chdir=CHDIR)
test.build('tsaware.gyp', test.ALL, chdir=CHDIR)
def GetHeaders(exe):
return test.run_dumpbin('/headers', test.built_file_path(exe, chdir=CHDIR))
if 'Terminal Server Aware' in GetHeaders('test_tsaware_no.exe'):
test.fail_test()
if 'Terminal Server Aware' not in GetHeaders('test_tsaware_yes.exe'):
test.fail_test()
test.pass_test()
|
data/SublimeText/SublimeCMD/actions.py
|
import os
import sublime
CMD_TARGET_APPLICATION = 0
CMD_TARGET_WINDOW = 1
CMD_TARGET_VIEW = 2
CMD_RUN = 'run_'
CMD_KEY = 'key_'
CMD_SET = 'set_'
def str_to_dict(s):
"""Converts a string like 'one:two three:4' into a dict with parsed values
that's suitable to pass as args to obj.run_command.
"""
d = {}
els = s.split(' ')
for el in els:
key, value = el.split(':')
        try:
            d[key] = eval(value, {}, {})
        except NameError:
            d[key] = value
return d
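# Editor's sketch of the parsing above: eval() turns literal-looking values
# into Python objects, and the NameError fallback keeps raw strings, e.g.
#   str_to_dict('one:two three:4')  ->  {'one': 'two', 'three': 4}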
def run_(cmd):
target, predicate = cmd['target'], cmd['predicate']
if cmd['is_query']:
if cmd['forced']:
target.run_command('get_all_commands')
return
cmd_, _, args = predicate.partition(' ')
if args:
args = str_to_dict(args)
else:
args = {}
if not cmd['is_query']:
target.run_command(str(cmd_), args)
def set_(cmd):
target, predicate = cmd['target'], cmd['predicate']
if cmd['is_query']:
if cmd['forced']:
syntax = os.path.basename(target.settings().get('syntax'))
target.run_command('inspect_file_settings', {
'syntax': syntax,
'pattern': predicate
})
return
if not target.settings().has(predicate):
sublime.status_message('No setting named "%s" found for this object.' % predicate)
return
msg = "%s = %s" % (predicate, target.settings().get(predicate))
sublime.status_message(msg)
return
try:
name, _, value = predicate.partition(' ')
target.settings().set(name, eval(value, {}, {}))
msg = "%s = %s" % (name, target.settings().get(name))
sublime.status_message(msg)
except ValueError, e:
sublime.status_message('Invalid syntax for "set" command.')
raise e
def key_(args):
print "Not implemented."
|
data/VisTrails/VisTrails/examples/vtk_examples/Modelling/iceCream.py
|
import vtk
from vtk.util.colors import chocolate, mint
cone = vtk.vtkCone()
cone.SetAngle(20)
vertPlane = vtk.vtkPlane()
vertPlane.SetOrigin(.1, 0, 0)
vertPlane.SetNormal(-1, 0, 0)
basePlane = vtk.vtkPlane()
basePlane.SetOrigin(1.2, 0, 0)
basePlane.SetNormal(1, 0, 0)
iceCream = vtk.vtkSphere()
iceCream.SetCenter(1.333, 0, 0)
iceCream.SetRadius(0.5)
bite = vtk.vtkSphere()
bite.SetCenter(1.5, 0, 0.5)
bite.SetRadius(0.25)
theCone = vtk.vtkImplicitBoolean()
theCone.SetOperationTypeToIntersection()
theCone.AddFunction(cone)
theCone.AddFunction(vertPlane)
theCone.AddFunction(basePlane)
theCream = vtk.vtkImplicitBoolean()
theCream.SetOperationTypeToDifference()
theCream.AddFunction(iceCream)
theCream.AddFunction(bite)
theConeSample = vtk.vtkSampleFunction()
theConeSample.SetImplicitFunction(theCone)
theConeSample.SetModelBounds(-1, 1.5, -1.25, 1.25, -1.25, 1.25)
theConeSample.SetSampleDimensions(60, 60, 60)
theConeSample.ComputeNormalsOff()
theConeSurface = vtk.vtkContourFilter()
theConeSurface.SetInputConnection(theConeSample.GetOutputPort())
theConeSurface.SetValue(0, 0.0)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(theConeSurface.GetOutputPort())
coneMapper.ScalarVisibilityOff()
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
coneActor.GetProperty().SetColor(chocolate)
theCreamSample = vtk.vtkSampleFunction()
theCreamSample.SetImplicitFunction(theCream)
theCreamSample.SetModelBounds(0, 2.5, -1.25, 1.25, -1.25, 1.25)
theCreamSample.SetSampleDimensions(60, 60, 60)
theCreamSample.ComputeNormalsOff()
theCreamSurface = vtk.vtkContourFilter()
theCreamSurface.SetInputConnection(theCreamSample.GetOutputPort())
theCreamSurface.SetValue(0, 0.0)
creamMapper = vtk.vtkPolyDataMapper()
creamMapper.SetInputConnection(theCreamSurface.GetOutputPort())
creamMapper.ScalarVisibilityOff()
creamActor = vtk.vtkActor()
creamActor.SetMapper(creamMapper)
creamActor.GetProperty().SetColor(mint)
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddActor(coneActor)
ren.AddActor(creamActor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(250, 250)
ren.ResetCamera()
ren.GetActiveCamera().Roll(90)
ren.GetActiveCamera().Dolly(1.5)
ren.ResetCameraClippingRange()
iren.Initialize()
renWin.Render()
iren.Start()
|
data/RoseOu/flasky/venv/lib/python2.7/site-packages/pygments/console.py
|
"""
pygments.console
~~~~~~~~~~~~~~~~
Format colored console output.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
esc = "\x1b["
codes = {}
codes[""] = ""
codes["reset"] = esc + "39;49;00m"
codes["bold"] = esc + "01m"
codes["faint"] = esc + "02m"
codes["standout"] = esc + "03m"
codes["underline"] = esc + "04m"
codes["blink"] = esc + "05m"
codes["overline"] = esc + "06m"
dark_colors = ["black", "darkred", "darkgreen", "brown", "darkblue",
"purple", "teal", "lightgray"]
light_colors = ["darkgray", "red", "green", "yellow", "blue",
"fuchsia", "turquoise", "white"]
x = 30
for d, l in zip(dark_colors, light_colors):
codes[d] = esc + "%im" % x
codes[l] = esc + "%i;01m" % x
x += 1
del d, l, x
codes["darkteal"] = codes["turquoise"]
codes["darkyellow"] = codes["brown"]
codes["fuscia"] = codes["fuchsia"]
codes["white"] = codes["bold"]
def reset_color():
return codes["reset"]
def colorize(color_key, text):
return codes[color_key] + text + codes["reset"]
def ansiformat(attr, text):
"""
Format ``text`` with a color and/or some attributes::
color normal color
*color* bold color
_color_ underlined color
+color+ blinking color
"""
result = []
if attr[:1] == attr[-1:] == '+':
result.append(codes['blink'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '*':
result.append(codes['bold'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '_':
result.append(codes['underline'])
attr = attr[1:-1]
result.append(codes[attr])
result.append(text)
result.append(codes['reset'])
return ''.join(result)
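# Editor's sketch (assuming a terminal honoring basic SGR escapes):
#   colorize('red', 'err')    -> '\x1b[31;01merr\x1b[39;49;00m'
#   ansiformat('*red*', 'hi') -> bold code + red code + 'hi' + reset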
|
data/Kitware/minerva/server/rest/geojson_dataset.py
|
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import loadmodel, RestException
from girder.constants import AccessType
from girder.plugins.minerva.rest.dataset import Dataset
from girder.plugins.minerva.utility.minerva_utility import findDatasetFolder, \
updateMinervaMetadata
class GeojsonDataset(Dataset):
def __init__(self):
self.resourceName = 'minerva_dataset_geojson'
self.route('POST', (), self.createGeojsonDataset)
@access.user
@loadmodel(map={'itemId': 'item'}, model='item',
level=AccessType.WRITE)
def createGeojsonDataset(self, item, params):
user = self.getCurrentUser()
folder = findDatasetFolder(user, user, create=True)
if folder is None:
raise RestException('User has no Minerva Dataset folder.')
if folder['_id'] != item['folderId']:
raise RestException("Items need to be in user's Minerva Dataset " +
"folder.")
minerva_metadata = {
'original_type': 'geojson',
'dataset_type': 'geojson',
}
for file in self.model('item').childFiles(item=item, limit=0):
if 'geojson' in file['exts'] or 'json' in file['exts']:
minerva_metadata['original_files'] = [{
'name': file['name'], '_id': file['_id']}]
minerva_metadata['geojson_file'] = {
'name': file['name'], '_id': file['_id']}
break
if 'geojson_file' not in minerva_metadata:
raise RestException('Item contains no geojson file.')
updateMinervaMetadata(item, minerva_metadata)
return item
createGeojsonDataset.description = (
Description('Create a Geojson Dataset from an Item.')
.responseClass('Item')
.param('itemId', 'Item ID of the existing Geojson Item', required=True)
.errorResponse('ID was invalid.')
.errorResponse('Write permission denied on the Item.', 403))
|
data/Kuniwak/vint/vint/linting/policy/prohibit_abbreviation_option.py
|
import re
from vint.ast.node_type import NodeType
from vint.linting.level import Level
from vint.linting.policy.abstract_policy import AbstractPolicy
from vint.linting.policy_registry import register_policy
from vint.ast.dictionary.abbreviations import (
Abbreviations,
AbbreviationsIncludingInvertPrefix,
)
SetCommandFamily = {
'set': True,
'setlocal': True,
'setglobal': True,
}
@register_policy
class ProhibitAbbreviationOption(AbstractPolicy):
def __init__(self):
super(ProhibitAbbreviationOption, self).__init__()
self.description = 'Use the full option name instead of the abbreviation'
self.reference = ':help option-summary'
self.level = Level.STYLE_PROBLEM
self.was_scriptencoding_found = False
self.has_encoding_opt_after_scriptencoding = False
def listen_node_types(self):
return [NodeType.EXCMD, NodeType.OPTION]
def is_valid(self, node, lint_context):
""" Whether the specified node is valid.
Abbreviation options are invalid.
"""
node_type = NodeType(node['type'])
if node_type is NodeType.OPTION:
option_name = node['value'][1:]
is_valid = option_name not in Abbreviations
if not is_valid:
self._make_description_by_option_name(option_name)
return is_valid
excmd_node = node
is_set_cmd = excmd_node['ea']['cmd'].get('name') in SetCommandFamily
if not is_set_cmd:
return True
option_expr = excmd_node['str'].split()[1]
option_name = re.match(r'[a-z]+', option_expr).group(0)
is_valid = option_name not in AbbreviationsIncludingInvertPrefix
if not is_valid:
self._make_description_by_option_name(option_name)
return is_valid
def _make_description_by_option_name(self, option_name):
param = {
'good_pattern': AbbreviationsIncludingInvertPrefix[option_name],
'bad_pattern': option_name,
}
self.description = ('Use the full option name `{good_pattern}` '
'instead of `{bad_pattern}`'.format(**param))
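# Editor's rough sketch of what this policy flags (assuming the usual Vim
# abbreviation table): for `set nu`, option_name is 'nu', which is in
# AbbreviationsIncludingInvertPrefix, so is_valid is False and the message
# becomes "Use the full option name `number` instead of `nu`";
# `set number` passes unchanged.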
|
data/agiliq/merchant/example/settings/travis.py
|
import os
from common import *
from formencode.variabledecode import variable_decode
DEBUG = False
def get_merchant_settings():
env_dict = dict(filter(lambda x: x[0].startswith('MERCHANT'), os.environ.items()))
return variable_decode(env_dict, dict_char='__')['MERCHANT']
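# Editor's sketch: variable_decode() re-nests '__'-separated keys, so an
# environment entry MERCHANT__pay_pal__WPP_USER=u decodes to
# {'MERCHANT': {'pay_pal': {'WPP_USER': 'u'}}}; the trailing ['MERCHANT']
# lookup strips the outer level.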
MERCHANT_TEST_MODE = True
MERCHANT_SETTINGS = get_merchant_settings()
if MERCHANT_SETTINGS.get("pay_pal"):
PAYPAL_TEST = MERCHANT_TEST_MODE
PAYPAL_WPP_USER = MERCHANT_SETTINGS["pay_pal"]["WPP_USER"]
PAYPAL_WPP_PASSWORD = MERCHANT_SETTINGS["pay_pal"]["WPP_PASSWORD"]
PAYPAL_WPP_SIGNATURE = MERCHANT_SETTINGS["pay_pal"]["WPP_SIGNATURE"]
PAYPAL_RECEIVER_EMAIL = MERCHANT_SETTINGS["pay_pal"]["RECEIVER_EMAIL"]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'merchant.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
ADMIN_MEDIA_PREFIX = "/static/admin/"
|
data/IvanMalison/okcupyd/okcupyd/tasks/db.py
|
import logging
from invoke import task
import IPython
from okcupyd import db
from okcupyd import util
from okcupyd.db import mailbox, model
from okcupyd.user import User
log = logging.getLogger(__name__)
@task(default=True)
def session():
with db.txn() as session:
IPython.embed()
@task
def reset():
util.enable_logger(__name__)
log.info(db.Base.metadata.bind)
db.Base.metadata.drop_all()
db.Base.metadata.create_all()
@task
def sync():
user = User()
mailbox.Sync(user).all()
log.info(model.Message.query(model.User.okc_id == user.profile.id))
@task
def make():
user = User()
user_model = model.User.from_profile(user.profile)
user_model.upsert_model(id_key='okc_id')
okcupyd_user = model.OKCupydUser(user_id=user_model.id)
okcupyd_user.upsert_model(id_key='user_id')
return okcupyd_user
|
data/adaptivdesign/django-sellmo/example/category/links.py
|
from sellmo.core import chaining
from sellmo.core.http.query import QueryString
from sellmo.apps.product.routines import (list_products_from_request,
product_filters_from_request)
from sellmo.contrib.category.views import category
from django.shortcuts import render
from django.template.response import TemplateResponse
from product.utils import paginate
@chaining.link(category)
def _category(request, category, **kwargs):
query = QueryString(request)
products = list_products_from_request(request, category=category, query=query)
if not category.is_leaf_node():
root = category
elif category.is_child_node():
root = category.parent
else:
root = category
products = paginate(request, products)
product_filters = product_filters_from_request(request, products.object_list.facets(), category=category, query=query)
yield TemplateResponse(request, 'category/category.html', {
'products': products,
'product_filters': product_filters,
'root': root,
'category': category,
'q': query
})
|
data/MirantisWorkloadMobility/CloudFerry/cloudferry/lib/os/actions/detach_used_volumes_via_compute.py
|
import copy
import logging
from oslo_config import cfg
from cloudferry.lib.base.action import action
from cloudferry.lib.utils import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class DetachVolumesCompute(action.Action):
def run(self, info, **kwargs):
info = copy.deepcopy(info)
compute_resource = self.cloud.resources[utils.COMPUTE_RESOURCE]
storage_resource = self.cloud.resources[utils.STORAGE_RESOURCE]
for instance in info[utils.INSTANCES_TYPE].itervalues():
LOG.info("Detaching volumes for instance %s [%s]",
instance['instance']['name'], instance['instance']['id'])
if not instance['instance'][utils.VOLUMES_TYPE]:
continue
for vol in instance['instance'][utils.VOLUMES_TYPE]:
volume_status = storage_resource.get_status(vol['id'])
LOG.debug("Volume %s was found. Status %s",
vol['id'], volume_status)
if volume_status == 'in-use':
compute_resource.detach_volume(instance['instance']['id'],
vol['id'])
LOG.debug("Detach volume %s", vol['id'])
timeout = CONF.migrate.storage_backend_timeout
storage_resource.wait_for_status(
vol['id'], storage_resource.get_status, 'available',
timeout=timeout)
return {}
|
data/abusesa/abusehelper/abusehelper/core/rules/tests/test_rules.py
|
from __future__ import unicode_literals
import re
import pickle
import unittest
from ..atoms import String, RegExp, IP, DomainName
from ..rules import And, Or, No, Match, NonMatch, Fuzzy
from ...events import Event
class TestRules(unittest.TestCase):
def test_results_should_get_cached(self):
rule = Match("a", "a")
cache = {}
rule.match(Event(), cache)
self.assertTrue(rule in cache)
self.assertFalse(cache[rule])
def test_cached_results_should_get_used(self):
rule = Match("a", "a")
cache = {rule: True}
self.assertTrue(rule.match(Event(), cache))
class TestAnd(unittest.TestCase):
def test_can_not_be_initialized_with_zero_arguments(self):
self.assertRaises(TypeError, And)
def test_commutativity(self):
a = Match("a", "a")
b = Match("b", "b")
self.assertEqual(And(a, b), And(b, a))
def test_redundant_arguments_get_deduplicated(self):
a = Match("a", "a")
self.assertEqual(And(a, a), And(a))
_options = [
And(Match("a"), Match("b"))
]
def test_pickling_and_unpickling(self):
for option in self._options:
self.assertEqual(option, pickle.loads(pickle.dumps(option)))
def test_repr(self):
for option in self._options:
self.assertEqual(option, eval(repr(option)))
class TestOr(unittest.TestCase):
def test_can_not_be_initialized_with_zero_arguments(self):
self.assertRaises(TypeError, Or)
def test_commutativity(self):
a = Match("a", "a")
b = Match("b", "b")
self.assertEqual(Or(a, b), Or(b, a))
def test_redundant_arguments_get_deduplicated(self):
a = Match("a", "a")
self.assertEqual(Or(a, a), Or(a))
_options = [
Or(Match("a"), Match("b"))
]
def test_pickling_and_unpickling(self):
for option in self._options:
self.assertEqual(option, pickle.loads(pickle.dumps(option)))
def test_repr(self):
for option in self._options:
self.assertEqual(option, eval(repr(option)))
class TestNo(unittest.TestCase):
_options = [
No(Match("a"))
]
def test_pickling_and_unpickling(self):
for option in self._options:
self.assertEqual(option, pickle.loads(pickle.dumps(option)))
def test_repr(self):
for option in self._options:
self.assertEqual(option, eval(repr(option)))
class TestMatch(unittest.TestCase):
def test_init_conversions(self):
self.assertEqual(
Match("a", "b"),
Match("a", String("b")))
self.assertEqual(
Match("a", re.compile("b")),
Match("a", RegExp("b")))
def test_domainname(self):
matcher = Match("a", DomainName("*.example"))
self.assertTrue(matcher.match(Event({"a": "domain.example"})))
self.assertTrue(matcher.match(Event({"a": "sub.domain.example"})))
self.assertFalse(matcher.match(Event({"a": "*.example"})))
self.assertFalse(matcher.match(Event({"a": "*.domain.example"})))
self.assertFalse(matcher.match(Event({"a": "domain.test"})))
_options = [
Match(),
Match("a", String("b")),
Match("a", RegExp("b")),
Match("a", IP("192.0.2.0")),
Match("a", DomainName("*.example"))
]
def test_pickling_and_unpickling(self):
for option in self._options:
self.assertEqual(option, pickle.loads(pickle.dumps(option)))
def test_repr(self):
for option in self._options:
self.assertEqual(option, eval(repr(option)))
class TestNonMatch(unittest.TestCase):
def test_init_conversions(self):
self.assertEqual(
NonMatch("a", "b"),
NonMatch("a", String("b")))
self.assertEqual(
NonMatch("a", re.compile("b")),
NonMatch("a", RegExp("b")))
def test_domainname(self):
matcher = NonMatch("a", DomainName("*.example"))
self.assertFalse(matcher.match(Event({"a": "domain.example"})))
self.assertFalse(matcher.match(Event({"a": "sub.domain.example"})))
self.assertTrue(matcher.match(Event({"a": "*.example"})))
self.assertTrue(matcher.match(Event({"a": "*.domain.example"})))
self.assertTrue(matcher.match(Event({"a": "domain.test"})))
_options = [
NonMatch(),
NonMatch("a", String("b")),
NonMatch("a", RegExp("b")),
NonMatch("a", IP("192.0.2.0")),
NonMatch("a", DomainName("*.example"))
]
def test_pickling_and_unpickling(self):
for option in self._options:
self.assertEqual(option, pickle.loads(pickle.dumps(option)))
def test_repr(self):
for option in self._options:
self.assertEqual(option, eval(repr(option)))
class TestFuzzy(unittest.TestCase):
def test_base(self):
rule = Fuzzy(String("a"))
self.assertTrue(rule.match(Event({"a": "xy"})))
self.assertTrue(rule.match(Event({"xy": "a"})))
self.assertTrue(rule.match(Event({"ba": "xy"})))
self.assertTrue(rule.match(Event({"xy": "ba"})))
self.assertFalse(rule.match(Event({"xy": "xy"})))
self.assertTrue(rule.match(Event({"A": "xy"})))
self.assertTrue(rule.match(Event({"xy": "A"})))
rule = Fuzzy(RegExp("a"))
self.assertTrue(rule.match(Event({"a": "xy"})))
self.assertTrue(rule.match(Event({"xy": "a"})))
self.assertTrue(rule.match(Event({"ba": "xy"})))
self.assertTrue(rule.match(Event({"xy": "ba"})))
self.assertFalse(rule.match(Event({"xy": "xy"})))
self.assertFalse(rule.match(Event({"A": "xy"})))
self.assertFalse(rule.match(Event({"xy": "A"})))
_options = [
Fuzzy(String("a")),
Fuzzy(RegExp("a")),
Fuzzy(IP("192.0.2.0")),
Fuzzy(DomainName("*.example"))
]
def test_pickling_and_unpickling(self):
for option in self._options:
self.assertEqual(option, pickle.loads(pickle.dumps(option)))
def test_repr(self):
for option in self._options:
self.assertEqual(option, eval(repr(option)))
|
data/ProgVal/Limnoria/src/drivers/Socket.py
|
"""
Contains simple socket drivers. Asyncore bugged (haha, pun!) me.
"""
from __future__ import division
import os
import time
import errno
import select
import socket
from .. import (conf, drivers, log, utils, world)
from ..utils import minisix
from ..utils.str import decode_raw_line
try:
import ssl
SSLError = ssl.SSLError
except:
drivers.log.debug('ssl module is not available, '
'cannot connect to SSL servers.')
class SSLError(Exception):
pass
class SocketDriver(drivers.IrcDriver, drivers.ServersMixin):
_instances = []
_selecting = [False]
def __init__(self, irc):
self._instances.append(self)
assert irc is not None
self.irc = irc
drivers.IrcDriver.__init__(self, irc)
drivers.ServersMixin.__init__(self, irc)
self.conn = None
self._attempt = -1
self.servers = ()
self.eagains = 0
self.inbuffer = b''
self.outbuffer = ''
self.zombie = False
self.connected = False
self.writeCheckTime = None
self.nextReconnectTime = None
self.resetDelay()
if self.networkGroup.get('ssl').value and 'ssl' not in globals():
drivers.log.error('The Socket driver can not connect to SSL '
'servers for your Python version. Try the '
                                  'Twisted driver instead, or install a Python '
                                  'version that supports SSL (2.6 and greater).')
self.ssl = False
else:
self.ssl = self.networkGroup.get('ssl').value
self.connect()
def getDelay(self):
ret = self.currentDelay
self.currentDelay = min(self.currentDelay * 2,
conf.supybot.drivers.maxReconnectWait())
return ret
def resetDelay(self):
self.currentDelay = 10.0
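    # Editor's note: together these implement exponential backoff -- each
    # getDelay() call doubles the next wait (10s, 20s, 40s, ...) up to
    # supybot.drivers.maxReconnectWait, and resetDelay() restores the 10s
    # base once a server is reached.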
def _getNextServer(self):
oldServer = getattr(self, 'currentServer', None)
server = drivers.ServersMixin._getNextServer(self)
if self.currentServer != oldServer:
self.resetDelay()
return server
def _handleSocketError(self, e):
if e.args[0] != 11 or self.eagains > 120:
drivers.log.disconnect(self.currentServer, e)
if self in self._instances:
self._instances.remove(self)
try:
self.conn.close()
except:
pass
self.connected = False
self.scheduleReconnect()
else:
log.debug('Got EAGAIN, current count: %s.', self.eagains)
self.eagains += 1
def _sendIfMsgs(self):
if not self.connected:
return
if not self.zombie:
msgs = [self.irc.takeMsg()]
while msgs[-1] is not None:
msgs.append(self.irc.takeMsg())
del msgs[-1]
self.outbuffer += ''.join(map(str, msgs))
if self.outbuffer:
try:
if minisix.PY2:
sent = self.conn.send(self.outbuffer)
else:
sent = self.conn.send(self.outbuffer.encode())
self.outbuffer = self.outbuffer[sent:]
self.eagains = 0
except socket.error as e:
self._handleSocketError(e)
if self.zombie and not self.outbuffer:
self._reallyDie()
@classmethod
def _select(cls):
if cls._selecting[0]:
return
try:
cls._selecting[0] = True
for inst in cls._instances:
if not inst.connected or \
(minisix.PY3 and inst.conn._closed) or \
(minisix.PY2 and
inst.conn._sock.__class__ is socket._closedsocket):
cls._instances.remove(inst)
elif inst.conn.fileno() == -1:
inst.reconnect()
if not cls._instances:
return
rlist, wlist, xlist = select.select([x.conn for x in cls._instances],
[], [], conf.supybot.drivers.poll())
for instance in cls._instances:
if instance.conn in rlist:
instance._read()
except select.error as e:
if e.args[0] != errno.EINTR:
raise
finally:
cls._selecting[0] = False
for instance in cls._instances:
if instance.irc and not instance.irc.zombie:
instance._sendIfMsgs()
def run(self):
now = time.time()
if self.nextReconnectTime is not None and now > self.nextReconnectTime:
self.reconnect()
elif self.writeCheckTime is not None and now > self.writeCheckTime:
self._checkAndWriteOrReconnect()
if not self.connected:
time.sleep(conf.supybot.drivers.poll())
return
self._sendIfMsgs()
self._select()
def _read(self):
"""Called by _select() when we can read data."""
try:
self.inbuffer += self.conn.recv(1024)
self.eagains = 0
lines = self.inbuffer.split(b'\n')
self.inbuffer = lines.pop()
for line in lines:
line = decode_raw_line(line)
msg = drivers.parseMsg(line)
if msg is not None and self.irc is not None:
self.irc.feedMsg(msg)
except socket.timeout:
pass
except SSLError as e:
if e.args[0] == 'The read operation timed out':
pass
else:
self._handleSocketError(e)
return
except socket.error as e:
self._handleSocketError(e)
return
if self.irc and not self.irc.zombie:
self._sendIfMsgs()
def connect(self, **kwargs):
self.reconnect(reset=False, **kwargs)
def reconnect(self, wait=False, reset=True):
self._attempt += 1
self.nextReconnectTime = None
if self.connected:
drivers.log.reconnect(self.irc.network)
if self in self._instances:
self._instances.remove(self)
try:
self.conn.shutdown(socket.SHUT_RDWR)
except:
pass
self.conn.close()
self.connected = False
if reset:
drivers.log.debug('Resetting %s.', self.irc)
self.irc.reset()
else:
drivers.log.debug('Not resetting %s.', self.irc)
if wait:
self.scheduleReconnect()
return
self.server = self._getNextServer()
network_config = getattr(conf.supybot.networks, self.irc.network)
socks_proxy = network_config.socksproxy()
try:
if socks_proxy:
import socks
except ImportError:
log.error('Cannot use socks proxy (SocksiPy not installed), '
'using direct connection instead.')
socks_proxy = ''
if socks_proxy:
address = self.server[0]
else:
try:
address = utils.net.getAddressFromHostname(self.server[0],
attempt=self._attempt)
except (socket.gaierror, socket.error) as e:
drivers.log.connectError(self.currentServer, e)
self.scheduleReconnect()
return
port = self.server[1]
drivers.log.connect(self.currentServer)
try:
self.conn = utils.net.getSocket(address, port=port,
socks_proxy=socks_proxy,
vhost=conf.supybot.protocols.irc.vhost(),
vhostv6=conf.supybot.protocols.irc.vhostv6(),
)
except socket.error as e:
drivers.log.connectError(self.currentServer, e)
self.scheduleReconnect()
return
self.conn.settimeout(max(10, conf.supybot.drivers.poll()*10))
try:
self.conn.connect((address, port))
if network_config.ssl():
self.starttls()
elif not network_config.requireStarttls():
drivers.log.warning(('Connection to network %s '
'does not use SSL/TLS, which makes it vulnerable to '
'man-in-the-middle attacks and passive eavesdropping. '
'You should consider upgrading your connection to SSL/TLS '
                    '<http://doc.supybot.aperio.fr/en/latest/use/faq.html>.')
% self.irc.network)
def setTimeout():
self.conn.settimeout(conf.supybot.drivers.poll())
conf.supybot.drivers.poll.addCallback(setTimeout)
setTimeout()
self.connected = True
self.resetDelay()
except socket.error as e:
if e.args[0] == 115:
now = time.time()
when = now + 60
whenS = log.timestamp(when)
drivers.log.debug('Connection in progress, scheduling '
'connectedness check for %s', whenS)
self.writeCheckTime = when
else:
drivers.log.connectError(self.currentServer, e)
self.scheduleReconnect()
return
self._instances.append(self)
def _checkAndWriteOrReconnect(self):
self.writeCheckTime = None
drivers.log.debug('Checking whether we are connected.')
(_, w, _) = select.select([], [self.conn], [], 0)
if w:
drivers.log.debug('Socket is writable, it might be connected.')
self.connected = True
self.resetDelay()
else:
drivers.log.connectError(self.currentServer, 'Timed out')
self.reconnect()
def scheduleReconnect(self):
when = time.time() + self.getDelay()
if not world.dying:
drivers.log.reconnect(self.irc.network, when)
if self.nextReconnectTime:
drivers.log.error('Updating next reconnect time when one is '
'already present. This is a bug; please '
'report it, with an explanation of what caused '
'this to happen.')
self.nextReconnectTime = when
def die(self):
if self in self._instances:
self._instances.remove(self)
self.zombie = True
if self.nextReconnectTime is not None:
self.nextReconnectTime = None
if self.writeCheckTime is not None:
self.writeCheckTime = None
drivers.log.die(self.irc)
def _reallyDie(self):
if self.conn is not None:
self.conn.close()
drivers.IrcDriver.die(self)
def name(self):
return '%s(%s)' % (self.__class__.__name__, self.irc)
def starttls(self):
assert 'ssl' in globals()
network_config = getattr(conf.supybot.networks, self.irc.network)
certfile = network_config.certfile()
if not certfile:
certfile = conf.supybot.protocols.irc.certfile()
if not certfile:
certfile = None
elif not os.path.isfile(certfile):
drivers.log.warning('Could not find cert file %s.' %
certfile)
certfile = None
verifyCertificates = conf.supybot.protocols.ssl.verifyCertificates()
if not verifyCertificates:
drivers.log.warning('Not checking SSL certificates, connections '
'are vulnerable to man-in-the-middle attacks. Set '
'supybot.protocols.ssl.verifyCertificates to "true" '
'to enable validity checks.')
try:
self.conn = utils.net.ssl_wrap_socket(self.conn,
logger=drivers.log, hostname=self.server[0],
certfile=certfile,
verify=verifyCertificates,
trusted_fingerprints=network_config.ssl.serverFingerprints(),
ca_file=network_config.ssl.authorityCertificate(),
)
except getattr(ssl, 'CertificateError', None) as e:
drivers.log.error(('Certificate validation failed when '
'connecting to %s: %s\n'
'This means either someone is doing a man-in-the-middle '
'attack on your connection, or the server\'s certificate is '
'not in your trusted fingerprints list.')
% (self.irc.network, e.args[0]))
raise ssl.SSLError('Aborting because of failed certificate '
'verification.')
except ssl.SSLError as e:
drivers.log.error(('Certificate validation failed when '
'connecting to %s: %s\n'
'This means either someone is doing a man-in-the-middle '
'attack on your connection, or the server\'s '
'certificate is not trusted.')
% (self.irc.network, e.args[1]))
raise ssl.SSLError('Aborting because of failed certificate '
'verification.')
Driver = SocketDriver
|
data/Juniper/py-junos-eznc/lib/jnpr/junos/op/isis.py
|
"""
Pythonifier for ISIS Table/View
"""
from jnpr.junos.factory import loadyaml
from os.path import splitext
_YAML_ = splitext(__file__)[0] + '.yml'
globals().update(loadyaml(_YAML_))
|
data/Tanganelli/CoAPthon/coapthon/messages/message.py
|
from coapthon.utils import parse_blockwise
from coapthon import defines
from coapthon.messages.option import Option
__author__ = 'Giacomo Tanganelli'
__version__ = "3.0"
class Message(object):
def __init__(self):
self._type = None
self._mid = None
self._token = None
self._options = []
self._payload = None
self._destination = None
self._source = None
self._code = None
self._acknowledged = None
self._rejected = None
self._timeouted = None
self._cancelled = None
self._duplicated = None
self._timestamp = None
self._version = 1
@property
def version(self):
return self._version
@version.setter
def version(self, v):
if not isinstance(v, int) or v != 1:
raise AttributeError
self._version = v
@property
def type(self):
"""
Return the type of the message.
:return: the type
"""
return self._type
@type.setter
def type(self, value):
"""
Sets the type of the message.
:type value: Types
:param value: the type
:raise AttributeError: if value is not a valid type
"""
if value not in defines.Types.values():
raise AttributeError
self._type = value
@property
def mid(self):
"""
Return the mid of the message.
:return: the MID
"""
return self._mid
@mid.setter
def mid(self, value):
"""
Sets the MID of the message.
:type value: Integer
:param value: the MID
        :raise AttributeError: if value is not an int or cannot be represented in 16 bits.
        """
        if not isinstance(value, int) or value > 65535:
            raise AttributeError
self._mid = value
@mid.deleter
def mid(self):
self._mid = None
@property
def token(self):
"""
Get the Token of the message.
:return: the Token
"""
return self._token
@token.setter
def token(self, value):
"""
Set the Token of the message.
:type value: String
:param value: the Token
"""
if not isinstance(value, str):
value = str(value)
if len(value) > 256:
raise AttributeError
self._token = value
@token.deleter
def token(self):
self._token = None
@property
def options(self):
"""
"""
return self._options
@options.setter
def options(self, value):
"""
:type value: list
:param value: list of options
"""
if value is None:
value = []
assert isinstance(value, list)
self._options = value
@property
def payload(self):
"""
Return the payload.
:return: the payload
"""
return self._payload
@payload.setter
def payload(self, value):
"""
        Sets the payload of the message and, if a tuple is given, the Content-Type.
:param value: the payload
"""
if isinstance(value, tuple):
content_type, payload = value
self.content_type = content_type
self._payload = payload
else:
self._payload = value
@property
def destination(self):
"""
"""
return self._destination
@destination.setter
def destination(self, value):
"""
:type value: tuple
:param value: (ip, port)
        :raise AttributeError: if value is not an (ip, port) tuple.
"""
        if value is not None and (not isinstance(value, tuple) or len(value) != 2):
raise AttributeError
self._destination = value
@property
def source(self):
"""
"""
return self._source
@source.setter
def source(self, value):
"""
:type value: tuple
:param value: (ip, port)
        :raise AttributeError: if value is not an (ip, port) tuple.
"""
if not isinstance(value, tuple) or len(value) != 2:
raise AttributeError
self._source = value
@property
def code(self):
"""
"""
return self._code
@code.setter
def code(self, value):
"""
:type value: Codes
:param value: the code
:raise AttributeError: if value is not a valid code
"""
if value not in defines.Codes.LIST.keys() and value is not None:
raise AttributeError
self._code = value
@property
def acknowledged(self):
"""
        Checks if this message has been acknowledged.
:return: True, if is acknowledged
"""
return self._acknowledged
@acknowledged.setter
def acknowledged(self, value):
"""
Marks this message as acknowledged.
:type value: Boolean
:param value: if acknowledged
"""
assert (isinstance(value, bool))
self._acknowledged = value
if value:
self._timeouted = False
self._rejected = False
self._cancelled = False
@property
def rejected(self):
"""
Checks if this message has been rejected.
:return: True, if is rejected
"""
return self._rejected
@rejected.setter
def rejected(self, value):
"""
Marks this message as rejected.
:type value: Boolean
:param value: if rejected
"""
assert (isinstance(value, bool))
self._rejected = value
if value:
self._timeouted = False
self._acknowledged = False
self._cancelled = True
@property
def timeouted(self):
"""
        Checks if this message has timed out. Confirmable messages in particular
        might time out.
:return: True, if has timeouted
"""
return self._timeouted
@timeouted.setter
def timeouted(self, value):
"""
        Marks this message as timed out. Confirmable messages in particular
        might time out.
:type value: Boolean
:param value:
"""
assert (isinstance(value, bool))
self._timeouted = value
if value:
self._acknowledged = False
self._rejected = False
self._cancelled = True
@property
def duplicated(self):
"""
Checks if this message is a duplicate.
:return: True, if is a duplicate
"""
return self._duplicated
@duplicated.setter
def duplicated(self, value):
"""
Marks this message as a duplicate.
:type value: Boolean
:param value: if a duplicate
"""
assert (isinstance(value, bool))
self._duplicated = value
@property
def timestamp(self):
"""
"""
return self._timestamp
@timestamp.setter
def timestamp(self, value):
"""
:type value: Message
:param value:
"""
self._timestamp = value
def _already_in(self, option):
"""
Check if an option is already in the message.
:type option: Option
:param option: the option to be checked
:return: True if already present, False otherwise
"""
for opt in self._options:
if option.number == opt.number:
return True
return False
def add_option(self, option):
"""
Add an option to the message.
:type option: Option
:param option: the option
:raise TypeError: if the option is not repeatable and such option is already present in the message
"""
assert isinstance(option, Option)
repeatable = defines.OptionRegistry.LIST[option.number].repeatable
if not repeatable:
ret = self._already_in(option)
if ret:
                raise TypeError("Option : %s is not repeatable" % option.name)
else:
self._options.append(option)
else:
self._options.append(option)
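    # Editor's note (assuming the usual RFC 7252 registry flags): adding a
    # second CONTENT_TYPE option would raise TypeError above, while a
    # repeatable option such as ETAG may be added several times.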
def del_option(self, option):
"""
Delete an option from the message
:type option: Option
:param option: the option
"""
assert isinstance(option, Option)
while option in list(self._options):
self._options.remove(option)
def del_option_by_name(self, name):
"""
Delete an option from the message by name
:type name: String
:param name: option name
"""
for o in list(self._options):
assert isinstance(o, Option)
if o.name == name:
self._options.remove(o)
def del_option_by_number(self, number):
"""
Delete an option from the message by number
:type number: Integer
        :param number: option number
"""
for o in list(self._options):
assert isinstance(o, Option)
if o.number == number:
self._options.remove(o)
@property
def etag(self):
"""
Get the ETag option of the message.
:return: the ETag values or [] if not specified by the request
"""
value = []
for option in self.options:
if option.number == defines.OptionRegistry.ETAG.number:
value.append(option.value)
return value
@etag.setter
def etag(self, etag):
"""
Add an ETag option to the message.
:param etag: the etag
"""
if not isinstance(etag, list):
etag = [etag]
for e in etag:
option = Option()
option.number = defines.OptionRegistry.ETAG.number
option.value = e
self.add_option(option)
@etag.deleter
def etag(self):
"""
Delete an ETag from a message.
"""
self.del_option_by_number(defines.OptionRegistry.ETAG.number)
@property
def content_type(self):
"""
Get the Content-Type option of a response.
:return: the Content-Type value or 0 if not specified by the response
"""
value = 0
for option in self.options:
if option.number == defines.OptionRegistry.CONTENT_TYPE.number:
value = int(option.value)
return value
@content_type.setter
def content_type(self, content_type):
"""
Set the Content-Type option of a response.
:type content_type: int
:param content_type: the Content-Type
"""
option = Option()
option.number = defines.OptionRegistry.CONTENT_TYPE.number
option.value = int(content_type)
self.add_option(option)
@content_type.deleter
def content_type(self):
self.del_option_by_number(defines.OptionRegistry.CONTENT_TYPE.number)
@property
def observe(self):
"""
Check if the request is an observing request.
:return: 0, if the request is an observing request
"""
for option in self.options:
if option.number == defines.OptionRegistry.OBSERVE.number:
if option.value is None:
return 0
return option.value
return None
@observe.setter
def observe(self, ob):
"""
Add the Observe option.
:param ob: observe count
"""
option = Option()
option.number = defines.OptionRegistry.OBSERVE.number
option.value = ob
self.del_option_by_number(defines.OptionRegistry.OBSERVE.number)
self.add_option(option)
@observe.deleter
def observe(self):
self.del_option_by_number(defines.OptionRegistry.OBSERVE.number)
@property
def block1(self):
"""
Get the Block1 option.
:return: the Block1 value
"""
value = None
for option in self.options:
if option.number == defines.OptionRegistry.BLOCK1.number:
value = parse_blockwise(option.value)
return value
@block1.setter
def block1(self, value):
"""
Set the Block1 option.
:param value: the Block1 value
"""
option = Option()
option.number = defines.OptionRegistry.BLOCK1.number
num, m, size = value
if size > 512:
szx = 6
elif 256 < size <= 512:
szx = 5
elif 128 < size <= 256:
szx = 4
elif 64 < size <= 128:
szx = 3
elif 32 < size <= 64:
szx = 2
elif 16 < size <= 32:
szx = 1
else:
szx = 0
value = (num << 4)
value |= (m << 3)
value |= szx
option.value = value
self.add_option(option)
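        # Editor's worked example for the encoding above (RFC 7959 block
        # option): value = (num=2, m=1, size=64) gives szx=2, so the stored
        # option value is (2 << 4) | (1 << 3) | 2 == 42.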
@block1.deleter
def block1(self):
self.del_option_by_number(defines.OptionRegistry.BLOCK1.number)
@property
def block2(self):
"""
        Get the Block2 option.
        :return: the Block2 value
"""
value = None
for option in self.options:
if option.number == defines.OptionRegistry.BLOCK2.number:
value = parse_blockwise(option.value)
return value
@block2.setter
def block2(self, value):
option = Option()
option.number = defines.OptionRegistry.BLOCK2.number
num, m, size = value
if size > 512:
szx = 6
elif 256 < size <= 512:
szx = 5
elif 128 < size <= 256:
szx = 4
elif 64 < size <= 128:
szx = 3
elif 32 < size <= 64:
szx = 2
elif 16 < size <= 32:
szx = 1
else:
szx = 0
value = (num << 4)
value |= (m << 3)
value |= szx
option.value = value
self.add_option(option)
@block2.deleter
def block2(self):
self.del_option_by_number(defines.OptionRegistry.BLOCK2.number)
@property
def line_print(self):
inv_types = {v: k for k, v in defines.Types.iteritems()}
if self._code is None:
self._code = defines.Codes.EMPTY.number
msg = "From {source}, To {destination}, {type}-{mid}, {code}-{token}, ["\
.format(source=self._source, destination=self._destination, type=inv_types[self._type], mid=self._mid,
code=defines.Codes.LIST[self._code].name, token=self._token)
for opt in self._options:
msg += "{name}: {value}, ".format(name=opt.name, value=opt.value)
msg += "]"
if self.payload is not None:
msg += " {payload}...{length} bytes".format(payload=self.payload[0:20], length=len(self.payload))
else:
msg += " No payload"
return msg
def __str__(self):
return self.line_print
def pretty_print(self):
"""
Return the message as a formatted string.
:return: the string representing the message
"""
msg = "Source: " + str(self._source) + "\n"
msg += "Destination: " + str(self._destination) + "\n"
inv_types = {v: k for k, v in defines.Types.iteritems()}
msg += "Type: " + str(inv_types[self._type]) + "\n"
msg += "MID: " + str(self._mid) + "\n"
if self._code is None:
self._code = 0
msg += "Code: " + str(defines.Codes.LIST[self._code].name) + "\n"
msg += "Token: " + str(self._token) + "\n"
for opt in self._options:
msg += str(opt)
msg += "Payload: " + "\n"
msg += str(self._payload) + "\n"
return msg
|
data/ZEROFAIL/goblin/goblin/properties/__init__.py
|
from .properties import *
from .strategy import *
|
data/SmokinCaterpillar/pypet/pypet/tests/profiling/speed_analysis/avg_runtima_as_function_of_length.py
|
__author__ = 'robert'
from pypet import Environment, Trajectory
from pypet.tests.testutils.ioutils import make_temp_dir, get_log_config
import os
import matplotlib.pyplot as plt
import numpy as np
import time
def job(traj):
traj.f_ares('$set.$', 42, comment='A result')
def get_runtime(length):
filename = os.path.join('tmp', 'hdf5', 'many_runs.hdf5')
with Environment(filename = filename,
log_levels=50, report_progress=(0.0002, 'progress', 50),
overwrite_file=True, purge_duplicate_comments=False,
log_stdout=False,
multiproc=False, ncores=2, use_pool=True,
wrap_mode='PIPE',
summary_tables=False, small_overview_tables=False) as env:
traj = env.v_traj
traj.par.f_apar('x', 0, 'parameter')
traj.f_explore({'x': range(length)})
max_run = 1000
for idx in range(len(traj)):
if idx > max_run:
traj.f_get_run_information(idx, copy=False)['completed'] = 1
start = time.time()
env.f_run(job)
end = time.time()
total = end - start
return total/float(min(len(traj), max_run)), total/float(min(len(traj), max_run)) * len(traj)
def main():
lengths = [100000, 50000, 10000, 5000, 1000, 500, 100, 50, 10, 5, 1]
runtimes = [get_runtime(x) for x in lengths]
avg_runtimes = [x[0] for x in runtimes]
summed_runtime = [x[1] for x in runtimes]
plt.subplot(2, 1, 1)
plt.semilogx(list(reversed(lengths)), list(reversed(avg_runtimes)), linewidth=2)
plt.xlabel('Runs')
plt.ylabel('t[s]')
plt.title('Average Runtime per single run')
plt.grid()
plt.subplot(2, 1, 2)
plt.loglog(lengths, summed_runtime, linewidth=2)
plt.grid()
plt.xlabel('Runs')
plt.ylabel('t[s]')
plt.title('Total runtime of experiment')
plt.savefig('avg_runtime_as_func_of_lenght_1000_single_core')
plt.show()
if __name__ == '__main__':
main()
|
data/JeffHeard/sondra/sondra/tests/test_valuehandlers.py
|
from sondra.document.valuehandlers import DateTime, Geometry, Now
from shapely.geometry import Point
from datetime import datetime
import rethinkdb as r
import pytest
from sondra.tests.api import *
from sondra.auth import Auth
s = ConcreteSuite()
api = SimpleApp(s)
auth = Auth(s)
AuthenticatedApp(s)
AuthorizedApp(s)
s.ensure_database_objects()
@pytest.fixture(scope='module')
def simple_doc(request):
simple_doc = s['simple-app']['simple-documents'].create({
'name': "valuehandler test",
"date": datetime.now(),
"value": 0
})
def teardown():
simple_doc.delete()
request.addfinalizer(teardown)
return simple_doc
@pytest.fixture(scope='module')
def fk_doc(request, simple_doc):
fk_doc = s['simple-app']['foreign-key-docs'].create({
'name': "valuehandler test foreign key",
'simple_document': simple_doc,
'rest': [simple_doc]
})
def teardown():
fk_doc.delete()
request.addfinalizer(teardown)
return fk_doc
def test_foreignkey(fk_doc, simple_doc):
retr_doc = s['simple-app']['foreign-key-docs']['valuehandler-test-foreign-key']
assert isinstance(fk_doc.obj['simple_document'], str)
assert fk_doc.obj['simple_document'] == simple_doc.url
assert isinstance(retr_doc.obj['simple_document'], str)
assert retr_doc.obj['simple_document'] == simple_doc.url
storage_repr = fk_doc.rql_repr()
assert storage_repr['simple_document'] == simple_doc.id
assert isinstance(fk_doc['simple_document'], SimpleDocument)
|
data/UDST/activitysim/activitysim/activitysim.py
|
from operator import itemgetter
import numpy as np
import pandas as pd
from zbox import toolz as tz
from .skim import Skims, Skims3D
from .mnl import utils_to_probs, make_choices, interaction_dataset
import os
import psutil
import gc
def usage(when):
gc.collect()
process = psutil.Process(os.getpid())
bytes = process.memory_info().rss
mb = (bytes / (1024 * 1024.0))
gb = (bytes / (1024 * 1024 * 1024.0))
return "USAGE: %s current: %s MB (%s GB)" % \
(when, int(mb), round(gb, 2))
def random_rows(df, n):
return df.take(np.random.choice(len(df), size=n, replace=False))
def read_model_spec(fname,
description_name="Description",
expression_name="Expression"):
"""
Read a CSV model specification into a Pandas DataFrame or Series.
The CSV is expected to have columns for component descriptions
and expressions, plus one or more alternatives.
The CSV is required to have a header with column names. For example:
Description,Expression,alt0,alt1,alt2
Parameters
----------
fname : str
Name of a CSV spec file.
description_name : str, optional
Name of the column in `fname` that contains the component description.
expression_name : str, optional
Name of the column in `fname` that contains the component expression.
Returns
-------
spec : pandas.DataFrame
The description column is dropped from the returned data and the
expression values are set as the table index.
"""
    cfg = pd.read_csv(fname, comment='#')
cfg = cfg.dropna(subset=[expression_name])
cfg = cfg.drop(description_name, axis=1).set_index(expression_name)
return cfg
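# Editor's sketch: a spec CSV like
#   Description,Expression,alt0,alt1
#   income effect,income * 0.5,1.0,0.0
# comes back as a DataFrame indexed by the Expression column, with one
# coefficient column per alternative (the Description column is dropped).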
def eval_variables(exprs, df, locals_d=None):
"""
Evaluate a set of variable expressions from a spec in the context
of a given data table.
There are two kinds of supported expressions: "simple" expressions are
evaluated in the context of the DataFrame using DataFrame.eval.
This is the default type of expression.
Python expressions are evaluated in the context of this function using
Python's eval function. Because we use Python's eval this type of
expression supports more complex operations than a simple expression.
Python expressions are denoted by beginning with the @ character.
Users should take care that these expressions must result in
a Pandas Series.
Parameters
----------
exprs : sequence of str
df : pandas.DataFrame
locals_d : Dict
This is a dictionary of local variables that will be the environment
for an evaluation of an expression that begins with @
Returns
-------
variables : pandas.DataFrame
Will have the index of `df` and columns of `exprs`.
"""
if locals_d is None:
locals_d = {}
locals_d.update(locals())
def to_series(x):
if np.isscalar(x):
return pd.Series([x] * len(df), index=df.index)
return x
l = []
for e in exprs:
try:
l.append((e, to_series(eval(e[1:], globals(), locals_d))
if e.startswith('@') else df.eval(e)))
except Exception as err:
print "Variable evaluation failed for: %s" % str(e)
raise err
return pd.DataFrame.from_items(l)
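# Editor's note: e.g. 'income > 50000' is handed to DataFrame.eval, while
# '@skims["DIST"]' (leading '@' stripped) is eval()'d against locals_d,
# assuming a `skims` entry was placed there; both forms must yield values
# alignable to df.index.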
def add_skims(df, skims):
"""
Add the dataframe to the Skims object so that it can be dereferenced
using the parameters of the skims object.
Parameters
----------
df : pandas.DataFrame
Table to which to add skim data as new columns.
`df` is modified in-place.
skims : Skims object
The skims object is used to contain multiple matrices of
origin-destination impedances. Make sure to also add it to the
locals_d below in order to access it in expressions. The *only* job
of this method in regards to skims is to call set_df with the
dataframe that comes back from interacting choosers with
alternatives. See the skims module for more documentation on how
the skims object is intended to be used.
"""
if not isinstance(skims, list):
assert isinstance(skims, Skims) or isinstance(skims, Skims3D)
skims.set_df(df)
else:
for skim in skims:
assert isinstance(skim, Skims) or isinstance(skim, Skims3D)
skim.set_df(df)
def _check_for_variability(model_design):
"""
This is an internal method which checks for variability in each
expression - under the assumption that you probably wouldn't be using a
variable (in live simulations) if it had no variability. This is a
warning to the user that they might have constructed the variable
incorrectly. It samples 1000 rows in order to not hurt performance -
it's likely that if 1000 rows have no variability, the whole dataframe
will have no variability.
"""
l = min(1000, len(model_design))
sample = random_rows(model_design, l)
sample = sample.astype('float')
sample = sample.describe().transpose()
error = sample[sample["std"] == 0]
if len(error):
print "WARNING: Some columns have no variability:\n", \
error.index.values
error = sample[sample["count"] < l]
if len(error):
print "WARNING: Some columns have missing values:\n", \
error.index.values
def simple_simulate(choosers, spec, skims=None, locals_d=None):
"""
Run a simulation for when the model spec does not involve alternative
specific data, e.g. there are no interactions with alternative
properties and no need to sample from alternatives.
Parameters
----------
choosers : pandas.DataFrame
spec : pandas.DataFrame
A table of variable specifications and coefficient values.
Variable expressions should be in the table index and the table
should have a column for each alternative.
skims : Skims object
The skims object is used to contain multiple matrices of
origin-destination impedances. Make sure to also add it to the
locals_d below in order to access it in expressions. The *only* job
of this method in regards to skims is to call set_df with the
dataframe that comes back from interacting choosers with
alternatives. See the skims module for more documentation on how
the skims object is intended to be used.
locals_d : Dict
This is a dictionary of local variables that will be the environment
for an evaluation of an expression that begins with @
Returns
-------
choices : pandas.Series
Index will be that of `choosers`, values will match the columns
of `spec`.
model_design : pandas.DataFrame
The evaluated expressions, with the index of `choosers` and one
column per expression in `spec`.
"""
if skims:
add_skims(choosers, skims)
model_design = eval_variables(spec.index, choosers, locals_d)
_check_for_variability(model_design)
utilities = model_design.dot(spec)
probs = utils_to_probs(utilities)
choices = make_choices(probs)
return choices, model_design
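# A minimal sketch with hypothetical data: spec expressions live in the
# index and each column carries the coefficients for one alternative.
def _example_simple_simulate():
    choosers = pd.DataFrame({"income": [40.0, 80.0], "age": [25.0, 45.0]})
    spec = pd.DataFrame({"car": [-0.01, 0.03], "bus": [0.02, -0.03]},
                        index=["income * 0.5", "age * 0.1"])
    choices, model_design = simple_simulate(choosers, spec)
    # choices is indexed like choosers with values drawn from spec's
    # columns, per the docstring above
    return choices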
def interaction_simulate(
choosers, alternatives, spec,
skims=None, locals_d=None, sample_size=None):
"""
Run a simulation in the situation in which alternatives must
be merged with choosers because there are interaction terms or
because alternatives are being sampled.
Parameters
----------
choosers : pandas.DataFrame
DataFrame of choosers
alternatives : pandas.DataFrame
DataFrame of alternatives - will be merged with choosers, currently
without sampling
spec : pandas.DataFrame
A Pandas DataFrame that gives the specification of the variables to
compute and the coefficients for each variable.
Variable specifications must be in the table index and the
table should have only one column of coefficients.
skims : Skims object
The skims object is used to contain multiple matrices of
origin-destination impedances. Make sure to also add it to the
locals_d below in order to access it in expressions. The *only* job
of this method in regards to skims is to call set_df with the
dataframe that comes back from interacting choosers with
alternatives. See the skims module for more documentation on how
the skims object is intended to be used.
locals_d : Dict
This is a dictionary of local variables that will be the environment
for an evaluation of an expression that begins with @
sample_size : int, optional
Sample alternatives with sample of given size. By default is None,
which does not sample alternatives.
Returns
-------
choices : pandas.Series
A series whose index matches the index of the choosers DataFrame
and whose values match the index of the alternatives DataFrame;
choices are simulated in the standard Monte Carlo fashion.
model_design : pandas.DataFrame
The evaluated expressions for the merged chooser/alternative rows.
"""
if len(spec.columns) > 1:
raise RuntimeError('spec must have only one column')
sample_size = sample_size or len(alternatives)
if sample_size > len(alternatives):
print "clipping sample size %s to len(alternatives) %s" % (sample_size, len(alternatives))
sample_size = min(sample_size, len(alternatives))
alternatives[alternatives.index.name] = alternatives.index
df = interaction_dataset(choosers, alternatives, sample_size)
if skims:
add_skims(df, skims)
model_design = eval_variables(spec.index, df, locals_d)
_check_for_variability(model_design)
utilities = model_design.dot(spec).astype('float')
utilities = pd.DataFrame(
utilities.as_matrix().reshape(len(choosers), sample_size),
index=choosers.index)
probs = utils_to_probs(utilities)
positions = make_choices(probs)
offsets = np.arange(len(positions)) * sample_size
choices = model_design.index.take(positions + offsets)
return pd.Series(choices, index=choosers.index), model_design
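# Toy illustration (made-up labels) of the flattening trick above:
# 2 choosers x sample_size 3 rows laid out chooser-major, so adding
# offsets [0, 3] converts per-chooser positions to absolute row numbers.
def _example_interaction_offsets():
    sample_size = 3
    positions = np.array([2, 0])
    offsets = np.arange(len(positions)) * sample_size
    flat_index = pd.Index(['c0a0', 'c0a1', 'c0a2', 'c1a0', 'c1a1', 'c1a2'])
    return flat_index.take(positions + offsets)  # ['c0a2', 'c1a0']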
def other_than(groups, bools):
"""
Construct a Series that has booleans indicating the presence of
something- or someone-else with a certain property within a group.
Parameters
----------
groups : pandas.Series
A column with the same index as `bools` that defines the grouping
of `bools`. The `bools` Series will be used to index `groups` and
then the grouped values will be counted.
bools : pandas.Series
A boolean Series indicating where the property of interest is present.
Should have the same index as `groups`.
Returns
-------
others : pandas.Series
A boolean Series with the same index as `groups` and `bools`
indicating whether there is something- or someone-else within
a group with some property (as indicated by `bools`).
"""
counts = groups[bools].value_counts()
merge_col = groups.to_frame(name='right')
pipeline = tz.compose(
tz.curry(pd.Series.fillna, value=False),
itemgetter('left'),
tz.curry(
pd.DataFrame.merge, right=merge_col, how='right', left_index=True,
right_on='right'),
tz.curry(pd.Series.to_frame, name='left'))
gt0 = pipeline(counts > 0)
gt1 = pipeline(counts > 1)
return gt1.where(bools, other=gt0)
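# A small worked example (hypothetical households): does anyone *else*
# in my group have the property?
def _example_other_than():
    groups = pd.Series([1, 1, 2, 2], index=['a', 'b', 'c', 'd'])
    bools = pd.Series([True, False, True, True], index=groups.index)
    # expected: a False (sole carrier in group 1); b, c, d True
    return other_than(groups, bools)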
|
data/Impactstory/total-impact-webapp/totalimpactwebapp/tweet.py
|
from totalimpactwebapp import json_sqlalchemy
from util import commit
from util import cached_property
from util import dict_from_dir
from util import as_int_or_float_if_possible
from totalimpactwebapp import db
from totalimpactwebapp.tweeter import Tweeter
from birdy.twitter import AppClient, TwitterApiError, TwitterRateLimitError, TwitterClientError
from collections import defaultdict
from sqlalchemy import case
import os
import re
import datetime
import logging
logger = logging.getLogger('ti.tweet')
def tweets_from_tiids(tiids):
if not tiids:
return []
tweets = db.session.query(Tweet).filter(Tweet.tiid.in_(tiids)).all()
return tweets
def get_product_tweets_for_profile(profile_id):
tweets = db.session.query(Tweet).filter(Tweet.profile_id==profile_id).all()
response = defaultdict(list)
for tweet in tweets:
if tweet.tiid and tweet.tweet_text:
response[tweet.tiid].append(tweet)
return response
def store_tweet_payload_and_tweeter_from_twitter(payload_dicts_from_twitter, tweets):
tweets_by_tweet_id = defaultdict(list)
for tweet in tweets:
tweets_by_tweet_id[tweet.tweet_id].append(tweet)
for payload_dict in payload_dicts_from_twitter:
tweet_id = payload_dict["id_str"]
logger.debug("saving unsaved parts for tweet_id {tweet_id}".format(
tweet_id=tweet_id))
for tweet in tweets_by_tweet_id[tweet_id]:
if not tweet.payload:
tweet.payload = payload_dict
logger.info(u"updated tweet payload for {tweet_id} {tiid}".format(
tweet_id=tweet_id, tiid=tweet.tiid))
if "user" in payload_dict:
try:
tweet.tweeter.set_attributes_from_twitter_data(payload_dict["user"])
except AttributeError:
tweeter = Tweeter.query.get(tweet.screen_name)
if not tweeter:
tweeter = Tweeter(screen_name=tweet.screen_name)
db.session.add(tweeter)
tweeter.set_attributes_from_twitter_data(payload_dict["user"])
tweet.tweeter = tweeter
commit(db)
if tweet.tweeter:
logger.info(u"updated tweeter followers for {screen_name}".format(
screen_name=tweet.tweeter.screen_name))
def flag_deleted_tweets(tweet_ids):
if not tweet_ids:
return None
for tweet in Tweet.query.filter(Tweet.tweet_id.in_(tweet_ids)).all():
tweet.is_deleted = True
db.session.merge(tweet)
def handle_all_tweets(data, tweets):
store_tweet_payload_and_tweeter_from_twitter(data, tweets)
tweet_ids = [tweet.tweet_id for tweet in tweets]
tweet_ids_with_response = set(tweet["id_str"] for tweet in data)
tweet_ids_without_response = [tweet_id for tweet_id in tweet_ids if tweet_id not in tweet_ids_with_response]
flag_deleted_tweets(tweet_ids_without_response)
return True
class AppDictClient(AppClient):
@staticmethod
def get_json_object_hook(data):
return data
def get_and_save_tweet_text_and_tweeter_followers(tweets):
client = AppDictClient(
os.getenv("TWITTER_CONSUMER_KEY"),
os.getenv("TWITTER_CONSUMER_SECRET"),
access_token=os.getenv("TWITTER_ACCESS_TOKEN")
)
logger.info(u"in get_and_save_tweet_text_and_tweeter_followers for {num} tweet_ids".format(
num=len(tweets)))
group_size = 100
list_of_groups = [ tweets[i:i+group_size] for i in range(0, len(tweets), group_size) ]
for tweet_subset in list_of_groups:
tweet_id_string = ",".join([tweet.tweet_id for tweet in tweet_subset])
try:
response = client.api.statuses.lookup.post(id=tweet_id_string, trim_user=False)
handle_all_tweets(response.data, tweet_subset)
except TwitterApiError:
logger.exception("TwitterApiError error, skipping")
except TwitterClientError:
logger.exception("TwitterClientError error, skipping")
except TwitterRateLimitError:
logger.exception("TwitterRateLimitError error, skipping")
return
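# The group_size above matches Twitter's statuses/lookup limit of 100 ids
# per request. The same slicing, standalone (hypothetical ids):
#   ids = range(250); groups = [ids[i:i+100] for i in range(0, len(ids), 100)]
#   [len(g) for g in groups]  ->  [100, 100, 50]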
def hydrate_twitter_text_and_followers(profile_id, altmetric_twitter_posts):
logger.info(u"in hydrate_twitter_text_and_followers for profile {profile_id}".format(
profile_id=profile_id))
tweets_to_hydrate_from_twitter = []
tweets = Tweet.query.filter(Tweet.profile_id==profile_id)
tweet_dict = dict([((tweet.tweet_id, tweet.tiid), tweet) for tweet in tweets])
for tiid, post_list in altmetric_twitter_posts.iteritems():
for post in post_list:
tweet_id = post["tweet_id"]
screen_name = post["author"]["id_on_source"]
if (tweet_id, tiid) in tweet_dict:
tweet = tweet_dict[(tweet_id, tiid)]
if not tweet.tweet_text and not tweet.is_deleted:
tweets_to_hydrate_from_twitter.append(tweet)
else:
if not Tweet.query.get((tweet_id, tiid)):
tweet = Tweet(tweet_id=tweet_id, tiid=tiid)
tweet.set_attributes_from_altmetric_post(post)
tweet.profile_id = profile_id
tweets_to_hydrate_from_twitter.append(tweet)
db.session.add(tweet)
if not tweet.tweeter:
tweeter = Tweeter.query.get(screen_name)
if not tweeter:
tweeter = Tweeter(screen_name=screen_name)
db.session.add(tweeter)
tweeter.set_attributes_from_altmetric_post(post)
commit(db)
logger.info(u"before tweets_to_hydrate_from_twitter for {profile_id}".format(
profile_id=profile_id))
if tweets_to_hydrate_from_twitter:
commit(db)
tweet_ids = [tweet.tweet_id for tweet in tweets_to_hydrate_from_twitter]
logger.info(u"calling get_and_save_tweet_text_and_tweeter_followers for profile {profile_id}".format(
profile_id=profile_id))
get_and_save_tweet_text_and_tweeter_followers(tweets_to_hydrate_from_twitter)
commit(db)
else:
logger.info(u"no tweets to hydrate for profile {profile_id}".format(
profile_id=profile_id))
return
handle_workaround_join_string = "remote(Tweeter.screen_name)==case([(foreign(Tweet.screen_name)=='Dr_Bik', 'hollybik')], else_=foreign(Tweet.screen_name))"
class Tweet(db.Model):
tweet_id = db.Column(db.Text, primary_key=True)
tiid = db.Column(db.Text, primary_key=True)
profile_id = db.Column(db.Integer, db.ForeignKey('profile.id'))
screen_name = db.Column(db.Text, db.ForeignKey('tweeter.screen_name'))
tweet_timestamp = db.Column(db.DateTime())
payload = db.Column(json_sqlalchemy.JSONAlchemy(db.Text))
is_deleted = db.Column(db.Boolean)
tweet_url = db.Column(db.Text)
country = db.Column(db.Text)
followers_at_time_of_tweet = db.Column(db.Integer)
tweeter = db.relationship(
'Tweeter',
lazy='joined',
cascade='all',
backref=db.backref("tweet"),
uselist=False,
primaryjoin=handle_workaround_join_string
)
def __init__(self, **kwargs):
if "payload" in kwargs:
payload_dict = kwargs["payload"]
kwargs["tweet_id"] = payload_dict["id_str"]
kwargs["screen_name"] = payload_dict["user"]["screen_name"]
kwargs["payload"] = payload_dict
kwargs["tweet_timestamp"] = datetime.datetime.strptime(payload_dict["created_at"], r"%a %b %d %H:%M:%S +0000 %Y")
if not "country" in kwargs:
try:
kwargs["country"] = payload_dict["place"]["country_code"]
except (AttributeError, TypeError):
pass
super(Tweet, self).__init__(**kwargs)
@classmethod
def most_recent_tweet_id(cls, screen_name):
screen_name = screen_name.replace("@", "")
q = db.session.query(Tweet).filter(Tweet.screen_name==screen_name).order_by(Tweet.tweet_timestamp.desc())
tweet = q.first()
try:
tweet_id = tweet.tweet_id
except AttributeError:
tweet_id = None
return tweet_id
@cached_property
def tweet_text(self):
try:
return self.payload["text"]
except TypeError:
return None
@cached_property
def tweet_text_with_links(self):
if self.tweet_text is None:
return None
ret = self.tweet_text
ret = re.sub(r"(http://.+?)(\s|$)", r"<link>", ret)
for url_info in self.urls:
my_link = u" <a class='linkout entity' href='{url}'>{display_url}</a> ".format(
url=url_info["expanded_url"],
display_url=url_info["display_url"]
)
ret = re.sub(r"<link>", my_link, ret, 1)
ret = re.sub(r"(^|[^
ret = re.sub(r"(^|[^@\w])@(\w+)\b", r"\1<a href='http://twitter.com/\2' class='entity at-name'>@\2</a> ", ret)
return ret
@cached_property
def urls(self):
try:
return self.payload["entities"]["urls"]
except TypeError:
return None
except KeyError:
return []
@cached_property
def has_country(self):
return self.country is not None
def set_attributes_from_altmetric_post(self, post):
self.tweet_id = post["tweet_id"]
self.screen_name = post["author"]["id_on_source"]
self.tweet_timestamp = post["posted_on"]
if "geo" in post["author"]:
self.country = post["author"]["geo"].get("country", None)
return self
def __repr__(self):
return u'<Tweet {tweet_id} {profile_id} {screen_name} {timestamp}>'.format(
tweet_id=self.tweet_id,
profile_id=self.profile_id,
screen_name=self.screen_name,
timestamp=self.tweet_timestamp)
def to_dict(self):
attributes_to_ignore = [
"payload"
]
ret = dict_from_dir(self, attributes_to_ignore)
return ret
twitter_example_contents = """{
"contributors": null,
"coordinates": null,
"created_at": "Sun Dec 16 22:42:55 +0000 2012",
"entities": {
"hashtags": [
{
"indices": [
72,
81
],
"text": "scholars"
}
],
"symbols": [],
"urls": [
{
"display_url": "shar.es/hfqDY",
"expanded_url": "http://shar.es/hfqDY",
"indices": [
83,
103
],
"url": "http://t.co/GDwhOrnu"
}
],
"user_mentions": [
{
"id": 259990583,
"id_str": "259990583",
"indices": [
3,
11
],
"name": "Karen Lips",
"screen_name": "kwren88"
},
{
"id": 224631899,
"id_str": "224631899",
"indices": [
17,
26
],
"name": "figshare",
"screen_name": "figshare"
}
]
},
"favorite_count": 0,
"favorited": false,
"geo": null,
"id": 280442912347664384,
"id_str": "280442912347664384",
"in_reply_to_screen_name": null,
"in_reply_to_status_id": null,
"in_reply_to_status_id_str": null,
"in_reply_to_user_id": null,
"in_reply_to_user_id_str": null,
"lang": "en",
"place": null,
"possibly_sensitive": false,
"retweet_count": 5,
"retweeted": false,
"retweeted_status": {
"contributors": null,
"coordinates": {
"coordinates": [
-77.01357981,
39.01103526
],
"type": "Point"
},
"created_at": "Sun Dec 16 22:34:13 +0000 2012",
"entities": {
"hashtags": [
{
"indices": [
59,
68
],
"text": "scholars"
}
],
"symbols": [],
"urls": [
{
"display_url": "shar.es/hfqDY",
"expanded_url": "http://shar.es/hfqDY",
"indices": [
70,
90
],
"url": "http://t.co/GDwhOrnu"
}
],
"user_mentions": [
{
"id": 224631899,
"id_str": "224631899",
"indices": [
4,
13
],
"name": "figshare",
"screen_name": "figshare"
}
]
},
"favorite_count": 0,
"favorited": false,
"geo": {
"coordinates": [
39.01103526,
-77.01357981
],
"type": "Point"
},
"id": 280440721884983297,
"id_str": "280440721884983297",
"in_reply_to_screen_name": null,
"in_reply_to_status_id": null,
"in_reply_to_status_id_str": null,
"in_reply_to_user_id": null,
"in_reply_to_user_id_str": null,
"lang": "en",
"place": {
"attributes": {},
"bounding_box": {
"coordinates": [
[
[
-77.064086,
38.979735
],
[
-76.97162,
38.979735
],
[
-76.97162,
39.036964
],
[
-77.064086,
39.036964
]
]
],
"type": "Polygon"
},
"contained_within": [],
"country": "United States",
"country_code": "US",
"full_name": "Silver Spring, MD",
"id": "6417871953fa5e86",
"name": "Silver Spring",
"place_type": "city",
"url": "https://api.twitter.com/1.1/geo/id/6417871953fa5e86.json"
},
"possibly_sensitive": false,
"retweet_count": 5,
"retweeted": false,
"source": "<a href=\"http://twitter.com/download/iphone\" rel=\"nofollow\">Twitter for iPhone</a>",
"text": "MT \"@figshare: Prevalence and use of Twitter growing among
"truncated": false,
"user": {
"contributors_enabled": false,
"created_at": "Thu Mar 03 00:30:46 +0000 2011",
"default_profile": false,
"default_profile_image": false,
"description": "Amphibian Ecologist. Associate Professor Biology, UMaryland. Director, Graduate Program in Sustainable Development & Conservation Biology. tweets my own",
"entities": {
"description": {
"urls": []
},
"url": {
"urls": [
{
"display_url": "lipslab.weebly.com",
"expanded_url": "http://lipslab.weebly.com/",
"indices": [
0,
22
],
"url": "http://t.co/8sw0WzjuIn"
}
]
}
},
"favourites_count": 2979,
"follow_request_sent": null,
"followers_count": 1767,
"following": null,
"friends_count": 946,
"geo_enabled": true,
"id": 259990583,
"id_str": "259990583",
"is_translation_enabled": false,
"is_translator": false,
"lang": "en",
"listed_count": 92,
"location": "",
"name": "Karen Lips",
"notifications": null,
"profile_background_color": "C0DEED",
"profile_background_image_url": "http://pbs.twimg.com/profile_background_images/795249398/fae1497afc5e983974518244cf4aaba2.jpeg",
"profile_background_image_url_https": "https://pbs.twimg.com/profile_background_images/795249398/fae1497afc5e983974518244cf4aaba2.jpeg",
"profile_background_tile": false,
"profile_banner_url": "https://pbs.twimg.com/profile_banners/259990583/1348775951",
"profile_image_url": "http://pbs.twimg.com/profile_images/3495233234/70ac2d2c7299e4b04febca2beb83b74f_normal.png",
"profile_image_url_https": "https://pbs.twimg.com/profile_images/3495233234/70ac2d2c7299e4b04febca2beb83b74f_normal.png",
"profile_link_color": "0089B3",
"profile_sidebar_border_color": "FFFFFF",
"profile_sidebar_fill_color": "361645",
"profile_text_color": "02606A",
"profile_use_background_image": true,
"protected": false,
"screen_name": "kwren88",
"statuses_count": 11928,
"time_zone": "Eastern Time (US & Canada)",
"url": "http://t.co/8sw0WzjuIn",
"utc_offset": -14400,
"verified": false
}
},
"source": "<a href=\"http://twitter.com/download/iphone\" rel=\"nofollow\">Twitter for iPhone</a>",
"text": "RT @kwren88: MT \"@figshare: Prevalence and use of Twitter growing among
"truncated": false,
"user": {
"contributors_enabled": false,
"created_at": "Tue Mar 29 14:48:17 +0000 2011",
"default_profile": false,
"default_profile_image": false,
"description": "Postdoc,lazy blogger, co-host of http://t.co/uz2JfRCfki podcast. Interests: herpetology, behavioral ecology, evolution, genes and behavior. I heart salamanders.",
"entities": {
"description": {
"urls": [
{
"display_url": "Breakingbio.com",
"expanded_url": "http://Breakingbio.com",
"indices": [
33,
55
],
"url": "http://t.co/uz2JfRCfki"
}
]
},
"url": {
"urls": [
{
"display_url": "natureafield.com",
"expanded_url": "http://www.natureafield.com",
"indices": [
0,
22
],
"url": "http://t.co/I0kb1Imd6b"
}
]
}
},
"favourites_count": 789,
"follow_request_sent": null,
"followers_count": 1128,
"following": null,
"friends_count": 477,
"geo_enabled": true,
"id": 274000727,
"id_str": "274000727",
"is_translation_enabled": false,
"is_translator": false,
"lang": "en",
"listed_count": 91,
"location": "Buenos Aires, Argentina",
"name": "Heidi K Smith-Parker",
"notifications": null,
"profile_background_color": "FED105",
"profile_background_image_url": "http://pbs.twimg.com/profile_background_images/259765740/x069eeb809c51076b2883e31fbce942f.png",
"profile_background_image_url_https": "https://pbs.twimg.com/profile_background_images/259765740/x069eeb809c51076b2883e31fbce942f.png",
"profile_background_tile": true,
"profile_banner_url": "https://pbs.twimg.com/profile_banners/274000727/1349651541",
"profile_image_url": "http://pbs.twimg.com/profile_images/2963151089/d9cfaa7ab235dcd1ad3430d534c23929_normal.jpeg",
"profile_image_url_https": "https://pbs.twimg.com/profile_images/2963151089/d9cfaa7ab235dcd1ad3430d534c23929_normal.jpeg",
"profile_link_color": "E9BF05",
"profile_sidebar_border_color": "3B3B3B",
"profile_sidebar_fill_color": "3B3B3B",
"profile_text_color": "989898",
"profile_use_background_image": true,
"protected": false,
"screen_name": "HeidiKayDeidi",
"statuses_count": 7747,
"time_zone": null,
"url": "http://t.co/I0kb1Imd6b",
"utc_offset": null,
"verified": false
}
}"""
|
data/SEED-platform/seed/seed/tests/api/seed_readingtools.py
|
"""
:copyright (c) 2014 - 2016, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Department of Energy) and contributors. All rights reserved.
:author
"""
import logging
import pprint
import json
import os
import requests
import csv
import datetime as dt
import time
from calendar import timegm
def upload_file(upload_header, upload_filepath, main_url, upload_dataset_id, upload_datatype):
"""
Checks if the upload is through an AWS system or through file system.
Proceeds with the appropriate upload method.
- upload_filepath: full path to the file
- upload_dataset_id: which ImportRecord to associate the file with
- upload_datatype: type of data in the file (Assessed Raw, Portfolio Raw)
"""
def _upload_file_to_aws(aws_upload_details):
"""
This code is from the original APIClient.
Implements uploading a data file to S3 directly.
This is a 3-step process:
1. SEED instance signs the upload request.
2. File is uploaded to S3 with signature included.
3. Client notifies SEED instance when upload completed.
@TODO: Currently can only upload to s3.amazonaws.com, though there are
other S3-compatible services that could be drop-in replacements.
Args:
- aws_upload_details: results from the 'get_upload_details' endpoint;
contains details about where to send the file and how.
Returns:
{"import_file_id": 54,
"success": true,
"filename": "DataforSEED_dos15.csv"}
"""
sig_uri = aws_upload_details['signature']
now = dt.datetime.utcnow()
expires = now + dt.timedelta(hours=1)
now_ts = timegm(now.timetuple())
key = 'data_imports/%s.%s' % (filename, now_ts)
payload = {}
payload['expiration'] = expires.isoformat() + 'Z'
payload['conditions'] = [
{'bucket': aws_upload_details['aws_bucket_name']},
{'Content-Type': 'text/csv'},
{'acl': 'private'},
{'success_action_status': '200'},
{'key': key}
]
sig_result = requests.post(main_url + sig_uri,
headers=upload_header,
data=json.dumps(payload))
if sig_result.status_code != 200:
msg = "Something went wrong with signing document."
raise RuntimeError(msg)
else:
sig_result = sig_result.json()
upload_url = "http://%s.s3.amazonaws.com/" % (aws_upload_details['aws_bucket_name'])
s3_payload = [
('key', key),
('AWSAccessKeyId', aws_upload_details['aws_client_key']),
('Content-Type', 'text/csv'),
('success_action_status', '200'),
('acl', 'private'),
('policy', sig_result['policy']),
('signature', sig_result['signature']),
('file', (filename, open(upload_filepath, 'rb')))
]
result = requests.post(upload_url,
files=s3_payload)
if result.status_code != 200:
msg = "Something went wrong with the S3 upload: %s " % result.reason
raise RuntimeError(msg)
completion_uri = aws_upload_details['upload_complete']
completion_payload = {
'import_record': upload_dataset_id,
'key': key,
'source_type': upload_datatype
}
return requests.get(main_url + completion_uri,
headers=upload_header,
params=completion_payload)
def _upload_file_to_file_system(upload_details):
"""
Implements uploading to SEED's file system. Used by
upload_file if SEED is configured for local file storage.
Args:
- upload_details: results from the 'get_upload_details' endpoint;
contains details about where to send the file and how.
Returns:
{"import_file_id": 54,
"success": true,
"filename": "DataforSEED_dos15.csv"}
"""
upload_url = "%s%s" % (main_url, upload_details['upload_path'])
fsysparams = {'qqfile': upload_filepath,
'import_record': upload_dataset_id,
'source_type': upload_datatype}
return requests.post(upload_url,
params=fsysparams,
files={'filename': open(upload_filepath, 'rb')},
headers=upload_header)
upload_details = requests.get(main_url + '/data/get_upload_details/', headers=upload_header)
upload_details = upload_details.json()
filename = os.path.basename(upload_filepath)
if upload_details['upload_mode'] == 'S3':
return _upload_file_to_aws(upload_details)
elif upload_details['upload_mode'] == 'filesystem':
return _upload_file_to_file_system(upload_details)
else:
raise RuntimeError("Upload mode unknown: %s" %
upload_details['upload_mode'])
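# A hedged usage sketch (all values hypothetical; the auth header shape is
# whatever your SEED deployment expects):
#
#   header = {'authorization': 'Bearer <api-key>'}
#   result = upload_file(header, '/tmp/DataforSEED_dos15.csv',
#                        'http://localhost:8000', 1, 'Assessed Raw')
#   result.json()  # e.g. {"import_file_id": 54, "success": true, ...}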
def check_status(resultOut, partmsg, log, PIIDflag=None):
"""Checks the status of the API endpoint and makes the appropriate print outs."""
if resultOut.status_code in [200, 403, 401]:
if PIIDflag == 'cleansing':
msg = pprint.pformat(resultOut.json(), indent=2, width=70)
else:
try:
if 'status' in resultOut.json().keys() and resultOut.json()['status'] == 'error':
msg = resultOut.json()['message']
log.error(partmsg + '...not passed')
log.debug(msg)
raise RuntimeError
elif 'success' in resultOut.json().keys() and not resultOut.json()['success']:
msg = resultOut.json()
log.error(partmsg + '...not passed')
log.debug(msg)
raise RuntimeError
else:
if PIIDflag == 'organizations':
msg = 'Number of organizations:\t' + str(len(resultOut.json()['organizations'][0]))
elif PIIDflag == 'users':
msg = 'Number of users:\t' + str(len(resultOut.json()['users'][0]))
elif PIIDflag == 'mappings':
msg = pprint.pformat(resultOut.json()['suggested_column_mappings'], indent=2, width=70)
elif PIIDflag == 'PM_filter':
msg = "Duplicates: " + str(resultOut.json()['duplicates']) + ", Unmatched: " + str(resultOut.json()['unmatched']) + ", Matched: " + str(resultOut.json()['matched'])
else:
msg = pprint.pformat(resultOut.json(), indent=2, width=70)
except:
log.error(partmsg + '...not passed')
log.debug('Unknown error during request results recovery')
raise RuntimeError
log.info(partmsg + '...passed')
log.debug(msg)
else:
msg = resultOut.reason
log.error(partmsg + '...not passed')
log.debug(msg)
raise RuntimeError
return
def check_progress(mainURL, Header, progress_key):
"""Delays the sequence until progress is at 100 percent."""
time.sleep(5)
progressResult = requests.get(mainURL + '/app/progress/',
headers=Header,
data=json.dumps({'progress_key': progress_key}))
if progressResult.json()['progress'] == 100:
return progressResult
else:
return check_progress(mainURL, Header, progress_key)
def read_map_file(mapfilePath):
"""Read in the mapping file"""
assert (os.path.isfile(mapfilePath)), "Cannot find file:\t" + mapfilePath
mapReader = csv.reader(open(mapfilePath, 'r'))
mapReader.next()
maplist = list()
for rowitem in mapReader:
maplist.append(rowitem)
return maplist
def setup_logger(filename):
"""Set-up the logger object"""
logging.getLogger("requests").setLevel(logging.WARNING)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
formatter_console = logging.Formatter('%(levelname)s - %(message)s')
fh = logging.FileHandler(filename, mode='a')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter_console)
logger.addHandler(ch)
return logger
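# Example wiring (hypothetical log file name): the file handler keeps full
# DEBUG detail while the console only shows INFO and above.
#
#   log = setup_logger('seed_api_test.log')
#   log.info('upload starting')          # console and file
#   log.debug('raw response payload')    # file only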
|
data/PyHDI/PyCoRAM/pycoram/utils/coram2pycoram.py
|
from __future__ import absolute_import
from __future__ import print_function
import re
import sys
import os
def getRamId(oid, sid):
if 0 <= sid and sid <= 31:
return 0
if 32 <= sid and sid <= 63:
return 1
if 64 <= sid and sid <= 95:
return 2
if 96 <= sid and sid <= 127:
return 3
def getRamSubId(oid, sid):
if 0 <= sid and sid <= 31:
return sid
if 32 <= sid and sid <= 63:
return sid - 32
if 64 <= sid and sid <= 95:
return sid - 64
if 96 <= sid and sid <= 127:
return sid - 96
def getChannelId(oid, sid):
return oid
def getChannelSubId(oid, sid):
return sid
def getRegisterId(oid, sid):
return oid
def getRegisterSubId(oid, sid):
return sid
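# The RAM helpers above bank 128 sub-ids into four RAMs of 32 each, e.g.
# getRamId(0, 40) -> 1 with getRamSubId(0, 40) -> 8, and
# getRamId(0, 100) -> 3 with getRamSubId(0, 100) -> 4; channel and
# register ids pass the (oid, sid) pair straight through.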
def main():
f = open(sys.argv[1], 'r')
lines = f.readlines()
output = []
p_thread = re.compile('(.*)/\*THREAD\*/(.*)')
p_thread_id = re.compile('(.*)/\*THREAD_ID\*/(.*)')
p_object_id = re.compile('(.*)/\*OBJECT_ID\*/(.*)')
p_width = re.compile('(.*)/\*WIDTH\*/(.*)')
p_depth = re.compile('(.*)/\*DEPTH\*/(.*)')
p_indexwidth = re.compile('(.*)/\*INDEXWIDTH\*/(.*)')
p_logdepth = re.compile('(.*)/\*LOGDEPTH\*/(.*)')
p_sub_id = re.compile('(.*)/\*SUB_ID\*/(.*)')
module_name = None
thread_name = None
thread_id = None
object_id = None
sub_id = None
width = None
indexwidth = None
depth = None
mode = False
sub_id_num = None
sub_id_base = None
buffer = []
print("`include \"coram2pycoram.v\"")
for line in lines:
if not mode:
m = p_thread.match(line)
if m:
thread_name = re.match('.*(".*").*', m.group(2)).group(1)
module_name = re.search('(CORAM.*?|ChannelFIFO.*?|ChannelReg.*?)#', m.group(1)).group(1)
mode = True
buffer = []
buffer.append(line)
continue
else:
m = p_thread_id.match(line)
if m:
tid_str = m.group(2)[1:-1]
thread_id = re.match('([0-9]*\'.)?([0-9a-fA-F]+)', tid_str).group(2)
buffer.append(line)
continue
m = p_object_id.match(line)
if m:
oid_str = m.group(2)[1:-1]
object_id = re.match('([0-9]*\'.)?([0-9a-fA-F]+)', oid_str).group(2)
buffer.append(line)
continue
m = p_width.match(line)
if m:
width_str = m.group(2)
width = re.match('(.*),', width_str).group(1)
buffer.append(line)
continue
m = p_depth.match(line)
if m:
depth_str = m.group(2)
depth = re.match('(.*),', depth_str).group(1)
buffer.append(line)
continue
m = p_indexwidth.match(line)
if m:
indexwidth_str = m.group(2)
indexwidth = re.match('(.*),', indexwidth_str).group(1)
buffer.append(line)
continue
m = p_logdepth.match(line)
if m:
logdepth_str = m.group(2)
logdepth = re.match('(.*),', logdepth_str).group(1)
buffer.append(line)
continue
m = p_sub_id.match(line)
if m:
sid_str = m.group(2)
sub_id_m = re.search('([0-9]*\'.)?([0-9a-fA-F]+)', sid_str)
sub_id = sub_id_m.group(0)
sub_id_num = sub_id_m.group(2)
sub_id_base = (10 if sub_id_m.group(1).count("'d") > 0 else
16 if sub_id_m.group(1).count("'h") > 0 else
2 if sub_id_m.group(1).count("'b") > 0 else
10)
buffer.append(line)
continue
if mode:
print("PY%s
print("/*CORAM_THREAD_NAME*/ %s," % ''.join((thread_name[:-1], '_', thread_id, '"')))
print("/*CORAM_THREAD_ID*/ %s," % thread_id)
if module_name.count('CORAM') > 0:
print("/*CORAM_ID*/ %d," % getRamId(int(object_id), int(sub_id_num, sub_id_base)))
if module_name.count('ChannelFIFO') > 0:
print("/*CORAM_ID*/ %d," % getChannelId(int(object_id), int(sub_id_num, sub_id_base)))
if module_name.count('ChannelRegister') > 0:
print("/*CORAM_ID*/ %d," % getRegisterId(int(object_id), int(sub_id_num, sub_id_base)))
if module_name.count('CORAM') > 0:
print("/*CORAM_SUB_ID*/ %s," % getRamSubId(int(object_id), int(sub_id_num, sub_id_base)))
if module_name.count('ChannelFIFO') > 0:
print("/*CORAM_SUB_ID*/ %s," % '0')
if module_name.count('ChannelRegister') > 0:
print("/*CORAM_SUB_ID*/ %s," % '0')
print("/*CORAM_ADDR_LEN*/ %s," % indexwidth)
print("/*CORAM_DATA_WIDTH*/ %s," % width)
print("/*THREAD*/ %s," % thread_name)
print(''.join(buffer[1:]))
mode = False
print(line, end='')
if __name__ == '__main__':
main()
|
data/Lawouach/WebSocket-for-Python/example/echo_gevent_client.py
|
from gevent import monkey; monkey.patch_all()
import gevent
from ws4py.client.geventclient import WebSocketClient
if __name__ == '__main__':
ws = WebSocketClient('ws://localhost:9000/ws', protocols=['http-only', 'chat'])
ws.connect()
ws.send("Hello world")
print((ws.receive(),))
ws.send("Hello world again")
print((ws.receive(),))
def incoming():
while True:
m = ws.receive()
if m is not None:
m = str(m)
print((m, len(m)))
if len(m) == 35:
ws.close()
break
else:
break
print(("Connection closed!",))
def outgoing():
for i in range(0, 40, 5):
ws.send("*" * i)
ws.send("Foobar")
greenlets = [
gevent.spawn(incoming),
gevent.spawn(outgoing),
]
gevent.joinall(greenlets)
|
data/MongoEngine/mongoengine/mongoengine/dereference.py
|
from bson import DBRef, SON
from mongoengine.python_support import txt_type
from base import (
BaseDict, BaseList, EmbeddedDocumentList,
TopLevelDocumentMetaclass, get_document
)
from fields import (ReferenceField, ListField, DictField, MapField)
from connection import get_db
from queryset import QuerySet
from document import Document, EmbeddedDocument
class DeReference(object):
def __call__(self, items, max_depth=1, instance=None, name=None):
"""
Cheaply dereferences the items to a set depth.
Also handles the conversion of complex data types.
:param items: The iterable (dict, list, queryset) to be dereferenced.
:param max_depth: The maximum depth to recurse to
:param instance: The owning instance used for tracking changes by
:class:`~mongoengine.base.ComplexBaseField`
:param name: The name of the field, used for tracking changes by
:class:`~mongoengine.base.ComplexBaseField`
"""
if items is None or isinstance(items, basestring):
return items
if isinstance(items, QuerySet):
items = [i for i in items]
self.max_depth = max_depth
doc_type = None
if instance and isinstance(instance, (Document, EmbeddedDocument,
TopLevelDocumentMetaclass)):
doc_type = instance._fields.get(name)
while hasattr(doc_type, 'field'):
doc_type = doc_type.field
if isinstance(doc_type, ReferenceField):
field = doc_type
doc_type = doc_type.document_type
is_list = not hasattr(items, 'items')
if is_list and all([i.__class__ == doc_type for i in items]):
return items
elif not is_list and all(
[i.__class__ == doc_type for i in items.values()]):
return items
elif not field.dbref:
if not hasattr(items, 'items'):
def _get_items(items):
new_items = []
for v in items:
if isinstance(v, list):
new_items.append(_get_items(v))
elif not isinstance(v, (DBRef, Document)):
new_items.append(field.to_python(v))
else:
new_items.append(v)
return new_items
items = _get_items(items)
else:
items = dict([
(k, field.to_python(v))
if not isinstance(v, (DBRef, Document)) else (k, v)
for k, v in items.iteritems()]
)
self.reference_map = self._find_references(items)
self.object_map = self._fetch_objects(doc_type=doc_type)
return self._attach_objects(items, 0, instance, name)
def _find_references(self, items, depth=0):
"""
Recursively finds all db references to be dereferenced
:param items: The iterable (dict, list, queryset)
:param depth: The current depth of recursion
"""
reference_map = {}
if not items or depth >= self.max_depth:
return reference_map
if not hasattr(items, 'items'):
iterator = enumerate(items)
else:
iterator = items.iteritems()
depth += 1
for k, item in iterator:
if isinstance(item, (Document, EmbeddedDocument)):
for field_name, field in item._fields.iteritems():
v = item._data.get(field_name, None)
if isinstance(v, DBRef):
reference_map.setdefault(field.document_type, set()).add(v.id)
elif isinstance(v, (dict, SON)) and '_ref' in v:
reference_map.setdefault(get_document(v['_cls']), set()).add(v['_ref'].id)
elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
field_cls = getattr(getattr(field, 'field', None), 'document_type', None)
references = self._find_references(v, depth)
for key, refs in references.iteritems():
if isinstance(field_cls, (Document, TopLevelDocumentMetaclass)):
key = field_cls
reference_map.setdefault(key, set()).update(refs)
elif isinstance(item, DBRef):
reference_map.setdefault(item.collection, set()).add(item.id)
elif isinstance(item, (dict, SON)) and '_ref' in item:
reference_map.setdefault(get_document(item['_cls']), set()).add(item['_ref'].id)
elif isinstance(item, (dict, list, tuple)) and depth - 1 <= self.max_depth:
references = self._find_references(item, depth - 1)
for key, refs in references.iteritems():
reference_map.setdefault(key, set()).update(refs)
return reference_map
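# Shape of the returned map (illustration only): keys are document classes
# or collection names, values are the sets of ids to fetch in bulk, e.g.
#   {Author: set([ObjectId('...'), ...]), 'books': set([...])}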
def _fetch_objects(self, doc_type=None):
"""Fetch all references and convert to their document objects
"""
object_map = {}
for collection, dbrefs in self.reference_map.iteritems():
if hasattr(collection, 'objects'):
col_name = collection._get_collection_name()
refs = [dbref for dbref in dbrefs
if (col_name, dbref) not in object_map]
references = collection.objects.in_bulk(refs)
for key, doc in references.iteritems():
object_map[(col_name, key)] = doc
else:
if isinstance(doc_type, (ListField, DictField, MapField,)):
continue
refs = [dbref for dbref in dbrefs
if (collection, dbref) not in object_map]
if doc_type:
references = doc_type._get_db()[collection].find({'_id': {'$in': refs}})
for ref in references:
doc = doc_type._from_son(ref)
object_map[(collection, doc.id)] = doc
else:
references = get_db()[collection].find({'_id': {'$in': refs}})
for ref in references:
if '_cls' in ref:
doc = get_document(ref["_cls"])._from_son(ref)
elif doc_type is None:
doc = get_document(
''.join(x.capitalize()
for x in collection.split('_')))._from_son(ref)
else:
doc = doc_type._from_son(ref)
object_map[(collection, doc.id)] = doc
return object_map
def _attach_objects(self, items, depth=0, instance=None, name=None):
"""
Recursively attaches the dereferenced objects to the items
:param items: The iterable (dict, list, queryset)
:param depth: The current depth of recursion
:param instance: The owning instance used for tracking changes by
:class:`~mongoengine.base.ComplexBaseField`
:param name: The name of the field, used for tracking changes by
:class:`~mongoengine.base.ComplexBaseField`
"""
if not items:
if isinstance(items, (BaseDict, BaseList)):
return items
if instance:
if isinstance(items, dict):
return BaseDict(items, instance, name)
else:
return BaseList(items, instance, name)
if isinstance(items, (dict, SON)):
if '_ref' in items:
return self.object_map.get(
(items['_ref'].collection, items['_ref'].id), items)
elif '_cls' in items:
doc = get_document(items['_cls'])._from_son(items)
_cls = doc._data.pop('_cls', None)
del items['_cls']
doc._data = self._attach_objects(doc._data, depth, doc, None)
if _cls is not None:
doc._data['_cls'] = _cls
return doc
if not hasattr(items, 'items'):
is_list = True
list_type = BaseList
if isinstance(items, EmbeddedDocumentList):
list_type = EmbeddedDocumentList
as_tuple = isinstance(items, tuple)
iterator = enumerate(items)
data = []
else:
is_list = False
iterator = items.iteritems()
data = {}
depth += 1
for k, v in iterator:
if is_list:
data.append(v)
else:
data[k] = v
if k in self.object_map and not is_list:
data[k] = self.object_map[k]
elif isinstance(v, (Document, EmbeddedDocument)):
for field_name, field in v._fields.iteritems():
v = data[k]._data.get(field_name, None)
if isinstance(v, DBRef):
data[k]._data[field_name] = self.object_map.get(
(v.collection, v.id), v)
elif isinstance(v, (dict, SON)) and '_ref' in v:
data[k]._data[field_name] = self.object_map.get(
(v['_ref'].collection, v['_ref'].id), v)
elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
item_name = txt_type("{0}.{1}.{2}").format(name, k, field_name)
data[k]._data[field_name] = self._attach_objects(v, depth, instance=instance, name=item_name)
elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
item_name = '%s.%s' % (name, k) if name else name
data[k] = self._attach_objects(v, depth - 1, instance=instance, name=item_name)
elif hasattr(v, 'id'):
data[k] = self.object_map.get((v.collection, v.id), v)
if instance and name:
if is_list:
return tuple(data) if as_tuple else list_type(data, instance, name)
return BaseDict(data, instance, name)
depth += 1
return data
|
data/acil-bwh/SlicerCIP/Scripted/CIP_Common/CIP/logic/StructuresParameters.py
|
from collections import OrderedDict
class StructuresParameters(object):
INF = 100000
"""Structures"""
structureTypes = OrderedDict()
structureTypes["UNDEFINED"] = (0, 0, 0, "Undefined structure", 0, 0, 0, -INF, INF , '')
slicePlane="Axial"
structureTypes['LeftHumerus'+slicePlane] = (1, 34, 0, 'Left Humerus (Axial)', 0.75, 0.05, 0, 900, 50, 'A')
structureTypes['RightHumerus'+slicePlane] = (2, 35, 0, 'Right Humerus (Axial)', 0.95, 0.23, 0.10, 900, 50, 'A')
structureTypes['LeftScapula'+slicePlane] = (3, 37, 0, 'Left Scapula (Axial)', 0.48, 0.66, 0.7, 900, 50, 'A')
structureTypes['RightScapula'+slicePlane] = (4, 38, 0, 'Right Scapula (Axial)', 0.68, 0.86, 0.9, 900, 50, 'A')
structureTypes['LeftPectoralis'+slicePlane] = (5, 56, 0, 'Left Pectoralis (Axial)', 0.65, 0.24, 0.24, 900, 50, 'A')
structureTypes['RightPectoralis'+slicePlane] = (6, 57, 0, 'Right Pectoralis (Axial)', 0.85, 0.44, 0.44, 900, 50, 'A')
structureTypes['TransversalAorta'+slicePlane] = (7, 46, 0, 'Transversal Aorta (Axial)', 0.05, 0.8, 0.2, 900, 50, 'A')
structureTypes['PulmonaryArtery'+slicePlane] = (8, 18, 0, 'Pulmonary Artery (Axial)', 0.1, 0.2, 0.7, 900, 50, 'A')
structureTypes['LeftCoronoraryArtery'+slicePlane] = (9, 50, 0, 'Left Coronorary Artery (Axial)', 0.2, 0.3, 0.1, 900, 50, 'A')
structureTypes['WholeHeart'+slicePlane] = (10, 16, 0, 'Whole Heart (Axial)', 0.9, 0.75, 0.1, 900, 50, 'A')
structureTypes['Liver'+slicePlane] = (11, 25, 0, 'Liver (Axial)', 0, 0.5, 0.5, 900, 50, 'A')
structureTypes['Spleen'+slicePlane] = (12, 26, 0, 'Spleen (Axial)', 0.4, 0.3, 0.9, 900, 50, 'A')
structureTypes['LeftKidney'+slicePlane] = (13, 43, 0, 'Left Kidney (Axial)', 0.05, 0.7, 0.2, 900, 50, 'A')
structureTypes['RightKidney'+slicePlane] = (14, 44, 0, 'Right Kidney (Axial)', 0.9, 0.2, 0.4, 900, 50, 'A')
slicePlane="Sagittal"
structureTypes['TransversalAorta'+slicePlane] = (31, 46, 0, 'Transversal Aorta (Sagittal)', 0.3, 0.1, 0.8, 900, 50, 'S')
structureTypes['AscendingAorta'+slicePlane] = (32, 45, 0, 'Ascending Aorta (Sagittal)', 0.8, 0.8, 0, 900, 50, 'S')
structureTypes['PulmonaryArtery'+slicePlane] = (33, 18, 0, 'Pulmonary Artery (Sagittal)', 0.2, 0.32, 0.1, 900, 50, 'S')
structureTypes['WholeHeart'+slicePlane] = (34, 16, 0, 'Whole Heart (Sagittal)', 0.1, 0.3, 0.57, 900, 50, 'S')
structureTypes['Sternum'+slicePlane] = (35, 32, 0, 'Sternum (Sagittal)', 0.4, 0.2, 0.2, 900, 50, 'S')
structureTypes['Trachea2'+slicePlane] = (36, 58, 0, 'Trachea (Sagittal)', 0.1, 0.5, 0.9, 900, 50, 'S')
structureTypes['Spine'+slicePlane] = (37, 51, 0, 'Spine (Sagittal)', 0.2, 0.8, 0.8, 900, 50, 'S')
structureTypes['Liver'+slicePlane] = (38, 25, 0, 'Liver (Sagittal)', 0, 1, 1, 900, 50, 'S')
structureTypes['LeftHilum'+slicePlane] = (39, 40, 0, 'Left Hilum (Sagittal)', 0.7, 0.3, 0.6, 900, 50, 'S')
structureTypes['RightHilum'+slicePlane] = (40, 41, 0, 'Right Hilum (Sagittal)', 0.5, 0.1, 0.4, 900, 50, 'S')
structureTypes['LeftVentricle'+slicePlane] = (41, 52, 0, 'Left Ventricle (Sagittal)', 0.2, 0.1, 0.3, 900, 50, 'S')
slicePlane="Coronal"
structureTypes['DescendingAorta'+slicePlane] = (51, 47, 0, 'Descending Aorta (Coronal)', 0.4, 0.2, 0.9, 900, 50, 'C')
structureTypes['Trachea2'+slicePlane] = (52, 58, 0, 'Trachea (Coronal)', 0.9, 0.4, 0.4, 900, 50, 'C')
structureTypes['AscendingAorta'+slicePlane] = (53, 45, 0, 'Ascending Aorta (Coronal)', 0.9, 0.9, 0.1, 900, 50, 'C')
structureTypes['Liver'+slicePlane] = (54, 25, 0, 'Liver (Coronal)', 0.1, 0.15, 0.15, 900, 50, 'C')
structureTypes['LeftVentricle'+slicePlane] = (55, 52, 0, 'Left Ventricle (Coronal)', 0.1, 0.2, 0.6, 900, 50, 'C')
structureTypes['LeftDiaphragm'+slicePlane] = (56, 64, 0, 'Left Diaphragm (Coronal)', 0.1, 0.8, 0.4, 900, 50, 'C')
structureTypes['LeftChestWall'+slicePlane] = (57, 62, 0, 'Left Chest Wall (Coronal)', 0.8, 0.3, 0.3, 900, 50, 'C')
structureTypes['RightChestWall'+slicePlane] = (58, 63, 0, 'Right Chest Wall (Coronal)', 0.6, 0.1, 0.1, 900, 50, 'C')
structureTypes['LeftSubclavian'+slicePlane] = (59, 48, 0, 'Left Subclavian Artery (Coronal)', 0.6, 0.7, 0.3, 900, 50, 'C')
structureTypes['Spine'+slicePlane] = (60, 51, 0, 'Spine (Coronal)', 0.2, 0.7, 0.2, 900, 50, 'C')
structureTypes['HernialHiatus'+slicePlane] = (61, 66, 81, 'Hernial Hiatus (Coronal)', 0.2, 0.5, 0.5, 900, 50, 'C')
"""
Ids:
- StructureId
- ChestRegion
- ChestType
- Description
- Red level (0-1)
- Green level (0-1)
- Blue level (0-1)
- WindowWidth
- WindowLevel
- Plane
"""
STRUCTURE_ID = 0
CHEST_REGION_ID = 1
CHEST_TYPE_ID = 2
DESCRIPTION = 3
RED = 4
GREEN = 5
BLUE = 6
WINDOW_WIDTH = 7
WINDOW_LEVEL = 8
PLANE = 9
def getItem(self, structureId):
return self.structureTypes[structureId]
def getIntCodeItem(self, structureId):
"""Get the integer code (ID)"""
return self.getItem(structureId)[self.STRUCTURE_ID]
def getRegionIdItem(self, structureId):
"""Get the Region id to which this structure is linked"""
return self.getItem(structureId)[self.CHEST_REGION_ID]
def getTypeIdItem(self, structureId):
"""Get the Region id to which this structure is linked"""
return self.getItem(structureId)[self.CHEST_TYPE_ID]
def getDescriptionItem(self, structureId):
"""Return the full description label"""
return self.getItem(structureId)[self.DESCRIPTION]
def getRedItem(self, structureId):
"""Get the Red value in an item from the mainParameters structure"""
return self.getItem(structureId)[self.RED]
def getGreenItem(self, structureId):
"""Get the Green value in an item from the mainParameters structure"""
return self.getItem(structureId)[self.GREEN]
def getBlueItem(self, structureId):
"""Get the Blue value in an item from the mainParameters structure"""
return self.getItem(structureId)[self.BLUE]
def getWindowRange(self, structureId):
"""Returns a tuple (Window_size, Window_center_level) with the window range for the selected combination"""
item = self.getItem(structureId)
if not item:
return None
width = item[self.WINDOW_WIDTH]
level = item[self.WINDOW_LEVEL]
if width == self.INF or level == self.INF:
return None
return (width, level)
def getPlaneItem(self, structureId):
return self.getItem(structureId)[self.PLANE]
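# Example lookups (keys combine the structure name and the slice plane):
def _example_structure_lookups():
    sp = StructuresParameters()
    return (sp.getDescriptionItem('LiverAxial'),   # 'Liver (Axial)'
            sp.getWindowRange('LiverAxial'),       # (900, 50)
            sp.getPlaneItem('SpineCoronal'))       # 'C'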
|
data/USArmyResearchLab/Dshell/decoders/templates/SessionDecoder.py
|
import dshell
import output
import util
class DshellDecoder(dshell.TCPDecoder):
'''generic session-level decoder template'''
def __init__(self, **kwargs):
'''decoder-specific config'''
'''pairs of 'option':{option-config}'''
self.optiondict = {}
'''bpf filter, for ipV4'''
self.filter = ''
'''filter function'''
'''init superclasses'''
dshell.TCPDecoder.__init__(self, **kwargs)
def packetHandler(self, udp, data):
'''handle UDP as Packet(),payload data
remove this if you want to make UDP into pseudo-sessions'''
pass
def connectionInitHandler(self, conn):
'''called when connection starts, before any data'''
pass
def blobHandler(self, conn, blob):
'''handle session data as soon as reassembly is possible'''
pass
def connectionHandler(self, conn):
'''handle session once all data is reassembled'''
pass
def connectionCloseHandler(self, conn):
'''called when connection ends, after data is handled'''
dObj = DshellDecoder()
|
data/PyHDI/veriloggen/tests/verilog/read_verilog_/module_str/test_read_verilog_module_str.py
|
from __future__ import absolute_import
from __future__ import print_function
import read_verilog_module_str
expected_verilog = """
module top #
(
parameter WIDTH = 8
)
(
input CLK,
input RST,
output [WIDTH-1:0] LED
);
blinkled #
(
.WIDTH(WIDTH)
)
inst_blinkled
(
.CLK(CLK),
.RST(RST),
.LED(LED)
);
endmodule
module blinkled #
(
parameter WIDTH = 8
)
(
input CLK,
input RST,
output reg [WIDTH-1:0] LED
);
reg [32-1:0] count;
always @(posedge CLK) begin
if(RST) begin
count <= 0;
end else begin
if(count == 1023) begin
count <= 0;
end else begin
count <= count + 1;
end
end
end
always @(posedge CLK) begin
if(RST) begin
LED <= 0;
end else begin
if(count == 1023) begin
LED <= LED + 1;
end
end
end
endmodule
"""
def test():
test_module = read_verilog_module_str.mkTop()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
|
data/Yubico/python-pyhsm/pyhsm/cmd.py
|
"""
module for accessing a YubiHSM
"""
import re
import struct
__all__ = [
'reset',
'YHSM_Cmd',
]
import pyhsm.exception
import pyhsm.defines
class YHSM_Cmd():
"""
Base class for YubiHSM commands.
"""
response_status = None
executed = False
def __init__(self, stick, command, payload=''):
"""
The base class for all YSM_ commands.
@param stick: Reference to a YubiHSM
@param command: The YSM_xxx command defined in pyhsm.defines.
@param payload: a packed C struct, represented as a Python string
@type stick: L{pyhsm.stick.YHSM_Stick}
@type command: integer
@type payload: string
"""
self.stick = stick
self.command = command
self.payload = payload
return None
def execute(self, read_response=True):
"""
Write command to HSM and read response.
@param read_response: Whether to expect a response or not.
@type read_response: bool
"""
if self.command != pyhsm.defines.YSM_NULL:
cmd_buf = struct.pack('BB', len(self.payload) + 1, self.command)
else:
cmd_buf = chr(self.command)
cmd_buf += self.payload
debug_info = None
unlock = self.stick.acquire()
try:
if self.stick.debug:
debug_info = "%s (payload %i/0x%x)" % (pyhsm.defines.cmd2str(self.command), \
len(self.payload), len(self.payload))
self.stick.write(cmd_buf, debug_info)
if not read_response:
return None
return self._read_response()
finally:
unlock()
def _read_response(self):
"""
After writing a command, read response.
@returns: Result of parse_data()
@raises pyhsm.exception.YHSM_Error: On failure to read a response to the
command we sent in a timely fashion.
"""
res = self.stick.read(2, 'response length + response status')
if len(res) != 2:
self._handle_invalid_read_response(res, 2)
response_len, response_status = struct.unpack('BB', res)
response_len -= 1
debug_info = None
if response_status & pyhsm.defines.YSM_RESPONSE:
debug_info = "%s response (%i/0x%x bytes)" \
% (pyhsm.defines.cmd2str(response_status - pyhsm.defines.YSM_RESPONSE), \
response_len, response_len)
res = self.stick.read(response_len, debug_info)
if res:
if response_status == self.command | pyhsm.defines.YSM_RESPONSE:
self.executed = True
self.response_status = response_status
return self.parse_result(res)
else:
reset(self.stick)
raise pyhsm.exception.YHSM_Error('YubiHSM responded to wrong command')
else:
raise pyhsm.exception.YHSM_Error('YubiHSM did not respond')
def _handle_invalid_read_response(self, res, expected_len):
"""
This function is called when we do not get the expected frame header in
response to a command. Probable reason is that we are not talking to a
YubiHSM in HSM mode (might be a modem, or a YubiHSM in configuration mode).
Throws a hopefully helpful exception.
"""
if not res:
reset(self.stick)
raise pyhsm.exception.YHSM_Error('YubiHSM did not respond to command %s' \
% (pyhsm.defines.cmd2str(self.command)) )
self.stick.write('\r\r\r', '(mode test)')
res2 = self.stick.read(50)
lines = res2.split('\n')
for this in lines:
if re.match('^(NO_CFG|WSAPI|HSM).*> .*', this):
raise pyhsm.exception.YHSM_Error('YubiHSM is in configuration mode')
raise pyhsm.exception.YHSM_Error('Unknown response from serial device %s : "%s"' \
% (self.stick.device, res.encode('hex')))
def parse_result(self, data):
"""
This function is intended to be overridden by sub-classes that
implements commands that should not just return the data read from
the YubiHSM.
"""
return data
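# A sketch of how sub-classes are meant to specialize YHSM_Cmd: pack the
# request payload in __init__ and unpack the response in parse_result.
# The echo payload layout here (length byte + data) is an assumption for
# illustration, not taken from the protocol documents.
class YHSM_Cmd_Echo_Example(YHSM_Cmd):
    def __init__(self, stick, data=''):
        packed = chr(len(data)) + data
        YHSM_Cmd.__init__(self, stick, pyhsm.defines.YSM_ECHO, packed)
    def parse_result(self, data):
        # skip the leading length byte, return the echoed bytes
        return data[1:]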
def reset(stick):
"""
Send a bunch of zero-bytes to the YubiHSM, and flush the input buffer.
"""
nulls = (pyhsm.defines.YSM_MAX_PKT_SIZE - 1) * '\x00'
res = YHSM_Cmd(stick, pyhsm.defines.YSM_NULL, payload = nulls).execute(read_response = False)
unlock = stick.acquire()
try:
stick.drain()
stick.flush()
finally:
unlock()
return res == 0
|
data/SMART-Lab/smartdispatch/smartdispatch/tests/test_smartdispatch.py
|
import os
import re
import shutil
import time as t
from os.path import join as pjoin
from StringIO import StringIO
import tempfile
from nose.tools import assert_true, assert_equal
from numpy.testing import assert_array_equal
import smartdispatch
from smartdispatch import utils
def test_generate_name_from_command():
date_length = 20
command = "command arg1 arg2"
expected = "_".join(command.split())
assert_equal(smartdispatch.generate_name_from_command(command)[date_length:], expected)
max_length_arg = 7
long_arg = "veryverylongarg1"
command = "command " + long_arg + " arg2"
expected = command.split()
expected[1] = long_arg[-max_length_arg:]
expected = "_".join(expected)
assert_equal(smartdispatch.generate_name_from_command(command, max_length_arg)[date_length:], expected)
max_length = 23
command = "command veryverylongarg1 veryverylongarg1 veryverylongarg1 veryverylongarg1"
expected = command[:max_length].replace(" ", "_")
assert_equal(smartdispatch.generate_name_from_command(command, max_length=max_length + date_length)[date_length:], expected)
command = "command path/number/one path/number/two"
expected = "command_pathnumberone_pathnumbertwo"
assert_equal(smartdispatch.generate_name_from_command(command)[date_length:], expected)
def test_get_commands_from_file():
commands = ["command1 arg1 arg2",
"command2",
"command3 arg1 arg2 arg3 arg4"]
fileobj = StringIO("\n".join(commands))
assert_array_equal(smartdispatch.get_commands_from_file(fileobj), commands)
fileobj = StringIO("\n".join(commands) + "\n")
assert_array_equal(smartdispatch.get_commands_from_file(fileobj), commands)
def test_unfold_command():
cmd = "ls"
assert_equal(smartdispatch.unfold_command(cmd), ["ls"])
cmd = "echo 1"
assert_equal(smartdispatch.unfold_command(cmd), ["echo 1"])
cmd = "echo [1 2]"
assert_equal(smartdispatch.unfold_command(cmd), ["echo 1", "echo 2"])
cmd = "echo test [1 2] yay"
assert_equal(smartdispatch.unfold_command(cmd), ["echo test 1 yay", "echo test 2 yay"])
cmd = "echo test[1 2]"
assert_equal(smartdispatch.unfold_command(cmd), ["echo test1", "echo test2"])
cmd = "echo test[1 2]yay"
assert_equal(smartdispatch.unfold_command(cmd), ["echo test1yay", "echo test2yay"])
cmd = "python my_command.py [0.01 0.000001 0.00000000001] -1 [omicron mu]"
assert_equal(smartdispatch.unfold_command(cmd), ["python my_command.py 0.01 -1 omicron",
"python my_command.py 0.01 -1 mu",
"python my_command.py 0.000001 -1 omicron",
"python my_command.py 0.000001 -1 mu",
"python my_command.py 0.00000000001 -1 omicron",
"python my_command.py 0.00000000001 -1 mu"])
cmd = "python my_command.py [0.01 0.000001 0.00000000001] -1 \[[42 133,666]\] slow [omicron mu]"
assert_equal(smartdispatch.unfold_command(cmd), ["python my_command.py 0.01 -1 [42] slow omicron",
"python my_command.py 0.01 -1 [42] slow mu",
"python my_command.py 0.01 -1 [133,666] slow omicron",
"python my_command.py 0.01 -1 [133,666] slow mu",
"python my_command.py 0.000001 -1 [42] slow omicron",
"python my_command.py 0.000001 -1 [42] slow mu",
"python my_command.py 0.000001 -1 [133,666] slow omicron",
"python my_command.py 0.000001 -1 [133,666] slow mu",
"python my_command.py 0.00000000001 -1 [42] slow omicron",
"python my_command.py 0.00000000001 -1 [42] slow mu",
"python my_command.py 0.00000000001 -1 [133,666] slow omicron",
"python my_command.py 0.00000000001 -1 [133,666] slow mu"])
cmd = "python my_command.py [0.01 0.001] -[1:5] slow"
assert_equal(smartdispatch.unfold_command(cmd), ["python my_command.py 0.01 -1 slow",
"python my_command.py 0.01 -2 slow",
"python my_command.py 0.01 -3 slow",
"python my_command.py 0.01 -4 slow",
"python my_command.py 0.001 -1 slow",
"python my_command.py 0.001 -2 slow",
"python my_command.py 0.001 -3 slow",
"python my_command.py 0.001 -4 slow"])
cmd = "python my_command.py -[1:5] slow [0.01 0.001]"
assert_equal(smartdispatch.unfold_command(cmd), ["python my_command.py -1 slow 0.01",
"python my_command.py -1 slow 0.001",
"python my_command.py -2 slow 0.01",
"python my_command.py -2 slow 0.001",
"python my_command.py -3 slow 0.01",
"python my_command.py -3 slow 0.001",
"python my_command.py -4 slow 0.01",
"python my_command.py -4 slow 0.001"])
def test_replace_uid_tag():
command = "command without uid tag"
assert_array_equal(smartdispatch.replace_uid_tag([command]), [command])
command = "command with one {UID} tag"
uid = utils.generate_uid_from_string(command)
assert_array_equal(smartdispatch.replace_uid_tag([command]), [command.replace("{UID}", uid)])
command = "command with two {UID} tag {UID}"
uid = utils.generate_uid_from_string(command)
assert_array_equal(smartdispatch.replace_uid_tag([command]), [command.replace("{UID}", uid)])
commands = ["a command with a {UID} tag"] * 10
uid = utils.generate_uid_from_string(commands[0])
assert_array_equal(smartdispatch.replace_uid_tag(commands), [commands[0].replace("{UID}", uid)] * len(commands))
def test_get_available_queues():
assert_equal(smartdispatch.get_available_queues(cluster_name=None), {})
assert_equal(smartdispatch.get_available_queues(cluster_name="unknown"), {})
queues_infos = smartdispatch.get_available_queues(cluster_name="guillimin")
assert_true(len(queues_infos) > 0)
queues_infos = smartdispatch.get_available_queues(cluster_name="mammouth")
assert_true(len(queues_infos) > 0)
def test_get_job_folders():
temp_dir = tempfile.mkdtemp()
jobname = "this_is_the_name_of_my_job"
job_folders_paths = smartdispatch.get_job_folders(temp_dir, jobname)
path_job, path_job_logs, path_job_commands = job_folders_paths
assert_true(jobname in path_job)
assert_true(os.path.isdir(path_job))
assert_equal(os.path.basename(path_job), jobname)
assert_true(jobname in path_job_logs)
assert_true(os.path.isdir(path_job_logs))
assert_true(os.path.isdir(pjoin(path_job_logs, 'worker')))
assert_true(os.path.isdir(pjoin(path_job_logs, 'job')))
assert_true(os.path.isdir(path_job_logs))
assert_equal(os.path.basename(path_job_logs), "logs")
assert_true(jobname in path_job_commands)
assert_true(os.path.isdir(path_job_commands))
assert_equal(os.path.basename(path_job_commands), "commands")
jobname += "2"
os.rename(path_job, path_job + "2")
job_folders_paths = smartdispatch.get_job_folders(temp_dir, jobname)
path_job, path_job_logs, path_job_commands = job_folders_paths
assert_true(jobname in path_job)
assert_true(os.path.isdir(path_job))
assert_equal(os.path.basename(path_job), jobname)
assert_true(jobname in path_job_logs)
assert_true(os.path.isdir(path_job_logs))
assert_true(os.path.isdir(pjoin(path_job_logs, 'worker')))
assert_true(os.path.isdir(pjoin(path_job_logs, 'job')))
assert_true(os.path.isdir(path_job_logs))
assert_equal(os.path.basename(path_job_logs), "logs")
assert_true(jobname in path_job_commands)
assert_true(os.path.isdir(path_job_commands))
assert_equal(os.path.basename(path_job_commands), "commands")
shutil.rmtree(temp_dir)
def test_log_command_line():
temp_dir = tempfile.mkdtemp()
command_line_log_file = pjoin(temp_dir, "command_line.log")
command_1 = "echo 1 2 3"
smartdispatch.log_command_line(temp_dir, command_1)
assert_true(os.path.isfile(command_line_log_file))
lines = open(command_line_log_file).read().strip().split("\n")
assert_equal(len(lines), 2)
assert_true(t.strftime("
assert_equal(lines[1], command_1)
command_2 = "echo \"bob\""
smartdispatch.log_command_line(temp_dir, command_2)
assert_true(os.path.isfile(command_line_log_file))
lines = open(command_line_log_file).read().strip().split("\n")
assert_equal(len(lines), 5)
assert_true(t.strftime("
assert_equal(lines[4], command_2.replace('"', r'\"'))
command_3 = "echo [asd]"
smartdispatch.log_command_line(temp_dir, command_3)
assert_true(os.path.isfile(command_line_log_file))
lines = open(command_line_log_file).read().strip().split("\n")
assert_equal(len(lines), 8)
assert_true(t.strftime("
assert_equal(lines[7], re.sub(r'(\[)([^\[\]]*\\ [^\[\]]*)(\])', r'"\1\2\3"', command_3))
shutil.rmtree(temp_dir)
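# The line counts asserted above (2, then 5, then 8) imply that every call
# appends three lines: a timestamp header, the escaped command, and one
# separating blank line. A minimal sketch of such a logger (assumed
# behaviour, escaping omitted -- not the actual implementation):
def sketch_log_command_line(path_job, command):
    import time
    with open(pjoin(path_job, "command_line.log"), "a") as log_file:
        log_file.write(time.strftime("# %Y-%m-%d %H:%M:%S\n"))
        log_file.write(command + "\n\n")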
|
data/Nodd/spyder_line_profiler/p_memoryprofiler.py
|
"""Memory profiler Plugin"""
from spyderlib.qt.QtGui import QVBoxLayout, QGroupBox, QLabel
from spyderlib.qt.QtCore import SIGNAL, Qt
from spyderlib.baseconfig import get_translation
_ = get_translation("p_memoryprofiler", dirname="spyderplugins")
from spyderlib.utils.qthelpers import get_icon, create_action
from spyderlib.plugins import SpyderPluginMixin, PluginConfigPage, runconfig
from spyderplugins.widgets.memoryprofilergui import (
MemoryProfilerWidget, is_memoryprofiler_installed)
class MemoryProfilerConfigPage(PluginConfigPage):
"""Widget with configuration options for memory profiler
"""
def setup_page(self):
settings_group = QGroupBox(_("Settings"))
use_color_box = self.create_checkbox(
_("Use deterministic colors to differentiate functions"),
'use_colors', default=True)
results_group = QGroupBox(_("Results"))
results_label1 = QLabel(_("Memory profiler plugin results "
                          "(the output of memory_profiler)\n"
                          "are stored here:"))
results_label1.setWordWrap(True)
results_label2 = QLabel(MemoryProfilerWidget.DATAPATH)
results_label2.setTextInteractionFlags(Qt.TextSelectableByMouse)
results_label2.setWordWrap(True)
settings_layout = QVBoxLayout()
settings_layout.addWidget(use_color_box)
settings_group.setLayout(settings_layout)
results_layout = QVBoxLayout()
results_layout.addWidget(results_label1)
results_layout.addWidget(results_label2)
results_group.setLayout(results_layout)
vlayout = QVBoxLayout()
vlayout.addWidget(settings_group)
vlayout.addWidget(results_group)
vlayout.addStretch(1)
self.setLayout(vlayout)
class MemoryProfiler(MemoryProfilerWidget, SpyderPluginMixin):
"""Memory profiler"""
CONF_SECTION = 'memoryprofiler'
CONFIGWIDGET_CLASS = MemoryProfilerConfigPage
def __init__(self, parent=None):
MemoryProfilerWidget.__init__(self, parent=parent)
SpyderPluginMixin.__init__(self, parent)
self.initialize_plugin()
def get_plugin_title(self):
"""Return widget title"""
return _("Memory profiler")
def get_plugin_icon(self):
"""Return widget icon"""
return get_icon('profiler.png')
def get_focus_widget(self):
"""
Return the widget to give focus to when
this plugin's dockwidget is raised on top-level
"""
return self.datatree
def get_plugin_actions(self):
"""Return a list of actions related to plugin"""
return []
def on_first_registration(self):
"""Action to be performed on first plugin registration"""
self.main.tabify_plugins(self.main.inspector, self)
self.dockwidget.hide()
def register_plugin(self):
"""Register plugin in Spyder's main window"""
self.connect(self, SIGNAL("edit_goto(QString,int,QString)"),
self.main.editor.load)
self.connect(self, SIGNAL('redirect_stdio(bool)'),
self.main.redirect_internalshell_stdio)
self.main.add_dockwidget(self)
memoryprofiler_act = create_action(self, _("Profile memory line by line"),
icon=self.get_plugin_icon(),
triggered=self.run_memoryprofiler)
memoryprofiler_act.setEnabled(is_memoryprofiler_installed())
self.register_shortcut(memoryprofiler_act, context="Memory Profiler",
name="Run memory profiler", default="Ctrl+Shift+F10")
self.main.run_menu_actions += [memoryprofiler_act]
self.main.editor.pythonfile_dependent_actions += [memoryprofiler_act]
def refresh_plugin(self):
"""Refresh memory profiler widget"""
pass
def closing_plugin(self, cancelable=False):
"""Perform actions before parent main window is closed"""
return True
def apply_plugin_settings(self, options):
"""Apply configuration file's plugin settings"""
pass
def run_memoryprofiler(self):
"""Run memory profiler"""
self.analyze(self.main.editor.get_current_filename())
def analyze(self, filename):
"""Reimplement analyze method"""
if self.dockwidget and not self.ismaximized:
self.dockwidget.setVisible(True)
self.dockwidget.setFocus()
self.dockwidget.raise_()
pythonpath = self.main.get_spyder_pythonpath()
runconf = runconfig.get_run_configuration(filename)
wdir, args = None, None
if runconf is not None:
if runconf.wdir_enabled:
wdir = runconf.wdir
if runconf.args_enabled:
args = runconf.args
MemoryProfilerWidget.analyze(
self, filename, wdir=wdir, args=args, pythonpath=pythonpath,
use_colors=self.get_option('use_colors', True))
PLUGIN_CLASS = MemoryProfiler
|
data/agiliq/merchant/billing/forms/common.py
|
from django import forms
from billing.utils.credit_card import CreditCard, CardNotSupported
class CreditCardFormBase(forms.Form):
"""
Base class for a simple credit card form which provides some utilities like
'get_credit_card' to return a CreditCard instance.
If you pass the gateway as a keyword argument to the constructor,
the gateway.validate_card method will be used in form validation.
This class must be subclassed to provide the actual fields to be used.
"""
def __init__(self, *args, **kwargs):
self.gateway = kwargs.pop('gateway', None)
super(CreditCardFormBase, self).__init__(*args, **kwargs)
def get_credit_card(self):
"""
Returns a CreditCard from the submitted (cleaned) data.
If gateway was passed to the form constructor, the gateway.validate_card
method will be called - which can throw CardNotSupported, and will also
add the attribute 'card_type' which is the CreditCard subclass if it is
successful.
"""
card = CreditCard(**self.cleaned_data)
if self.gateway is not None:
self.gateway.validate_card(card)
return card
def clean(self):
cleaned_data = super(CreditCardFormBase, self).clean()
if self.errors:
return cleaned_data
try:
credit_card = self.get_credit_card()
if not credit_card.is_valid():
raise forms.ValidationError("Credit card details are invalid")
except CardNotSupported:
raise forms.ValidationError("This type of credit card is not supported. Please check the number.")
return cleaned_data
|
data/KanColleTool/kcsrv/kcsrv.py
|
import logging
from app import app, logger
root = logging.getLogger()
root.setLevel(logging.DEBUG)
logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)
if __name__ == '__main__':
logger.warn("This is a debugging configuration. Run with the gunicorn script to run in production.")
app.run(host='0.0.0.0', debug=True)
|
data/MBoustani/Geothon/Create Spatial File/Vector/create_wkt_multipoint.py
|
'''
Project: Geothon (https://github.com/MBoustani/Geothon)
File: Vector/create_wkt_multipoint.py
Description: This code creates a WKT multipoint from longitude and latitude values.
Author: Maziyar Boustani (github.com/MBoustani)
'''
try:
import ogr
except ImportError:
from osgeo import ogr
latitudes = [50, 51, 52, 53, 54]
longitudes = [100, 110, 120, 130, 140]
elevation = 0
points = ogr.Geometry(ogr.wkbMultiPoint)
# Build one point per coordinate pair and add each to the multipoint.
for longitude, latitude in zip(longitudes, latitudes):
    point = ogr.Geometry(ogr.wkbPoint)
    point.AddPoint(longitude, latitude, elevation)
    points.AddGeometry(point)
multipoint_wkt = points.ExportToWkt()
print multipoint_wkt
|
data/agronholm/apscheduler/examples/schedulers/tornado_.py
|
"""
Demonstrates how to use the Tornado compatible scheduler to schedule a job that executes on 3
second intervals.
"""
from datetime import datetime
import os
from tornado.ioloop import IOLoop
from apscheduler.schedulers.tornado import TornadoScheduler
def tick():
print('Tick! The time is: %s' % datetime.now())
if __name__ == '__main__':
scheduler = TornadoScheduler()
scheduler.add_job(tick, 'interval', seconds=3)
scheduler.start()
print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
try:
IOLoop.instance().start()
except (KeyboardInterrupt, SystemExit):
pass
|
data/adafruit/Adafruit_Python_SSD1306/setup.py
|
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
classifiers = ['Development Status :: 4 - Beta',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: System :: Hardware']
setup(name = 'Adafruit_SSD1306',
version = '1.6.0',
author = 'Tony DiCola',
author_email = 'tdicola@adafruit.com',
description = 'Python library to use SSD1306-based 128x64 or 128x32 pixel OLED displays with a Raspberry Pi or Beaglebone Black.',
license = 'MIT',
classifiers = classifiers,
url = 'https://github.com/adafruit/Adafruit_Python_SSD1306/',
dependency_links = ['https://github.com/adafruit/Adafruit_Python_GPIO/tarball/master#egg=Adafruit-GPIO-0.6.5'],
install_requires = ['Adafruit-GPIO>=0.6.5'],
packages = find_packages())
|
data/adblockplus/gyp/test/actions-none/src/fake_cross.py
|
import sys
fh = open(sys.argv[-1], 'wb')
for filename in sys.argv[1:-1]:
fh.write(open(filename).read())
fh.close()
|
data/adambullmer/sublime_docblockr_python/formatters/PEP0257.py
|
from .base import Base
class Pep0257Formatter(Base):
name = 'PEP0257'
def decorators(self, attributes):
return ''
def extends(self, attributes):
return ''
def arguments(self, attributes):
section = '\nArguments:\n'
template = '\t{name} -- {description}\n'
for attr in attributes['arguments']:
section += template.format(
name=self._generate_field('name', attr['name']),
description=self._generate_field('description'),
)
if len(attributes['arguments']) == 0:
section = ''
section += self.keyword_arguments(attributes['keyword_arguments'])
return section
def keyword_arguments(self, attributes):
section = '\nKeyword arguments:\n'
template = '\t{name} -- {description} (default: {{{default}}})\n'
if len(attributes) == 0:
return ''
for attr in attributes:
section += template.format(
name=self._generate_field('name', attr['name']),
description=self._generate_field('description'),
default=self._generate_field('default', attr['default']),
)
return section
def returns(self, attribute):
return ''
def yields(self, attribute):
return ''
def raises(self, attributes):
section = '\n'
template = 'Raises a {{{attribute}}} ${{{tab_index_1}:[description]}}\n'
for attr in attributes:
section += template.format(
attribute=attr,
tab_index_1=next(self.tab_index)
)
return section
def variables(self, attributes):
section = '\nVariables:\n'
template = '\t{name} -- {description}\n'
for attr in attributes:
section += template.format(
name=self._generate_field('name', attr['name']),
description=self._generate_field('description'),
)
return section
|
data/Kotti/Kotti/setup.py
|
import os
import sys
from setuptools import setup
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
try:
README = open(os.path.join(here, 'README.rst')).read()
AUTHORS = open(os.path.join(here, 'AUTHORS.txt')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
except IOError:
README = AUTHORS = CHANGES = ''
install_requires = [
'Babel',
'Chameleon>=2.7.4',
'Pillow',
'alembic>=0.8.0',
'bleach',
'bleach-whitelist',
'colander>=0.9.3',
'deform>=2.0a1',
'docopt',
'filedepot',
'formencode',
'html2text',
'js.angular',
'js.bootstrap>=3.0.0',
'js.deform>=2.0a2-2',
'js.fineuploader',
'js.html5shiv',
'js.jquery',
'js.jquery_form',
'js.jquery_tablednd',
'js.jquery_timepicker_addon',
'js.jqueryui>=1.8.24',
'js.jqueryui_tagit',
'kotti_image',
'lingua>=1.3',
'plone.scale',
'py_bcrypt',
'pyramid>=1.5',
'pyramid_beaker',
'pyramid_chameleon',
'pyramid_debugtoolbar',
'pyramid_deform>=0.2a3',
'pyramid_mailer',
'pyramid_tm',
'pyramid_zcml',
'repoze.lru',
'repoze.workflow',
'rfc6266',
'sqlalchemy>=1.0.0',
'sqlalchemy-utils',
'transaction>=1.1.0',
'unidecode',
'usersettings',
'waitress',
'zope.deprecation',
'zope.sqlalchemy',
]
tests_require = [
'WebTest',
'mock',
'pyquery',
'pytest>=2.4.2',
'pytest-cov',
'pytest-pep8!=1.0.3',
'pytest-travis-fold',
'pytest-xdist',
'virtualenv',
'wsgi_intercept==0.5.1',
'zope.testbrowser',
]
development_requires = []
docs_require = [
'Sphinx',
'docutils',
'repoze.sphinx.autointerface',
'sphinx_rtd_theme',
]
if sys.version_info[:3] < (2, 7, 0):
install_requires.append('ordereddict')
setup(name='Kotti',
version='1.3.0-alpha.5-dev',
description="A high-level, Pythonic web application framework based on Pyramid and SQLAlchemy. It includes an extensible Content Management System called the Kotti CMS.",
long_description='\n\n'.join([README, AUTHORS, CHANGES]),
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Framework :: Pylons",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
"License :: Repoze Public License",
],
author='Kotti developers',
author_email='kotti@googlegroups.com',
url='http://kotti.pylonsproject.org/',
keywords='kotti web cms wcms pylons pyramid sqlalchemy bootstrap',
license="BSD-derived (http://www.repoze.org/LICENSE.txt)",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
tests_require=tests_require,
dependency_links=[],
entry_points="""\
[paste.app_factory]
main = kotti:main
[fanstatic.libraries]
kotti = kotti.fanstatic:lib_kotti
[console_scripts]
kotti-migrate = kotti.migrate:kotti_migrate_command
kotti-reset-workflow = kotti.workflow:reset_workflow_command
kotti-migrate-storage = kotti.filedepot:migrate_storages_command
[pytest11]
kotti = kotti.tests
[pyramid.scaffold]
kotti=kotti.scaffolds:KottiPackageTemplate
""",
extras_require={
'testing': tests_require,
'development': development_requires,
'docs': docs_require,
},
)
|
data/Kloudless/kloudless-python/setup.py
|
from setuptools import setup, find_packages
import re
import os
from os.path import join as opj
curdir = os.path.dirname(os.path.realpath(__file__))
def read(fname):
contents = ''
with open(fname) as f:
contents = f.read()
return contents
package_name = 'kloudless'
def version():
    # version.py is expected to hold a single quoted version string;
    # return the first quoted token the regex finds.
    text = read(opj(curdir, package_name, 'version.py'))
    matches = re.findall("('|\")(\S+)('|\")", text)
    return matches[0][1]
install_requires=[
'requests>=1.0',
'python-dateutil',
]
test_requires = [
'selenium>=2.48.0',
'pytz>=2013d',
]
if __name__ == '__main__':
setup(
name=package_name,
packages=[package_name],
include_package_data=True,
author='Kloudless',
author_email='hello@kloudless.com',
version=version(),
description = "Python library for the Kloudless API",
long_description=read(opj(curdir, 'README.md')),
url='https://kloudless.com/',
install_requires=install_requires,
license='MIT',
classifiers=[
'Programming Language :: Python',
'Intended Audience :: Developers',
'Natural Language :: English',
"License :: OSI Approved :: MIT License",
"Development Status :: 4 - Beta",
],
package_data={'': ['LICENSE']},
zip_safe=False,
tests_require = test_requires,
)
|
data/Stiivi/bubbles/tests/test_graph.py
|
import unittest
from bubbles import *
class GraphTestCase(unittest.TestCase):
def test_basic(self):
g = Graph()
g.add(Node("src"), "n1")
g.add(Node("distinct"),"n2")
g.add(Node("pretty_print"), "n3")
self.assertEqual(3, len(g.nodes))
g.connect("n1", "n2")
sources = g.sources("n2")
self.assertEqual(1, len(sources))
self.assertTrue(isinstance(sources["default"], Node))
self.assertEqual("src", sources["default"].opname)
def test_ports(self):
g = Graph()
g.add(Node("dim"), "dim")
g.add(Node("src"), "src")
g.add(Node("join_detail"), "j")
g.connect("dim", "j", "master")
with self.assertRaises(GraphError):
g.connect("src", "j", "master")
g.connect("src", "j", "detail")
sources = g.sources("j")
self.assertEqual(2, len(sources))
self.assertEqual(["detail", "master"], sorted(sources.keys()))
if __name__ == "__main__":
unittest.main()
|
data/PythonProgramming/Beginning-Game-Development-with-Python-and-Pygame/Chapter 6/6-6.py
|
import pygame
from pygame.locals import *
from sys import exit
from gameobjects.vector2 import Vector2
picture_file = 'map.png'
pygame.init()
screen = pygame.display.set_mode((640, 480), 0, 32)
picture = pygame.image.load(picture_file).convert()
picture_pos = Vector2(0, 0)
scroll_speed = 1000.
clock = pygame.time.Clock()
joystick = None
if pygame.joystick.get_count() > 0:
joystick = pygame.joystick.Joystick(0)
joystick.init()
if joystick is None:
print("Sorry, you need a joystick for this!")
pygame.quit()
exit()
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
exit()
scroll_direction = Vector2(*joystick.get_hat(0))
# Normalizing a zero-length vector divides by zero, so skip it while the
# hat is centered at (0, 0).
if scroll_direction.get_length() > 0:
    scroll_direction.normalize()
screen.fill((255, 255, 255))
screen.blit(picture, (-picture_pos.x, picture_pos.y))
time_passed = clock.tick()
time_passed_seconds = time_passed / 1000.0
picture_pos += scroll_direction * scroll_speed * time_passed_seconds
pygame.display.update()
|
data/OrbitzWorldwide/droned/droned/lib/droned/management/dmx/__main__.py
|
__doc__ = """Daemon Maker eXtraordinaire - aka DMX an application wrapper"""
__author__ = """Justin Venus <justin.venus@orbitz.com>"""
if __name__ != '__main__': raise ImportError('Not importable')
import os
import sys
DIRECTORY = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0,os.path.abspath(os.path.join(DIRECTORY,'..','..','..')))
unforkedPid = os.getpid()
childProcessPid = 0
from twisted.internet import protocol, defer
from twisted.python.log import FileLogObserver, textFromEventDict
from twisted.python.util import untilConcludes
import signal
DEFAULT_REACTORS = {
'Linux': 'epoll',
'FreeBSD': 'kqueue',
'SunOS': 'select',
}
def set_reactor():
import platform
REACTORNAME = DEFAULT_REACTORS.get(platform.system(), 'select')
if REACTORNAME == 'kqueue':
from twisted.internet import kqreactor
kqreactor.install()
elif REACTORNAME == 'epoll':
from twisted.internet import epollreactor
epollreactor.install()
elif REACTORNAME == 'poll':
from twisted.internet import pollreactor
pollreactor.install()
else:
from twisted.internet import selectreactor
selectreactor.install()
from twisted.internet import reactor
# Rebind the module-level name so later calls simply return the reactor
# that is already installed (a second install would raise). The original
# assignment lacked this global declaration and only bound an unused local.
global set_reactor
set_reactor = lambda: reactor
return reactor
class ManagedLogger(FileLogObserver):
"""overriding emit to preserve original logs"""
timeFormat = ""
def emit(self, eventDict):
"""ah, logs should be pretty much as the app intended"""
text = textFromEventDict(eventDict)
if text is None: return
untilConcludes(self.write, text)
untilConcludes(self.flush)
class DaemonProtocol(protocol.ProcessProtocol):
"""we need to track your app and help you log it's information"""
def __init__(self, name, label, r, deferred, **kwargs):
self.deferred = deferred
self.reactor = r
out = {
'type': '%s-%s_out' % (name, label)
}
err = {
'type': '%s-%s_err' % (name, label)
}
self.name = name
self.label = label
import droned.logging
self.log_stdout = droned.logging.logWithContext(**out)
self.log_stderr = droned.logging.logWithContext(**err)
def inConnectionLost(self):
"""inConnectionLost! stdin is closed! (we probably did it)"""
pass
def errReceived(self, data):
"""write the error message"""
self.log_stderr(str(data))
def outReceived(self, data):
"""write the out message"""
self.log_stdout(str(data))
def outConnectionLost(self):
"""outConnectionLost! The child closed their stdout!"""
pass
def errConnectionLost(self):
"""errConnectionLost! The child closed their stderr."""
pass
def connectionMade(self):
"""Process is running, we close STDIN"""
global closestdin
if closestdin:
self.transport.closeStdin()
global childProcessPid
global unforkedPid
x = unforkedPid
unforkedPid = 0
if x: self.reactor.callLater(2.0, os.kill, x, signal.SIGTERM)
childProcessPid = self.transport.pid
sys.stdout.write('%s [%s] running with pid %d\n' % \
(self.name, self.label, childProcessPid))
def processExited(self, reason):
"""our process has exited, time to shutdown."""
sys.stdout.write('%s has exited' % (self.name,))
if not self.deferred.called:
self.deferred.errback(reason)
global unforkedPid
global childProcessPid
childProcessPid = 0
if unforkedPid: os.kill(unforkedPid, signal.SIGTERM)
processEnded = processExited
class DaemonWrapper(object):
"""we take care of your application in a race free way."""
SIGNALS = dict((k, v) for v, k in signal.__dict__.iteritems() if \
v.startswith('SIG') and not v.startswith('SIG_'))
def __init__(self, r, name, label, cmd, args, env):
self.reactor = r
self.name = name
self.label = label
self.fqcmd = cmd
self.args = args
self.env = env
self.exitCode = 0
self.deferred = defer.succeed(None)
import droned.logging
self.log = droned.logging.logWithContext(type='dmx')
def routeSignal(self, signum, frame):
"""send signals we receive to the wrapped process"""
if signum == signal.SIGTERM:
signal.signal(signal.SIGTERM, signal.SIG_IGN)
self.reactor.callLater(120, self.reactor.stop)
global childProcessPid
if childProcessPid:
self.log('Sending %s to PID: %d' % \
(self.SIGNALS[signum], childProcessPid))
try: os.kill(childProcessPid, signum)
except:
droned.logging.err('when sending %s to pid %d' % \
(self.SIGNALS[signum],childProcessPid))
def processResult(self, result):
"""try to get the exit code"""
self.reactor.callLater(3.0, self.reactor.stop)
return result
def running(self):
"""called when the reactor is running"""
global masksignals
if masksignals:
for signum, signame in self.SIGNALS.items():
if signame in ('SIGKILL',): continue
try: signal.signal(signum, self.routeSignal)
except RuntimeError: pass
from droned.clients import command
self.log('Starting %s [%s]' % (self.name, self.label))
pargs = (self.name, self.label, self.reactor)
pkwargs = {}
global usetty
global path
self.deferred = command(self.fqcmd, self.args, self.env,
path, usetty, {}, DaemonProtocol,
*pargs, **pkwargs
)
self.deferred.addBoth(self.processResult)
return self.deferred
env = os.environ.copy()
args = tuple(sys.argv[1:])
logdir = env.pop('DRONED_LOGDIR', os.path.join(os.path.sep, 'tmp'))
# Note: bool() of a non-empty string is always True, so string-valued
# environment flags are compared against '0' explicitly ('0' disables,
# anything else enables; the defaults keep the original intent).
masksignals = env.pop('DRONED_MASK_SIGNALS', '1') != '0'
closestdin = env.pop('DRONED_CLOSE_STDIN', '1') != '0'
name = env.pop('DRONED_APPLICATION', 'app')
label = env.pop('DRONED_LABEL', '0')
usetty = env.pop('DRONED_USE_TTY', '0') != '0'
path = env.pop('DRONED_PATH', os.path.sep)
if name not in logdir:
t = os.path.join(logdir, name, label)
try:
if not os.path.exists(t):
os.makedirs(t, mode=0755)
logdir = t
except: pass
if args and os.path.exists(args[0]):
try: os.setsid()
except: pass
if os.fork() == 0:
os.chdir(os.path.sep)
os.umask(0)
sys.stdout.write('Daemon Pid: %d' % (os.getpid(),))
sys.stderr.flush()
sys.stdout.flush()
import droned.logging
sys.stdout = droned.logging.StdioKabob(0)
sys.stderr = droned.logging.StdioKabob(1)
maxfd = 4096
try:
import resource
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = 4096
except: pass
for fd in range(0, maxfd):
try: os.close(fd)
except OSError: pass
os.open(
hasattr(os, "devnull") and os.devnull or "/dev/null",
os.O_RDWR
)
os.dup2(0, 1)
os.dup2(0, 2)
loggers = [
'%s-%s_out' % (name, label),
'%s-%s_err' % (name, label),
]
droned.logging.logToDir(directory=logdir)
reactor = set_reactor()
droned.logging.logToDir(
directory=logdir,
LOG_TYPE=tuple(loggers),
OBSERVER=ManagedLogger
)
dmx = DaemonWrapper(reactor, name, label, args[0], args[1:], env)
def killGroup():
"""kill everybody"""
dmx.log('terminating process group')
signal.signal(signal.SIGTERM, signal.SIG_IGN)
os.kill(-os.getpgid(os.getpid()), signal.SIGTERM)
reactor.addSystemEventTrigger('before', 'shutdown', killGroup)
reactor.callWhenRunning(dmx.running)
reactor.run()
sys.exit(dmx.exitCode)
else:
reactor = set_reactor()
reactor.callLater(120, sys.exit, 1)
reactor.run()
sys.exit(0)
sys.exit(255)
|
data/RoseOu/flasky/venv/lib/python2.7/site-packages/flask_pagedown/fields.py
|
from wtforms.fields import TextAreaField
from .widgets import PageDown
class PageDownField(TextAreaField):
widget = PageDown()
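# Illustrative usage in a WTForms form (the form and field names below are
# assumptions, not part of this module):
#
#     class EditorForm(Form):
#         body = PageDownField('Write your post')
#
# The PageDown widget renders the textarea together with a live Markdown
# preview of its contents.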
|
data/Microsoft/ivy/ivy/utils/rectagtuple.py
|
"""
Immutable data structures based on Python's tuple (and inspired by
namedtuple).
Defines:
tagtuple -- A tagged variant of tuple
rectuple -- A record with named fields (a'la namedtuple)
"""
import sys as _sys
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
from collections import OrderedDict
class tagtuple(tuple):
"""
tagtuple -- A variant of tuple that acts as a tagged immutable
container for a sequence of elements. Instances of different
tagtuple subclasses are never equal. tagtuples are constructed
from a sequence of arguments, and not from an iterable (i.e.:
tagtuple(*range(10)) instead of tagtuple(range(10))).
Subclasses should set __slots__ = ()
"""
__slots__ = ()
def __new__(cls, *args):
"""
Create new instance of tagtuple
tagtuples are constracted from a sequence of arguments, and
not from an iterable. This means:
tagtuple(range(10)) --> tagtuple with 1 element that is a list
tagtuple(*range(10)) --> tagtuple with 10 int elements
"""
return super(tagtuple, cls).__new__(cls, args)
def __repr__(self):
"""Return a nicely formatted representation string"""
return type(self).__name__ + super(tagtuple, self).__repr__()
def __getnewargs__(self):
"""Return self as a plain tuple. Used by copy and pickle."""
return tuple(self)
def __eq__(self, other):
return type(self) is type(other) and super(tagtuple, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def __getslice__(self, i, j):
return type(self)(*super(tagtuple, self).__getslice__(i, j))
__add__ = property()
__contains__ = property()
__mul__ = property()
__rmul__ = property()
count = property()
index = property()
_class_template = '''\
class {typename}(tuple):
'{typename}({arg_list})'
__slots__ = ()
_fields = {field_names!r}
def __new__(_cls, {arg_list}):
'Create new instance of {typename}({arg_list})'
return _tuple.__new__(_cls, ({arg_list}))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new {typename} object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != {num_fields:d}:
raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return '{typename}({repr_fmt})' % self
def _asdict(self):
'Return a new OrderedDict which maps field names to their values'
return OrderedDict(zip(self._fields, self))
def _replace(_self, **kwds):
'Return a new {typename} object replacing specified fields with new values'
result = _self._make(map(kwds.pop, {field_names!r}, _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % kwds.keys())
return result
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
__dict__ = _property(_asdict)
def __getstate__(self):
'Exclude the OrderedDict from pickling'
pass
{field_defs}
__add__ = property()
__contains__ = property()
__getslice__ = property()
__len__ = property()
__mul__ = property()
__rmul__ = property()
count = property()
index = property()
def __eq__(self, other):
return type(self) is type(other) and _tuple.__eq__(self, other)
def __ne__(self, other):
return not self.__eq__(other)
'''
_repr_template = '{name}=%r'
_field_template = '''\
{name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''
def rectuple(typename, field_names, verbose=False, rename=False):
"""
Returns a new subclass of tuple that acts like a record.
Used to represent an immutable record with named fields. Compared
to namedtuple, it behaves less like a tuple and more like a
record. rectuples from different classes are never equal, and they
cannot be added or multiplied like tuples.
"""
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split()
field_names = map(str, field_names)
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not all(c.isalnum() or c=='_' for c in name)
or _iskeyword(name)
or not name
or name[0].isdigit()
or name.startswith('_')
or name in seen):
field_names[index] = '_%d' % index
seen.add(name)
for name in [typename] + field_names:
if not all(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain '
'alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
'keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with '
'a number: %r' % name)
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
'%r' % name)
if name in seen:
raise ValueError('Encountered duplicate field name: %r' % name)
seen.add(name)
class_definition = _class_template.format(
typename = typename,
field_names = tuple(field_names),
num_fields = len(field_names),
arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
repr_fmt = ', '.join(_repr_template.format(name=name)
for name in field_names),
field_defs = '\n'.join(_field_template.format(index=index, name=name)
for index, name in enumerate(field_names))
)
if verbose:
print class_definition
namespace = dict(_itemgetter=_itemgetter, __name__='rectuple_%s' % typename,
OrderedDict=OrderedDict, _property=property, _tuple=tuple)
try:
exec class_definition in namespace
except SyntaxError as e:
raise SyntaxError(e.message + ':\n' + class_definition)
result = namespace[typename]
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
if __name__ == '__main__':
import pickle
from itertools import chain, product
print "Testing tagtuple:"
print
class A(tagtuple):
__slots__ = ()
class B(tagtuple):
__slots__ = ()
a = A(1, 2, 3)
b = B(1, 2, 3)
t = (1, 2, 3)
print "a: ", a
print "b: ", b
print "t: ", t
print
print "a == b: ", a == b
print "a != b: ", a != b
print "hash(a) == hash(b): ", hash(a) == hash(b)
print "a <= b: ", a <= b
print "b <= a: ", b <= a
print
print "a == t: ", a == t
print "a != t: ", a != t
print "hash(a) == hash(t): ", hash(a) == hash(t)
print "a <= t: ", a <= t
print "t <= a: ", t <= a
print
d = {}
d[a] = 1
d[b] = 2
d[t] = 3
print "d: ", d
s = set()
s.add(a)
s.add(b)
s.add(t)
print "s: ", s
print
print "tuple(x for x in a): ", tuple(x for x in a)
print "list(a): ", list(a)
print "tuple(a): ", tuple(a)
print
a0 = pickle.loads(pickle.dumps(a, 0))
a1 = pickle.loads(pickle.dumps(a, 1))
a2 = pickle.loads(pickle.dumps(a, 2))
print "a0: ", a0
print "a1: ", a1
print "a2: ", a2
print "a0 == a, hash(a0) == hash(a): ", a0 == a, hash(a0) == hash(a)
print "a1 == a, hash(a1) == hash(a): ", a1 == a, hash(a1) == hash(a)
print "a2 == a, hash(a2) == hash(a): ", a2 == a, hash(a2) == hash(a)
print
print "a[:]: ", a[:]
print "a[1:-1]: ", a[1:-1]
print "a + a: ", a + a
print "a + b: ", a + b
print "(0, ) + a: ", (0, ) + a
print "a + (0, ): ", a + (0, )
print "2 * a: ", 2 * a
print "a * 2: ", a * 2
print
print "A(*chain((x**2 for x in range(10)), a)): ", A(*chain((x**2 for x in range(10)), a))
print "A(*product(range(3), repeat=2)): ", A(*product(range(3), repeat=2))
print
print "Testing rectuple:"
print
A = rectuple('A', 'x y', verbose=True)
B = rectuple('B', 'x y', verbose=True)
a = A(1,2)
b = B(1,2)
t = (1,2)
print "a: ", a
print "b: ", b
print "t: ", t
print
print "a == b: ", a == b
print "a != b: ", a != b
print "hash(a) == hash(b): ", hash(a) == hash(b)
print "a <= b: ", a <= b
print "b <= a: ", b <= a
print
print "a == t: ", a == t
print "a != t: ", a != t
print "hash(a) == hash(t): ", hash(a) == hash(t)
print "a <= t: ", a <= t
print "t <= a: ", t <= a
print
d = {}
d[a] = 1
d[b] = 2
d[t] = 3
print "d: ", d
s = set()
s.add(a)
s.add(b)
s.add(t)
print "s: ", s
print
print "tuple(x for x in a): ", tuple(x for x in a)
print "list(a): ", list(a)
print "tuple(a): ", tuple(a)
print
a0 = pickle.loads(pickle.dumps(a, 0))
a1 = pickle.loads(pickle.dumps(a, 1))
a2 = pickle.loads(pickle.dumps(a, 2))
print "a0: ", a0
print "a1: ", a1
print "a2: ", a2
print "a0 == a, hash(a0) == hash(a): ", a0 == a, hash(a0) == hash(a)
print "a1 == a, hash(a1) == hash(a): ", a1 == a, hash(a1) == hash(a)
print "a2 == a, hash(a2) == hash(a): ", a2 == a, hash(a2) == hash(a)
|
data/Yelp/fullerite/src/diamond/metric.py
|
import time
import re
import logging
from error import DiamondException
class Metric(object):
_METRIC_TYPES = ['COUNTER', 'GAUGE', 'CUMCOUNTER']
def __init__(self, path, value, raw_value=None, timestamp=None, precision=0,
metric_type='COUNTER', ttl=None, host="ignored", dimensions=None):
"""
Create new instance of the Metric class
Takes:
path=string: string that specifies the path of the metric
value=[float|int]: the value to be submitted
timestamp=[float|int]: the timestamp, in seconds since the epoch
(as from time.time())
precision=int: the precision to apply.
Generally the default (0) should work fine.
"""
if (None in [path, value] or metric_type not in self._METRIC_TYPES):
raise DiamondException(("Invalid parameter when creating new "
"Metric with path: %r value: %r "
"metric_type: %r")
% (path, value, metric_type))
if timestamp is None:
timestamp = int(time.time())
else:
if not isinstance(timestamp, int):
try:
timestamp = int(timestamp)
except ValueError, e:
raise DiamondException(("Invalid timestamp when "
"creating new Metric %r: %s")
% (path, e))
if not isinstance(value, (int, float)):
try:
if precision == 0:
value = round(float(value))
else:
value = float(value)
except ValueError, e:
raise DiamondException(("Invalid value when creating new "
"Metric %r: %s") % (path, e))
if dimensions is not None:
if not isinstance(dimensions, dict):
raise DiamondException(("Invalid dimensions when "
"creating new Metric %r: %s")
% (path, dimensions))
else:
dimensions = dict(
(k, str(v)) for k, v in dimensions.iteritems()
if v is not None and isinstance(v, (int, float, str, unicode)) and k is not None and isinstance(k, str)
)
self.dimensions = dimensions
self.path = path
self.value = value
self.raw_value = raw_value
self.timestamp = timestamp
self.precision = precision
self.metric_type = metric_type
self.ttl = ttl
def __repr__(self):
"""
Return the Metric as a string
"""
if not isinstance(self.precision, (int, long)):
log = logging.getLogger('diamond')
log.warn('Metric %s does not have a valid precision', self.path)
self.precision = 0
fstring = "%%s %%0.%if %%i\n" % self.precision
return fstring % (self.path, self.value, self.timestamp)
@classmethod
def parse(cls, string):
"""
Parse a string and create a metric
"""
match = re.match(r'^(?P<name>[A-Za-z0-9\.\-_]+)\s+'
+ '(?P<value>[0-9\.]+)\s+'
+ '(?P<timestamp>[0-9\.]+)(\n?)$',
string)
try:
    groups = match.groupdict()
    # timestamp must be passed by keyword: the third positional
    # parameter of Metric.__init__ is raw_value, not timestamp.
    return Metric(groups['name'],
                  groups['value'],
                  timestamp=float(groups['timestamp']))
except:
raise DiamondException(
"Metric could not be parsed from string: %s." % string)
def getPathPrefix(self):
"""
Returns the path prefix
servers.cpu.total.idle
return "servers"
"""
return self.path.split('.')[0]
def getCollectorPath(self):
"""
Returns collector path
servers.cpu.total.idle
return "cpu"
"""
return self.path.split('.')[1]
def getMetricPath(self):
"""
Returns the metric path after the collector name
servers.cpu.total.idle
return "total.idle"
"""
path = self.path.split('.')[2:]
return '.'.join(path)
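# A small, illustrative round-trip between __repr__ and parse (the values
# are arbitrary; run the module directly to execute the checks):
if __name__ == '__main__':
    m = Metric('servers.cpu.total.idle', 42.0, timestamp=1400000000)
    line = str(m)  # -> 'servers.cpu.total.idle 42 1400000000\n'
    parsed = Metric.parse(line)
    assert parsed.path == m.path
    assert parsed.timestamp == m.timestamp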
|
data/NumentaCorp/agamotto/agamotto/package.py
|
from agamotto.utils import execute, grepc
def installed(package):
"""Confirm that a package is installed"""
try:
return grepc(execute("yum list installed %s" % package), package) > 0
except Exception, _e:
return False
def is_installed(package):
"""Convenience alias to make the tests look nicer"""
return installed(package)
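# Illustrative usage inside a unittest-style test case (the package name is
# an arbitrary example):
#
#     self.assertTrue(is_installed('openssh-server'))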
|
data/Theano/Theano/theano/tests/test_printing.py
|
"""
Tests of printing functionality
"""
from __future__ import absolute_import, print_function, division
import logging
from nose.plugins.skip import SkipTest
import numpy
from six.moves import StringIO
import theano
import theano.tensor as tensor
from theano.printing import min_informative_str, debugprint
def test_pydotprint_cond_highlight():
"""
This is a REALLY PARTIAL TEST.
It was written to help debug stuff.
"""
if not theano.printing.pydot_imported:
raise SkipTest('pydot not available')
x = tensor.dvector()
f = theano.function([x], x * 2)
f([1, 2, 3, 4])
s = StringIO()
new_handler = logging.StreamHandler(s)
new_handler.setLevel(logging.DEBUG)
orig_handler = theano.logging_default_handler
theano.theano_logger.removeHandler(orig_handler)
theano.theano_logger.addHandler(new_handler)
try:
theano.printing.pydotprint(f, cond_highlight=True,
print_output_file=False)
finally:
theano.theano_logger.addHandler(orig_handler)
theano.theano_logger.removeHandler(new_handler)
assert (s.getvalue() == 'pydotprint: cond_highlight is set but there'
' is no IfElse node in the graph\n')
def test_pydotprint_return_image():
if not theano.printing.pydot_imported:
raise SkipTest('pydot not available')
x = tensor.dvector()
ret = theano.printing.pydotprint(x * 2, return_image=True)
assert isinstance(ret, (str, bytes))
def test_pydotprint_variables():
"""
This is a REALLY PARTIAL TEST.
It was written to help debug stuff
and makes sure the code runs.
"""
if not theano.printing.pydot_imported:
raise SkipTest('pydot not available')
x = tensor.dvector()
s = StringIO()
new_handler = logging.StreamHandler(s)
new_handler.setLevel(logging.DEBUG)
orig_handler = theano.logging_default_handler
theano.theano_logger.removeHandler(orig_handler)
theano.theano_logger.addHandler(new_handler)
try:
theano.printing.pydotprint(x * 2)
if not theano.printing.pd.__name__ == "pydot_ng":
theano.printing.pydotprint_variables(x * 2)
finally:
theano.theano_logger.addHandler(orig_handler)
theano.theano_logger.removeHandler(new_handler)
def test_pydotprint_long_name():
"""This is a REALLY PARTIAL TEST.
It prints a graph where there are variable and apply nodes whose long
names are different, but not the shortened names.
We should not merge those nodes in the dot graph.
"""
if not theano.printing.pydot_imported:
raise SkipTest('pydot not available')
x = tensor.dvector()
mode = theano.compile.mode.get_default_mode().excluding("fusion")
f = theano.function([x], [x * 2, x + x], mode=mode)
f([1, 2, 3, 4])
theano.printing.pydotprint(f, max_label_size=5,
print_output_file=False)
theano.printing.pydotprint([x * 2, x + x],
max_label_size=5,
print_output_file=False)
def test_pydotprint_profile():
"""Just check that pydotprint does not crash with ProfileMode."""
if not theano.printing.pydot_imported:
raise SkipTest('pydot not available')
A = tensor.matrix()
f = theano.function([A], A + 1, mode='ProfileMode')
theano.printing.pydotprint(f, print_output_file=False)
def test_min_informative_str():
""" evaluates a reference output to make sure the
min_informative_str function works as intended """
A = tensor.matrix(name='A')
B = tensor.matrix(name='B')
C = A + B
C.name = 'C'
D = tensor.matrix(name='D')
E = tensor.matrix(name='E')
F = D + E
G = C + F
mis = min_informative_str(G).replace("\t", " ")
reference = """A. Elemwise{add,no_inplace}
B. C
C. Elemwise{add,no_inplace}
D. D
E. E"""
if mis != reference:
print('--' + mis + '--')
print('--' + reference + '--')
assert mis == reference
def test_debugprint():
A = tensor.matrix(name='A')
B = tensor.matrix(name='B')
C = A + B
C.name = 'C'
D = tensor.matrix(name='D')
E = tensor.matrix(name='E')
F = D + E
G = C + F
mode = theano.compile.get_default_mode().including('fusion')
g = theano.function([A, B, D, E], G, mode=mode)
debugprint(G)
s = StringIO()
debugprint(G, file=s, ids='int')
s = s.getvalue()
reference = '\n'.join([
"Elemwise{add,no_inplace} [id 0] '' ",
" |Elemwise{add,no_inplace} [id 1] 'C' ",
" | |A [id 2]",
" | |B [id 3]",
" |Elemwise{add,no_inplace} [id 4] '' ",
" |D [id 5]",
" |E [id 6]",
]) + '\n'
if s != reference:
print('--' + s + '--')
print('--' + reference + '--')
assert s == reference
s = StringIO()
debugprint(G, file=s, ids='CHAR')
s = s.getvalue()
reference = "\n".join([
"Elemwise{add,no_inplace} [id A] '' ",
" |Elemwise{add,no_inplace} [id B] 'C' ",
" | |A [id C]",
" | |B [id D]",
" |Elemwise{add,no_inplace} [id E] '' ",
" |D [id F]",
" |E [id G]",
]) + '\n'
if s != reference:
print('--' + s + '--')
print('--' + reference + '--')
assert s == reference
s = StringIO()
debugprint(G, file=s, ids='CHAR', stop_on_name=True)
s = s.getvalue()
reference = '\n'.join([
"Elemwise{add,no_inplace} [id A] '' ",
" |Elemwise{add,no_inplace} [id B] 'C' ",
" |Elemwise{add,no_inplace} [id C] '' ",
" |D [id D]",
" |E [id E]",
]) + '\n'
if s != reference:
print('--' + s + '--')
print('--' + reference + '--')
assert s == reference
s = StringIO()
debugprint(G, file=s, ids='')
s = s.getvalue()
reference = '\n'.join([
"Elemwise{add,no_inplace} '' ",
" |Elemwise{add,no_inplace} 'C' ",
" | |A ",
" | |B ",
" |Elemwise{add,no_inplace} '' ",
" |D ",
" |E ",
]) + '\n'
if s != reference:
print('--' + s + '--')
print('--' + reference + '--')
assert s == reference
s = StringIO()
debugprint(g, file=s, ids='', print_storage=True)
s = s.getvalue()
reference = '\n'.join([
"Elemwise{add,no_inplace} '' 0 [None]",
" |A [None]",
" |B [None]",
" |D [None]",
" |E [None]",
]) + '\n'
if s != reference:
print('--' + s + '--')
print('--' + reference + '--')
assert s == reference
def test_scan_debugprint1():
k = tensor.iscalar("k")
A = tensor.dvector("A")
result, updates = theano.scan(fn=lambda prior_result, A: prior_result * A,
outputs_info=tensor.ones_like(A),
non_sequences=A,
n_steps=k)
final_result = result[-1]
output_str = theano.printing.debugprint(final_result, file='str')
lines = []
for line in output_str.split('\n'):
lines += [line]
expected_output = """Subtensor{int64} [id A] ''
|Subtensor{int64::} [id B] ''
| |for{cpu,scan_fn} [id C] ''
| | |k [id D]
| | |IncSubtensor{Set;:int64:} [id E] ''
| | | |AllocEmpty{dtype='float64'} [id F] ''
| | | | |Elemwise{add,no_inplace} [id G] ''
| | | | | |k [id D]
| | | | | |Subtensor{int64} [id H] ''
| | | | | |Shape [id I] ''
| | | | | | |Rebroadcast{0} [id J] ''
| | | | | | |DimShuffle{x,0} [id K] ''
| | | | | | |Elemwise{second,no_inplace} [id L] ''
| | | | | | |A [id M]
| | | | | | |DimShuffle{x} [id N] ''
| | | | | | |TensorConstant{1.0} [id O]
| | | | | |Constant{0} [id P]
| | | | |Subtensor{int64} [id Q] ''
| | | | |Shape [id R] ''
| | | | | |Rebroadcast{0} [id J] ''
| | | | |Constant{1} [id S]
| | | |Rebroadcast{0} [id J] ''
| | | |ScalarFromTensor [id T] ''
| | | |Subtensor{int64} [id H] ''
| | |A [id M]
| |Constant{1} [id U]
|Constant{-1} [id V]
Inner graphs of the scan ops:
for{cpu,scan_fn} [id C] ''
>Elemwise{mul,no_inplace} [id W] ''
> |<TensorType(float64, vector)> [id X] -> [id E]
> |A_copy [id Y] -> [id M]"""
for truth, out in zip(expected_output.split("\n"), lines):
assert truth.strip() == out.strip()
def test_scan_debugprint2():
coefficients = theano.tensor.vector("coefficients")
x = tensor.scalar("x")
max_coefficients_supported = 10000
components, updates = theano.scan(fn=lambda coefficient, power,
free_variable:
coefficient * (free_variable ** power),
outputs_info=None,
sequences=[
coefficients,
theano.tensor.arange(
max_coefficients_supported)],
non_sequences=x)
polynomial = components.sum()
output_str = theano.printing.debugprint(polynomial, file='str')
lines = []
for line in output_str.split('\n'):
lines += [line]
expected_output = """Sum{acc_dtype=float64} [id A] ''
|for{cpu,scan_fn} [id B] ''
|Elemwise{minimum,no_inplace} [id C] ''
| |Subtensor{int64} [id D] ''
| | |Shape [id E] ''
| | | |Subtensor{int64::} [id F] 'coefficients[0:]'
| | | |coefficients [id G]
| | | |Constant{0} [id H]
| | |Constant{0} [id I]
| |Subtensor{int64} [id J] ''
| |Shape [id K] ''
| | |Subtensor{int64::} [id L] ''
| | |ARange{dtype='int64'} [id M] ''
| | | |TensorConstant{0} [id N]
| | | |TensorConstant{10000} [id O]
| | | |TensorConstant{1} [id P]
| | |Constant{0} [id Q]
| |Constant{0} [id R]
|Subtensor{:int64:} [id S] ''
| |Subtensor{int64::} [id F] 'coefficients[0:]'
| |ScalarFromTensor [id T] ''
| |Elemwise{minimum,no_inplace} [id C] ''
|Subtensor{:int64:} [id U] ''
| |Subtensor{int64::} [id L] ''
| |ScalarFromTensor [id V] ''
| |Elemwise{minimum,no_inplace} [id C] ''
|Elemwise{minimum,no_inplace} [id C] ''
|x [id W]
Inner graphs of the scan ops:
for{cpu,scan_fn} [id B] ''
>Elemwise{mul,no_inplace} [id X] ''
> |coefficients[t] [id Y] -> [id S]
> |Elemwise{pow,no_inplace} [id Z] ''
> |x_copy [id BA] -> [id W]
> |<TensorType(int64, scalar)> [id BB] -> [id U]"""
for truth, out in zip(expected_output.split("\n"), lines):
assert truth.strip() == out.strip()
def test_scan_debugprint3():
coefficients = theano.tensor.dvector("coefficients")
max_coefficients_supported = 10
k = tensor.iscalar("k")
A = tensor.dvector("A")
def compute_A_k(A, k):
result, updates = theano.scan(fn=lambda prior_result,
A: prior_result * A,
outputs_info=tensor.ones_like(A),
non_sequences=A,
n_steps=k)
A_k = result[-1]
return A_k
components, updates = theano.scan(fn=lambda coefficient,
power, some_A, some_k:
coefficient *
(compute_A_k(some_A, some_k) ** power),
outputs_info=None,
sequences=[
coefficients,
theano.tensor.arange(
max_coefficients_supported)],
non_sequences=[A, k])
polynomial = components.sum()
final_result = polynomial
output_str = theano.printing.debugprint(final_result, file='str')
lines = []
for line in output_str.split('\n'):
lines += [line]
expected_output = """Sum{acc_dtype=float64} [id A] ''
|for{cpu,scan_fn} [id B] ''
|Elemwise{minimum,no_inplace} [id C] ''
| |Subtensor{int64} [id D] ''
| | |Shape [id E] ''
| | | |Subtensor{int64::} [id F] 'coefficients[0:]'
| | | |coefficients [id G]
| | | |Constant{0} [id H]
| | |Constant{0} [id I]
| |Subtensor{int64} [id J] ''
| |Shape [id K] ''
| | |Subtensor{int64::} [id L] ''
| | |ARange{dtype='int64'} [id M] ''
| | | |TensorConstant{0} [id N]
| | | |TensorConstant{10} [id O]
| | | |TensorConstant{1} [id P]
| | |Constant{0} [id Q]
| |Constant{0} [id R]
|Subtensor{:int64:} [id S] ''
| |Subtensor{int64::} [id F] 'coefficients[0:]'
| |ScalarFromTensor [id T] ''
| |Elemwise{minimum,no_inplace} [id C] ''
|Subtensor{:int64:} [id U] ''
| |Subtensor{int64::} [id L] ''
| |ScalarFromTensor [id V] ''
| |Elemwise{minimum,no_inplace} [id C] ''
|Elemwise{minimum,no_inplace} [id C] ''
|A [id W]
|k [id X]
Inner graphs of the scan ops:
for{cpu,scan_fn} [id B] ''
>Elemwise{mul,no_inplace} [id Y] ''
> |DimShuffle{x} [id Z] ''
> | |coefficients[t] [id BA] -> [id S]
> |Elemwise{pow,no_inplace} [id BB] ''
> |Subtensor{int64} [id BC] ''
> | |Subtensor{int64::} [id BD] ''
> | | |for{cpu,scan_fn} [id BE] ''
> | | | |k_copy [id BF] -> [id X]
> | | | |IncSubtensor{Set;:int64:} [id BG] ''
> | | | | |AllocEmpty{dtype='float64'} [id BH] ''
> | | | | | |Elemwise{add,no_inplace} [id BI] ''
> | | | | | | |k_copy [id BF] -> [id X]
> | | | | | | |Subtensor{int64} [id BJ] ''
> | | | | | | |Shape [id BK] ''
> | | | | | | | |Rebroadcast{0} [id BL] ''
> | | | | | | | |DimShuffle{x,0} [id BM] ''
> | | | | | | | |Elemwise{second,no_inplace} [id BN] ''
> | | | | | | | |A_copy [id BO] -> [id W]
> | | | | | | | |DimShuffle{x} [id BP] ''
> | | | | | | | |TensorConstant{1.0} [id BQ]
> | | | | | | |Constant{0} [id BR]
> | | | | | |Subtensor{int64} [id BS] ''
> | | | | | |Shape [id BT] ''
> | | | | | | |Rebroadcast{0} [id BL] ''
> | | | | | |Constant{1} [id BU]
> | | | | |Rebroadcast{0} [id BL] ''
> | | | | |ScalarFromTensor [id BV] ''
> | | | | |Subtensor{int64} [id BJ] ''
> | | | |A_copy [id BO] -> [id W]
> | | |Constant{1} [id BW]
> | |Constant{-1} [id BX]
> |DimShuffle{x} [id BY] ''
> |<TensorType(int64, scalar)> [id BZ] -> [id U]
for{cpu,scan_fn} [id BE] ''
>Elemwise{mul,no_inplace} [id CA] ''
> |<TensorType(float64, vector)> [id CB] -> [id BG]
> |A_copy [id CC] -> [id BO]"""
for truth, out in zip(expected_output.split("\n"), lines):
assert truth.strip() == out.strip()
def test_scan_debugprint4():
def fn(a_m2, a_m1, b_m2, b_m1):
return a_m1 + a_m2, b_m1 + b_m2
a0 = theano.shared(numpy.arange(2, dtype='int64'))
b0 = theano.shared(numpy.arange(2, dtype='int64'))
(a, b), _ = theano.scan(
fn, outputs_info=[{'initial': a0, 'taps': [-2, -1]},
{'initial': b0, 'taps': [-2, -1]}],
n_steps=5)
final_result = a + b
output_str = theano.printing.debugprint(final_result, file='str')
lines = []
for line in output_str.split('\n'):
lines += [line]
expected_output = """Elemwise{add,no_inplace} [id A] ''
|Subtensor{int64::} [id B] ''
| |for{cpu,scan_fn}.0 [id C] ''
| | |TensorConstant{5} [id D]
| | |IncSubtensor{Set;:int64:} [id E] ''
| | | |AllocEmpty{dtype='int64'} [id F] ''
| | | | |Elemwise{add,no_inplace} [id G] ''
| | | | |TensorConstant{5} [id D]
| | | | |Subtensor{int64} [id H] ''
| | | | |Shape [id I] ''
| | | | | |Subtensor{:int64:} [id J] ''
| | | | | |<TensorType(int64, vector)> [id K]
| | | | | |Constant{2} [id L]
| | | | |Constant{0} [id M]
| | | |Subtensor{:int64:} [id J] ''
| | | |ScalarFromTensor [id N] ''
| | | |Subtensor{int64} [id H] ''
| | |IncSubtensor{Set;:int64:} [id O] ''
| | |AllocEmpty{dtype='int64'} [id P] ''
| | | |Elemwise{add,no_inplace} [id Q] ''
| | | |TensorConstant{5} [id D]
| | | |Subtensor{int64} [id R] ''
| | | |Shape [id S] ''
| | | | |Subtensor{:int64:} [id T] ''
| | | | |<TensorType(int64, vector)> [id U]
| | | | |Constant{2} [id V]
| | | |Constant{0} [id W]
| | |Subtensor{:int64:} [id T] ''
| | |ScalarFromTensor [id X] ''
| | |Subtensor{int64} [id R] ''
| |Constant{2} [id Y]
|Subtensor{int64::} [id Z] ''
|for{cpu,scan_fn}.1 [id C] ''
|Constant{2} [id BA]
Inner graphs of the scan ops:
for{cpu,scan_fn}.0 [id C] ''
>Elemwise{add,no_inplace} [id BB] ''
> |<TensorType(int64, scalar)> [id BC] -> [id E]
> |<TensorType(int64, scalar)> [id BD] -> [id E]
>Elemwise{add,no_inplace} [id BE] ''
> |<TensorType(int64, scalar)> [id BF] -> [id O]
> |<TensorType(int64, scalar)> [id BG] -> [id O]
for{cpu,scan_fn}.1 [id C] ''
>Elemwise{add,no_inplace} [id BB] ''
>Elemwise{add,no_inplace} [id BE] ''"""
for truth, out in zip(expected_output.split("\n"), lines):
assert truth.strip() == out.strip()
def test_scan_debugprint5():
k = tensor.iscalar("k")
A = tensor.dvector("A")
result, updates = theano.scan(fn=lambda prior_result, A: prior_result * A,
outputs_info=tensor.ones_like(A),
non_sequences=A,
n_steps=k)
final_result = tensor.grad(result[-1].sum(), A)
output_str = theano.printing.debugprint(final_result, file='str')
lines = []
for line in output_str.split('\n'):
lines += [line]
expected_output = """Subtensor{int64} [id A] ''
|for{cpu,grad_of_scan_fn}.1 [id B] ''
| |Elemwise{sub,no_inplace} [id C] ''
| | |Subtensor{int64} [id D] ''
| | | |Shape [id E] ''
| | | | |for{cpu,scan_fn} [id F] ''
| | | | |k [id G]
| | | | |IncSubtensor{Set;:int64:} [id H] ''
| | | | | |AllocEmpty{dtype='float64'} [id I] ''
| | | | | | |Elemwise{add,no_inplace} [id J] ''
| | | | | | | |k [id G]
| | | | | | | |Subtensor{int64} [id K] ''
| | | | | | | |Shape [id L] ''
| | | | | | | | |Rebroadcast{0} [id M] ''
| | | | | | | | |DimShuffle{x,0} [id N] ''
| | | | | | | | |Elemwise{second,no_inplace} [id O] ''
| | | | | | | | |A [id P]
| | | | | | | | |DimShuffle{x} [id Q] ''
| | | | | | | | |TensorConstant{1.0} [id R]
| | | | | | | |Constant{0} [id S]
| | | | | | |Subtensor{int64} [id T] ''
| | | | | | |Shape [id U] ''
| | | | | | | |Rebroadcast{0} [id M] ''
| | | | | | |Constant{1} [id V]
| | | | | |Rebroadcast{0} [id M] ''
| | | | | |ScalarFromTensor [id W] ''
| | | | | |Subtensor{int64} [id K] ''
| | | | |A [id P]
| | | |Constant{0} [id X]
| | |TensorConstant{1} [id Y]
| |Subtensor{:int64:} [id Z] ''
| | |Subtensor{::int64} [id BA] ''
| | | |Subtensor{:int64:} [id BB] ''
| | | | |for{cpu,scan_fn} [id F] ''
| | | | |Constant{-1} [id BC]
| | | |Constant{-1} [id BD]
| | |ScalarFromTensor [id BE] ''
| | |Elemwise{sub,no_inplace} [id C] ''
| |Subtensor{:int64:} [id BF] ''
| | |Subtensor{:int64:} [id BG] ''
| | | |Subtensor{::int64} [id BH] ''
| | | | |for{cpu,scan_fn} [id F] ''
| | | | |Constant{-1} [id BI]
| | | |Constant{-1} [id BJ]
| | |ScalarFromTensor [id BK] ''
| | |Elemwise{sub,no_inplace} [id C] ''
| |Subtensor{::int64} [id BL] ''
| | |IncSubtensor{Inc;int64::} [id BM] ''
| | | |Elemwise{second,no_inplace} [id BN] ''
| | | | |for{cpu,scan_fn} [id BO] ''
| | | | | |k [id G]
| | | | | |IncSubtensor{Set;:int64:} [id H] ''
| | | | | |A [id P]
| | | | |DimShuffle{x,x} [id BP] ''
| | | | |TensorConstant{0.0} [id BQ]
| | | |IncSubtensor{Inc;int64} [id BR] ''
| | | | |Elemwise{second,no_inplace} [id BS] ''
| | | | | |Subtensor{int64::} [id BT] ''
| | | | | | |for{cpu,scan_fn} [id BO] ''
| | | | | | |Constant{1} [id BU]
| | | | | |DimShuffle{x,x} [id BV] ''
| | | | | |TensorConstant{0.0} [id BQ]
| | | | |Elemwise{second} [id BW] ''
| | | | | |Subtensor{int64} [id BX] ''
| | | | | | |Subtensor{int64::} [id BT] ''
| | | | | | |Constant{-1} [id BY]
| | | | | |DimShuffle{x} [id BZ] ''
| | | | | |Elemwise{second,no_inplace} [id CA] ''
| | | | | |Sum{acc_dtype=float64} [id CB] ''
| | | | | | |Subtensor{int64} [id BX] ''
| | | | | |TensorConstant{1.0} [id R]
| | | | |Constant{-1} [id BY]
| | | |Constant{1} [id BU]
| | |Constant{-1} [id CC]
| |Alloc [id CD] ''
| | |TensorConstant{0.0} [id BQ]
| | |Elemwise{add,no_inplace} [id CE] ''
| | | |Elemwise{sub,no_inplace} [id C] ''
| | | |TensorConstant{1} [id Y]
| | |Subtensor{int64} [id CF] ''
| | |Shape [id CG] ''
| | | |A [id P]
| | |Constant{0} [id CH]
| |A [id P]
|Constant{-1} [id CI]
Inner graphs of the scan ops:
for{cpu,grad_of_scan_fn}.1 [id B] ''
>Elemwise{add,no_inplace} [id CJ] ''
> |Elemwise{mul} [id CK] ''
> | |<TensorType(float64, vector)> [id CL] -> [id BL]
> | |A_copy [id CM] -> [id P]
> |<TensorType(float64, vector)> [id CN] -> [id BL]
>Elemwise{add,no_inplace} [id CO] ''
> |Elemwise{mul} [id CP] ''
> | |<TensorType(float64, vector)> [id CL] -> [id BL]
> | |<TensorType(float64, vector)> [id CQ] -> [id Z]
> |<TensorType(float64, vector)> [id CR] -> [id CD]
for{cpu,scan_fn} [id F] ''
>Elemwise{mul,no_inplace} [id CS] ''
> |<TensorType(float64, vector)> [id CT] -> [id H]
> |A_copy [id CU] -> [id P]
for{cpu,scan_fn} [id F] ''
>Elemwise{mul,no_inplace} [id CS] ''
for{cpu,scan_fn} [id F] ''
>Elemwise{mul,no_inplace} [id CS] ''
for{cpu,scan_fn} [id BO] ''
>Elemwise{mul,no_inplace} [id CS] ''
for{cpu,scan_fn} [id BO] ''
>Elemwise{mul,no_inplace} [id CS] ''"""
for truth, out in zip(expected_output.split("\n"), lines):
assert truth.strip() == out.strip()
def test_printing_scan():
if not theano.printing.pydot_imported:
raise SkipTest('pydot not available')
def f_pow2(x_tm1):
return 2 * x_tm1
state = theano.tensor.scalar('state')
n_steps = theano.tensor.iscalar('nsteps')
output, updates = theano.scan(f_pow2,
[],
state,
[],
n_steps=n_steps,
truncate_gradient=-1,
go_backwards=False)
f = theano.function([state, n_steps],
output,
updates=updates,
allow_input_downcast=True)
theano.printing.pydotprint(output, scan_graphs=True)
theano.printing.pydotprint(f, scan_graphs=True)
|
data/adlibre/Adlibre-DMS/adlibre_dms/apps/dms_plugins/pluginpoints.py
|
from djangoplugins.point import PluginMount
class BeforeStoragePluginPoint(object):
__metaclass__ = PluginMount
settings_field_name = 'before_storage_plugins'
class DatabaseStoragePluginPoint(object):
__metaclass__ = PluginMount
settings_field_name = 'database_storage_plugins'
class StoragePluginPoint(object):
__metaclass__ = PluginMount
settings_field_name = 'storage_plugins'
class BeforeRetrievalPluginPoint(object):
__metaclass__ = PluginMount
settings_field_name = 'before_retrieval_plugins'
class BeforeRemovalPluginPoint(object):
__metaclass__ = PluginMount
settings_field_name = 'before_removal_plugins'
class BeforeUpdatePluginPoint(object):
__metaclass__ = PluginMount
settings_field_name = 'before_update_plugins'
class UpdatePluginPoint(object):
__metaclass__ = PluginMount
settings_field_name = 'update_plugins'
class DatabaseUpdatePluginPoint(object):
__metaclass__ = PluginMount
settings_field_name = 'database_update_plugins'
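# --- Usage sketch (hypothetical; not part of the original module) ---
# With djangoplugins, a concrete plugin is declared by subclassing one of the
# plugin points above; the PluginMount metaclass registers it automatically.
# The class and `work` signature below are illustrative only:
#
# class ValidateBeforeStorage(BeforeStoragePluginPoint):
#     title = 'Validate document before storage'
#
#     def work(self, request, document, **kwargs):
#         # Inspect or transform the document, then hand it on.
#         return document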
|
data/OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/casehandlers/query_hdf5.py
|
from weakref import ref
import numpy as np
from openmdao.main.api import VariableTree
from openmdao.lib.casehandlers.query import DictList, ListResult
_GLOBAL_DICT = dict(__builtins__=None)
class CaseDatasetHDF5(object):
"""
Reads case data from `filename` and allows queries on it.
To get all case data::
        cds = CaseDatasetHDF5('recorded.hdf5', 'hdf5')
cases = cds.data.fetch()
To get names of columns returned::
names = cds.data.var_names().fetch()
To select a specific set of variables::
        cases = cds.data.vars(['top.sub.comp.x', 'top.sub.comp.y']).fetch()
To get a case and all its child cases::
cases = cds.data.parent_case(parent_itername).fetch()
To get cases for a particular driver::
cases = cds.data.driver(driver_name).fetch()
To get cases for a particular run of a particular driver::
cases = cds.data.driver(driver_name).parent_case(parent_itername).fetch()
Other possibilities exist, see :class:`QueryHDF5`.
"""
def __init__(self, filename, format):
format = format.lower()
if format == 'hdf5':
self._reader = _HDF5Reader(filename)
else:
raise ValueError("dataset format must be 'hdf5'")
        self._query_id = self._query_itername = None
        self._parent_id = self._parent_itername = None
        self._driver_id = self._driver_name = None
self._case_ids = self._drivers = self._case_iternames = None
self.metadata_names = ['_id', '_parent_id', '_driver_id', '_driver_name', '_itername', 'error_status',
'error_message', 'timestamp']
@property
def data(self):
""" :class:`Query` object. """
return QueryHDF5(self)
@property
def drivers(self):
""" List of driver info dictionaries. """
return self._reader.drivers()
@property
def simulation_info(self):
""" Simulation info dictionary. """
return self._reader.simulation_info
def _fetch(self, query):
""" Return data based on `query`. """
self._setup(query)
if query.vnames:
tmp = []
for name in self.metadata_names:
if name in query.vnames:
tmp.append(name)
self.metadata_names = tmp
names = query.vnames
else:
if query.driver_name:
driver_info = self._drivers[self._driver_name]
prefix = driver_info['prefix']
all_names = [prefix+name
for name in driver_info['recording']]
else:
all_names = []
for driver_info in self._drivers.values():
prefix = driver_info['prefix']
all_names.extend([prefix+name
for name in driver_info['recording']])
names = sorted(all_names+self.metadata_names)
if query.names:
return names
nan = float('NaN')
rows = ListResult()
state = {}
for case_data in self._reader.cases():
data = case_data['data']
metadata = case_data['metadata']
case_id = metadata['_id']
case_driver_id = metadata['_driver_id']
case_driver_name = metadata['_driver_name']
case_itername = metadata['_itername']
prefix = self._drivers[case_driver_name]['prefix']
            if not prefix:
                data = data.copy()
state.update(data)
if self._driver_name is not None and \
case_driver_name != self._driver_name:
continue
if self._case_iternames is None or case_itername in self._case_iternames:
for name in self.metadata_names:
data[name] = case_data['metadata'][name]
row = DictList(names)
for name in names:
if query.local_only:
if name in self.metadata_names:
row.append(data[name])
else:
driver = self._drivers[case_driver_name]
lnames = [prefix+rec for rec in driver['recording']]
if name in lnames:
row.append(data[name])
else:
row.append(nan)
elif name in state:
row.append(state[name])
elif name in data:
row.append(data[name])
else:
row.append(nan)
rows.append(row)
if case_itername == self._query_itername or case_itername == self._parent_itername:
break
if self._query_id and not rows:
raise ValueError('No case with _id %s' % self._query_id)
if query.transpose:
tmp = DictList(names)
for i in range(len(rows[0])):
tmp.append([row[i] for row in rows])
tmp.cds = self
return tmp
rows.cds = self
return rows
def _write(self, query, out, format):
raise NotImplementedError
def _setup(self, query):
""" Setup for processing `query`. """
if query.vnames is not None:
bad = []
metadata = self.simulation_info['variable_metadata']
expressions = self.simulation_info['expressions']
for name in query.vnames:
                if (name not in metadata and
                        name not in [e['pcomp_name']
                                     for e in expressions.values()] and
                        name not in self.metadata_names):
bad.append(name)
if bad:
raise RuntimeError('Names not found in the dataset: %s' % bad)
self._drivers = {}
self._driver_id = None
self._driver_name = None
for driver_info in self._reader.drivers():
_id = driver_info['_id']
name = driver_info['name']
prefix, _, name = name.rpartition('.')
if prefix:
prefix += '.'
driver_info['prefix'] = prefix
            self._drivers[driver_info['name']] = driver_info
            if driver_info['name'] == query.driver_name:
self._driver_name = query.driver_name
if query.driver_name:
if self._driver_name is None:
raise ValueError('No driver named %r' % query.driver_name)
self._case_ids = None
self._query_id = None
self._parent_id = None
if query.case_itername is not None:
self._query_itername = query.case_itername
self._case_iternames = set((self._query_itername,))
self._driver_name = None
elif query.parent_itername is not None:
self._parent_itername = query.parent_itername
self._case_iternames = set((self._parent_itername,))
parent_itername_parts = self._parent_itername.split('-')
for case_data in self._reader.cases():
itername = case_data['metadata']['_itername']
itername_parts = itername.split('-')
                if (len(parent_itername_parts) + 1 == len(itername_parts) and
                        itername_parts[:-1] == parent_itername_parts):
self._case_iternames.add(itername)
def restore(self, assembly, case_id):
""" Restore case `case_id` into `assembly`. """
raise NotImplementedError
def _set(self, assembly, name, value):
""" Set `name` in `assembly` to `value`. """
if isinstance(value, dict):
curr = assembly.get(name)
if isinstance(curr, VariableTree):
for key, val in value.items():
self._set(assembly, '.'.join((name, key)), val)
elif '[' in name:
if isinstance(value, unicode):
value = str(value)
exec('assembly.%s = value' % name, _GLOBAL_DICT, locals())
else:
for key, val in value.items():
if isinstance(val, unicode):
value[key] = str(val)
assembly.set(name, value)
else:
if isinstance(value, unicode):
value = str(value)
if '[' in name:
exec('assembly.%s = value' % name, _GLOBAL_DICT, locals())
else:
assembly.set(name, value)
class QueryHDF5(object):
"""
Retains query information for a :class:`CaseDataset`. All methods other
than :meth:`fetch` and :meth:`write` return ``self``, so operations are
easily chained. If the same method is called more than once, only the last
call has an effect.
"""
def __init__(self, dataset):
self._dataset = dataset
self.driver_name = None
self.case_id = None
self.case_itername = None
self.parent_id = None
self.parent_itername = None
self.vnames = None
self.local_only = False
self.names = False
self.transpose = False
def fetch(self):
""" Return a list of rows of data, one for each selected case. """
return self._dataset._fetch(self)
def write(self, out, format=None):
"""
Write filtered :class:`CaseDataset` to `out`, a filename or file-like
object. Default `format` is the format of the original data file.
"""
raise NotImplementedError
def driver(self, driver_name):
""" Filter the cases to those recorded by the named driver. """
self.driver_name = driver_name
return self
def case(self, case_itername):
""" Return this case. """
self.case_itername = case_itername
self.parent_itername = None
return self
def parent_case(self, parent_case_id):
""" Filter the cases to only include this case and its children. """
self.parent_id = parent_case_id
self.parent_itername = parent_case_id
self.case_id = None
return self
def vars(self, *args):
""" Filter the variable columns returned in the row. """
self.vnames = []
for arg in args:
if isinstance(arg, basestring):
self.vnames.append(arg)
else:
self.vnames.extend(arg)
return self
def local(self):
"""
Restrict the variables returned to only those in the specific driver's
local set. This means that if there are cases from more than one driver,
variables not local to that driver will be set to ``NaN``.
"""
self.local_only = True
return self
def by_case(self):
"""
Have :meth:`fetch` return data as ``[case][var]`` (the default).
"""
self.transpose = False
return self
def by_variable(self):
"""
Have :meth:`fetch` return data as ``[var][case]`` rather than the
default of ``[case][var]``.
"""
self.transpose = True
return self
def var_names(self):
""" Return a list of the names of the variables in the cases. """
self.names = True
return self
class _HDF5Reader(object):
""" Reads a :class:`HDF5CaseRecorder` file. """
def __init__(self, filename):
import h5py
self._inp = h5py.File(filename,'r')
self._simulation_info = self.read_simulation_info()
self._state = 'drivers'
self._info = None
@property
def simulation_info(self):
""" Simulation info dictionary. """
return self._simulation_info
    def read_iteration_case_from_hdf5(self, hdf5file, driver_name,
                                      iteration_case_name):
        info = {}
        driver_grp = self._inp['/iteration_cases'][driver_name]
        iteration_grp = driver_grp[iteration_case_name]
        info['metadata'] = self.read_from_hdf5(iteration_grp['metadata'])
        data_grp = iteration_grp['data']
        info['data'] = {}
        float_names = driver_grp['float_names']
        int_names = driver_grp['int_names']
        str_names = driver_grp['str_names']
        for i, name in enumerate(float_names):
            info['data'][name] = data_grp['array_of_floats'][i]
        for i, name in enumerate(str_names):
            info['data'][name] = data_grp['array_of_strs'][i]
        for i, name in enumerate(int_names):
            info['data'][name] = data_grp['array_of_ints'][i]
        for name in data_grp.keys():
            if name not in ['array_of_ints', 'array_of_strs', 'array_of_floats']:
                if '__vartree__' in data_grp[name].attrs:
                    # Variable trees come back as nested dictionaries.
                    info['data'][name] = {}
                    for n, v in data_grp[name].items():
                        info['data'][name][n] = self.read_from_hdf5(data_grp[name][n])
                else:
                    info['data'][name] = self.read_from_hdf5(data_grp[name])
        return info
    def read_from_hdf5(self, value):
        import h5py
        if isinstance(value, h5py._hl.group.Group):
            d = {}
            group = value
            for name, item in group.attrs.items():
                d[name] = self.read_from_hdf5(item)
            for name, item in group.items():
                d[name] = self.read_from_hdf5(item)
            return d
        elif value.dtype.names:
            d = {}
            for name in value.dtype.names:
                d[name] = value[name][0]
            return d
        else:
            return value[()]
    def read_simulation_info(self):
        sim_info_grp = self._inp['simulation_info']
        sim_info = {}
        for name, value in sim_info_grp.attrs.items():
            sim_info[name] = self.read_from_hdf5(value)
        for name, value in sim_info_grp.items():
            sim_info[name] = self.read_from_hdf5(value)
        return sim_info
    def drivers(self):
        """ Return list of 'driver_info' dictionaries. """
        driver_info = []
        for name in self._inp.keys():
            if name.startswith('driver_info_'):
                driver_info.append(self.read_from_hdf5(self._inp[name]))
        return driver_info
    def cases(self):
        """ Return sequence of 'iteration_case' dictionaries. """
        iteration_cases_grp = self._inp['/iteration_cases']
        case_timestamps = {}
        for driver_name in iteration_cases_grp:
            for iteration_case_name in iteration_cases_grp[driver_name]:
                if iteration_case_name.startswith('iteration_case_'):
                    timestamp = iteration_cases_grp[driver_name][iteration_case_name]['metadata']['timestamp'][0]
                    case_timestamps[timestamp] = (driver_name, iteration_case_name)
        sorted_timestamps = sorted(case_timestamps)
        for timestamp in sorted_timestamps:
            driver_name, iteration_case_name = case_timestamps[timestamp]
            info = self.read_iteration_case_from_hdf5(self._inp, driver_name,
                                                      iteration_case_name)
            yield info
def _next(self):
""" Return next dictionary of data. """
pass
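# --- Usage sketch (hypothetical; assumes 'recorded.hdf5' was written by an
# HDF5 case recorder and that 'driver' names a driver present in that file) ---
if __name__ == '__main__':
    cds = CaseDatasetHDF5('recorded.hdf5', 'hdf5')
    # Chained query: only the named driver's cases, one list per variable.
    print(cds.data.var_names().fetch())
    for row in cds.data.driver('driver').by_variable().fetch():
        print(row)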
|
data/UDST/urbansim/urbansim/tests/test_accounts.py
|
import pandas as pd
import pytest
from pandas.util import testing as pdt
from .. import accounts
@pytest.fixture(scope='module')
def acc_name():
return 'test'
@pytest.fixture(scope='module')
def acc_bal():
return 1000
@pytest.fixture
def acc(acc_name, acc_bal):
return accounts.Account(acc_name, acc_bal)
def test_init(acc, acc_name):
assert acc.name == acc_name
assert acc.balance == 1000
assert acc.transactions == []
def test_add_transaction(acc, acc_bal):
amount = -50
subaccount = ('a', 'b', 'c')
metadata = {'for': 'light speed engine'}
acc.add_transaction(amount, subaccount, metadata)
assert len(acc.transactions) == 1
assert acc.balance == acc_bal + amount
t = acc.transactions[-1]
assert isinstance(t, accounts.Transaction)
assert t.amount == amount
assert t.subaccount == subaccount
assert t.metadata == metadata
def test_add_transactions(acc, acc_bal):
t1 = accounts.Transaction(200, ('a', 'b', 'c'), None)
t2 = (-50, None, {'to': 'Acme Corp.'})
t3 = (-100, ('a', 'b', 'c'), 'Acme Corp.')
t4 = (42, None, None)
acc.add_transactions((t1, t2, t3, t4))
assert len(acc.transactions) == 4
assert acc.balance == acc_bal + t1[0] + t2[0] + t3[0] + t4[0]
assert acc.total_transactions() == t1[0] + t2[0] + t3[0] + t4[0]
assert acc.total_transactions_by_subacct(('a', 'b', 'c')) == t1[0] + t3[0]
assert acc.total_transactions_by_subacct(None) == t2[0] + t4[0]
assert list(acc.all_subaccounts()) == [('a', 'b', 'c'), None]
assert list(acc.iter_subaccounts()) == [
(('a', 'b', 'c'), t1[0] + t3[0]),
(None, t2[0] + t4[0])]
def test_column_names_from_metadata():
cnfm = accounts._column_names_from_metadata
assert cnfm([]) == []
assert cnfm([{'a': 1, 'b': 2}]) == ['a', 'b']
assert cnfm([{'a': 1}, {'b': 2}]) == ['a', 'b']
assert cnfm([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]) == ['a', 'b']
def test_to_frame(acc, acc_bal):
t1 = accounts.Transaction(200, ('a', 'b', 'c'), None)
t2 = (-50, None, {'to': 'Acme Corp.'})
acc.add_transactions((t1, t2))
expected = pd.DataFrame(
[[200, ('a', 'b', 'c'), None],
[-50, None, 'Acme Corp.']],
columns=['amount', 'subaccount', 'to'])
df = acc.to_frame()
pdt.assert_frame_equal(df, expected)
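# --- Usage sketch (not part of the original tests): a minimal end-to-end
# use of the Account API exercised above ---
#
# from urbansim import accounts
# acc = accounts.Account('capital', 1000)
# acc.add_transaction(-250, ('city', 'roads'), {'for': 'road repair'})
# acc.add_transaction(500, None, {'from': 'state grant'})
# acc.balance               # -> 1250
# acc.total_transactions()  # -> 250
# acc.to_frame()            # -> DataFrame with one row per transaction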
|
data/Yelp/bravado/tests/petstore/user/updateUser_test.py
|
from __future__ import print_function
import pytest
def test_200_success(petstore):
User = petstore.get_model('User')
user = User(
id=1,
username='bozo',
firstName='Bozo',
lastName='TheClown',
email='bozo@clown.com',
password='newpassword',
phone='111-222-3333',
userStatus=2,
)
result = petstore.user.updateUser(username='bozo', body=user).result()
assert result is None
@pytest.mark.xfail(reason='Broken on server side - blindly succeeds')
def test_404_user_not_found(petstore):
User = petstore.get_model('User')
user = User(
id=1,
username='bozo',
firstName='Bozo',
lastName='Smith',
email='bozo@clown.com',
password='letmein',
phone='111-222-3333',
userStatus=3,
)
result = petstore.user.updateUser(
username='i_dont_exist', body=user).result()
print(result)
@pytest.mark.xfail(reason='Broken on server side - blindly succeeds')
def test_400_invalid_username(petstore):
assert False
|
data/adafruit/Adafruit_Python_BluefruitLE/Adafruit_BluefruitLE/bluez_dbus/gatt.py
|
import uuid
import dbus
from ..interfaces import GattService, GattCharacteristic, GattDescriptor
from ..platform import get_provider
_SERVICE_INTERFACE = 'org.bluez.GattService1'
_CHARACTERISTIC_INTERFACE = 'org.bluez.GattCharacteristic1'
_DESCRIPTOR_INTERFACE = 'org.bluez.GattDescriptor1'
class BluezGattService(GattService):
"""Bluez GATT service object."""
def __init__(self, dbus_obj):
"""Create an instance of the GATT service from the provided bluez
DBus object.
"""
self._props = dbus.Interface(dbus_obj, 'org.freedesktop.DBus.Properties')
@property
def uuid(self):
"""Return the UUID of this GATT service."""
return uuid.UUID(str(self._props.Get(_SERVICE_INTERFACE, 'UUID')))
def list_characteristics(self):
"""Return list of GATT characteristics that have been discovered for this
service.
"""
paths = self._props.Get(_SERVICE_INTERFACE, 'Characteristics')
return map(BluezGattCharacteristic,
get_provider()._get_objects_by_path(paths))
class BluezGattCharacteristic(GattCharacteristic):
"""Bluez GATT characteristic object."""
def __init__(self, dbus_obj):
"""Create an instance of the GATT characteristic from the provided bluez
DBus object.
"""
self._characteristic = dbus.Interface(dbus_obj, _CHARACTERISTIC_INTERFACE)
self._props = dbus.Interface(dbus_obj, 'org.freedesktop.DBus.Properties')
@property
def uuid(self):
"""Return the UUID of this GATT characteristic."""
return uuid.UUID(str(self._props.Get(_CHARACTERISTIC_INTERFACE, 'UUID')))
def read_value(self):
"""Read the value of this characteristic."""
return self._characteristic.ReadValue()
def write_value(self, value):
"""Write the specified value to this characteristic."""
self._characteristic.WriteValue(value)
def start_notify(self, on_change):
"""Enable notification of changes for this characteristic on the
specified on_change callback. on_change should be a function that takes
one parameter which is the value (as a string of bytes) of the changed
characteristic value.
"""
def characteristic_changed(iface, changed_props, invalidated_props):
if iface != _CHARACTERISTIC_INTERFACE:
return
if 'Value' not in changed_props:
return
on_change(''.join(map(chr, changed_props['Value'])))
self._props.connect_to_signal('PropertiesChanged', characteristic_changed)
self._characteristic.StartNotify()
def stop_notify(self):
"""Disable notification of changes for this characteristic."""
self._characteristic.StopNotify()
def list_descriptors(self):
"""Return list of GATT descriptors that have been discovered for this
characteristic.
"""
paths = self._props.Get(_CHARACTERISTIC_INTERFACE, 'Descriptors')
return map(BluezGattDescriptor,
get_provider()._get_objects_by_path(paths))
class BluezGattDescriptor(GattDescriptor):
"""Bluez GATT descriptor object."""
def __init__(self, dbus_obj):
"""Create an instance of the GATT descriptor from the provided bluez
DBus object.
"""
self._descriptor = dbus.Interface(dbus_obj, _DESCRIPTOR_INTERFACE)
self._props = dbus.Interface(dbus_obj, 'org.freedesktop.DBus.Properties')
@property
def uuid(self):
"""Return the UUID of this GATT descriptor."""
return uuid.UUID(str(self._props.Get(_DESCRIPTOR_INTERFACE, 'UUID')))
def read_value(self):
"""Read the value of this descriptor."""
return self._descriptor.ReadValue()
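# --- Usage sketch (hypothetical; `dbus_obj` stands for a bluez DBus proxy
# obtained through this package's provider) ---
#
# service = BluezGattService(dbus_obj)
# for char in service.list_characteristics():
#     print(char.uuid)
#
# def on_change(value):
#     print('changed: %r' % value)
#
# char.start_notify(on_change)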
|
data/Parsely/pykafka/tests/__init__.py
|
__license__ = """
Copyright 2015 Parse.ly, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
data/ImageEngine/gaffer/python/GafferUI/PresetsPlugValueWidget.py
|
import functools
import IECore
import Gaffer
import GafferUI
class PresetsPlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, plug, parenting = None ) :
self.__menuButton = GafferUI.MenuButton( "", menu = GafferUI.Menu( Gaffer.WeakMethod( self.__menuDefinition ) ) )
GafferUI.PlugValueWidget.__init__( self, self.__menuButton, plug, parenting = parenting )
self.__plugMetadataChangedConnection = Gaffer.Metadata.plugValueChangedSignal().connect( Gaffer.WeakMethod( self.__plugMetadataChanged ) )
self._addPopupMenu( self.__menuButton )
self._updateFromPlug()
def _updateFromPlug( self ) :
self.__menuButton.setEnabled( self._editable() )
text = ""
if self.getPlug() is not None :
with self.getContext() :
text = Gaffer.NodeAlgo.currentPreset( self.getPlug() ) or "Invalid"
self.__menuButton.setText( text )
def __menuDefinition( self ) :
result = IECore.MenuDefinition()
if self.getPlug() is None :
return result
currentPreset = Gaffer.NodeAlgo.currentPreset( self.getPlug() )
for n in Gaffer.NodeAlgo.presets( self.getPlug() ) :
result.append(
"/" + n,
{
"command" : functools.partial( Gaffer.WeakMethod( self.__applyPreset ), preset = n ),
"checkBox" : n == currentPreset,
}
)
return result
def __applyPreset( self, unused, preset ) :
with Gaffer.UndoContext( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
Gaffer.NodeAlgo.applyPreset( self.getPlug(), preset )
def __plugMetadataChanged( self, nodeTypeId, plugPath, key, plug ) :
if self.getPlug() is None :
return
if plug is not None and not plug.isSame( self.getPlug() ) :
return
if not self.getPlug().node().isInstanceOf( nodeTypeId ) :
return
if key.startswith( "preset:" ) :
self._updateFromPlug()
|
data/Nextdoor/ndscheduler/simple_scheduler/jobs/curl_job.py
|
"""A job to send a HTTP (GET or DELETE) periodically."""
import logging
import requests
from ndscheduler import job
logger = logging.getLogger(__name__)
class CurlJob(job.JobBase):
TIMEOUT = 10
@classmethod
def meta_info(cls):
return {
'job_class_string': '%s.%s' % (cls.__module__, cls.__name__),
            'notes': 'This sends an HTTP request to a particular URL',
            'arguments': [
                {'type': 'string', 'description': 'Which URL do you want to call?'},
                {'type': 'string', 'description': 'Which request type do you want? '
                                                  '(currently supported: GET/DELETE)'},
],
'example_arguments': ('["http://localhost:8888/api/v1/jobs", "GET"]'
'["http://localhost:8888/api/v1/jobs/ba12e", "DELETE"]')
}
def run(self, url, request_type, *args, **kwargs):
        print('Calling %s on url: %s' % (request_type, url))
session = requests.Session()
result = session.request(request_type,
url,
timeout=self.TIMEOUT,
headers=None,
data=None)
print(result.text)
if __name__ == "__main__":
job = CurlJob.create_test_instance()
    job.run('http://localhost:8888/api/v1/jobs', 'GET')
|
data/aboutyou/aboutyou-python-sdk/aboutyou/django/middleware.py
|
"""
:Author: Arne Simon [arne.simon@slice-dice.de]
"""
import django.core.exceptions
try:
from django.conf import settings
from django.contrib.auth import authenticate, login
except django.core.exceptions.ImproperlyConfigured:
pass
import logging
logger = logging.getLogger("aboutyou.middleware")
class AboutyouMiddleware(object):
"""
An authentication middleware which uses aboutyou access token.
This class uses the access token in the Authorization header or
the *aboutyou_access_token* cookie for authentication.
.. rubric:: Usage
Add the class in **settings.py** to the middleware classes.
.. code-block:: python
MIDDLEWARE_CLASSES = (
...
'aboutyou.django.middleware.AboutyouMiddleware',
)
AUTH_REDIRECT_PATH = '/redirect'
"""
def process_request(self, request):
try:
user = None
if not request.user.is_authenticated():
access_token = None
if "HTTP_AUTHORIZATION" in request.META:
access_token = request.META["HTTP_AUTHORIZATION"].split(' ')[1]
logger.debug('got Authorization Header token: %s', access_token)
else:
code = request.GET.get('code')
state = request.GET.get('state')
if code and state:
                        redirect_uri = request.build_absolute_uri(settings.AUTH_REDIRECT_PATH)
access_token = settings.AUTH.access_token(code, redirect_uri)['access_token']
if access_token:
user = authenticate(access_token=access_token)
if user is not None and not user.is_anonymous():
login(request, user)
except Exception:
logger.exception('')
|
data/WestpointLtd/pytls/tls/ext_servername.py
|
import struct
from handshake import *
from utils import *
class ServerNameExtension(TLSExtension):
HostName = 0
def __init__(self):
TLSExtension.__init__(self)
@classmethod
    def create(cls, hostname, hostnames=(), name_type=HostName):
if len(hostnames) == 0:
hostnames = [hostname]
name_list = ''
for hostname in hostnames:
name = struct.pack('!BH%ds' % (len(hostname)),
name_type,
len(hostname),
hostname)
name_list += name
data = struct.pack('!HH%ds' % (len(name_list)),
len(name_list) + 2,
len(name_list),
name_list)
return TLSExtension.create(TLSExtension.ServerName, data)
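# --- Usage sketch (hypothetical) ---
# ext = ServerNameExtension.create('example.com')
# `ext` is the wire-format server_name (SNI) extension defined in RFC 6066;
# it can be concatenated with other serialized extensions when assembling a
# ClientHello elsewhere in this package.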
|
data/acil-bwh/SlicerCIP/Scripted/attic/PicasaSnap/gdata/blogger/client.py
|
"""Contains a client to communicate with the Blogger servers.
For documentation on the Blogger API, see:
http://code.google.com/apis/blogger/
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import gdata.client
import gdata.gauth
import gdata.blogger.data
import atom.data
import atom.http_core
BLOGS_URL = 'http://www.blogger.com/feeds/%s/blogs'
BLOG_POST_URL = 'http://www.blogger.com/feeds/%s/posts/default'
BLOG_PAGE_URL = 'http://www.blogger.com/feeds/%s/pages/default'
BLOG_POST_COMMENTS_URL = 'http://www.blogger.com/feeds/%s/%s/comments/default'
BLOG_COMMENTS_URL = 'http://www.blogger.com/feeds/%s/comments/default'
BLOG_ARCHIVE_URL = 'http://www.blogger.com/feeds/%s/archive/full'
class BloggerClient(gdata.client.GDClient):
api_version = '2'
auth_service = 'blogger'
auth_scopes = gdata.gauth.AUTH_SCOPES['blogger']
def get_blogs(self, user_id='default', auth_token=None,
desired_class=gdata.blogger.data.BlogFeed, **kwargs):
return self.get_feed(BLOGS_URL % user_id, auth_token=auth_token,
desired_class=desired_class, **kwargs)
GetBlogs = get_blogs
def get_posts(self, blog_id, auth_token=None,
desired_class=gdata.blogger.data.BlogPostFeed, query=None,
**kwargs):
return self.get_feed(BLOG_POST_URL % blog_id, auth_token=auth_token,
desired_class=desired_class, query=query, **kwargs)
GetPosts = get_posts
def get_pages(self, blog_id, auth_token=None,
desired_class=gdata.blogger.data.BlogPageFeed, query=None,
**kwargs):
return self.get_feed(BLOG_PAGE_URL % blog_id, auth_token=auth_token,
desired_class=desired_class, query=query, **kwargs)
GetPages = get_pages
def get_post_comments(self, blog_id, post_id, auth_token=None,
desired_class=gdata.blogger.data.CommentFeed,
query=None, **kwargs):
return self.get_feed(BLOG_POST_COMMENTS_URL % (blog_id, post_id),
auth_token=auth_token, desired_class=desired_class,
query=query, **kwargs)
GetPostComments = get_post_comments
def get_blog_comments(self, blog_id, auth_token=None,
desired_class=gdata.blogger.data.CommentFeed,
query=None, **kwargs):
return self.get_feed(BLOG_COMMENTS_URL % blog_id, auth_token=auth_token,
desired_class=desired_class, query=query, **kwargs)
GetBlogComments = get_blog_comments
def get_blog_archive(self, blog_id, auth_token=None, **kwargs):
return self.get_feed(BLOG_ARCHIVE_URL % blog_id, auth_token=auth_token,
**kwargs)
GetBlogArchive = get_blog_archive
def add_post(self, blog_id, title, body, labels=None, draft=False,
auth_token=None, title_type='text', body_type='html', **kwargs):
new_entry = gdata.blogger.data.BlogPost(
title=atom.data.Title(text=title, type=title_type),
content=atom.data.Content(text=body, type=body_type))
if labels:
for label in labels:
new_entry.add_label(label)
if draft:
new_entry.control = atom.data.Control(draft=atom.data.Draft(text='yes'))
return self.post(new_entry, BLOG_POST_URL % blog_id, auth_token=auth_token, **kwargs)
AddPost = add_post
def add_page(self, blog_id, title, body, draft=False, auth_token=None,
title_type='text', body_type='html', **kwargs):
new_entry = gdata.blogger.data.BlogPage(
title=atom.data.Title(text=title, type=title_type),
content=atom.data.Content(text=body, type=body_type))
if draft:
new_entry.control = atom.data.Control(draft=atom.data.Draft(text='yes'))
return self.post(new_entry, BLOG_PAGE_URL % blog_id, auth_token=auth_token, **kwargs)
AddPage = add_page
def add_comment(self, blog_id, post_id, body, auth_token=None,
title_type='text', body_type='html', **kwargs):
new_entry = gdata.blogger.data.Comment(
content=atom.data.Content(text=body, type=body_type))
return self.post(new_entry, BLOG_POST_COMMENTS_URL % (blog_id, post_id),
auth_token=auth_token, **kwargs)
AddComment = add_comment
def update(self, entry, auth_token=None, **kwargs):
old_etag = entry.etag
entry.etag = None
response = gdata.client.GDClient.update(self, entry,
auth_token=auth_token, **kwargs)
entry.etag = old_etag
return response
Update = update
def delete(self, entry_or_uri, auth_token=None, **kwargs):
if isinstance(entry_or_uri, (str, unicode, atom.http_core.Uri)):
return gdata.client.GDClient.delete(self, entry_or_uri,
auth_token=auth_token, **kwargs)
old_etag = entry_or_uri.etag
entry_or_uri.etag = None
response = gdata.client.GDClient.delete(self, entry_or_uri,
auth_token=auth_token, **kwargs)
entry_or_uri.etag = old_etag
return response
Delete = delete
class Query(gdata.client.Query):
def __init__(self, order_by=None, **kwargs):
gdata.client.Query.__init__(self, **kwargs)
self.order_by = order_by
def modify_request(self, http_request):
gdata.client._add_query_param('orderby', self.order_by, http_request)
gdata.client.Query.modify_request(self, http_request)
ModifyRequest = modify_request
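# --- Usage sketch (hypothetical; `token` must be a valid gdata auth token,
# and the feed accessors follow the classes in gdata.blogger.data) ---
#
# client = BloggerClient()
# feed = client.get_blogs(auth_token=token)
# blog_id = feed.entry[0].get_blog_id()
# client.add_post(blog_id, 'Hello', '<p>First post.</p>',
#                 labels=['news'], draft=True, auth_token=token)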
|
data/T0ha/ezodf/setup.py
|
import os
from distutils.core import setup
from version import VERSION
AUTHOR_NAME = 'Manfred Moitzi'
AUTHOR_EMAIL = 'mozman@gmx.at'
MAINTAINER_NAME = 'Anton Shvein'
MAINTAINER_EMAIL = 't0hashvein@gmail.com'
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return "File '%s' not found.\n" % fname
setup(name='ezodf',
version=VERSION,
description='A Python package to create/manipulate OpenDocumentFormat files.',
author=AUTHOR_NAME,
url='https://github.com/T0ha/ezodf',
download_url='https://github.com/T0ha/ezodf/releases',
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER_NAME,
maintainer_email=MAINTAINER_EMAIL,
packages=['ezodf'],
provides=['ezodf'],
requires=['weakrefset', 'lxml'],
keywords=['ODF', 'OpenDocumentFormat', 'OpenOffice', 'LibreOffice'],
long_description=read('README.rst')+read('NEWS.rst'),
platforms="OS Independent",
license="MIT License",
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Office/Business :: Office Suites",
]
)
|
data/agoragames/chai/tests/comparator_test.py
|
import unittest
import sys
from chai.comparators import *
if sys.version_info.major==2:
from comparator_py2 import *
class ComparatorsTest(unittest.TestCase):
def test_build_comparators_builds_equals(self):
comp = build_comparators("str")[0]
self.assertTrue(isinstance(comp, Equals))
comp = build_comparators(12)[0]
self.assertTrue(isinstance(comp, Equals))
comp = build_comparators(12.1)[0]
self.assertTrue(isinstance(comp, Equals))
comp = build_comparators([])[0]
self.assertTrue(isinstance(comp, Equals))
comp = build_comparators({})[0]
self.assertTrue(isinstance(comp, Equals))
comp = build_comparators(tuple())[0]
self.assertTrue(isinstance(comp, Equals))
def test_build_comparators_is_a(self):
class CustomObject(object): pass
comp = build_comparators(CustomObject)[0]
self.assertTrue(isinstance(comp, Any))
self.assertTrue( comp.test(CustomObject) )
self.assertTrue( comp.test(CustomObject()) )
def test_build_comparators_passes_comparators(self):
any_comp = Any()
comp = build_comparators(any_comp)[0]
self.assertTrue(comp is any_comp)
def test_equals(self):
comp = Equals(3)
self.assertTrue( comp.test(3) )
self.assertTrue( comp.test(3.0) )
self.assertFalse( comp.test('3') )
def test_equals_repr(self):
comp = Equals(3)
self.assertEquals(str(comp), "3")
def test_eq(self):
comp = Equals(3)
self.assertEquals( comp, 3 )
def test_is_a(self):
comp = IsA(str)
self.assertTrue( comp.test('foo') )
if sys.version_info.major==2:
self.assertFalse( comp.test(bytearray('foo')) )
else:
self.assertFalse( comp.test(bytearray('foo'.encode('ascii'))) )
comp = IsA((str,int))
self.assertTrue( comp.test('') )
self.assertTrue( comp.test(42) )
self.assertFalse( comp.test(3.14) )
def test_is_a_repr(self):
comp = IsA(str)
self.assertEquals(repr(comp), "IsA(str)")
def test_is_a_format_name(self):
comp = IsA(str)
self.assertEquals(comp._format_name(), "str")
comp = IsA((str, list))
self.assertEquals(comp._format_name(), "['str', 'list']")
def test_is(self):
class Test(object):
def __eq__(self, other): return True
obj1 = Test()
obj2 = Test()
comp = Is(obj1)
self.assertEquals( obj1, obj2 )
self.assertTrue( comp.test(obj1) )
self.assertFalse( comp.test(obj2) )
def test_is_repr(self):
class TestObj(object):
def __str__(self):
return "An Object"
obj = TestObj()
self.assertEquals(repr(Is(obj)), "Is(An Object)" )
def test_almost_equal(self):
comp = AlmostEqual(3.14159265, 3)
self.assertTrue( comp.test(3.1416) )
self.assertFalse( comp.test(3.14) )
def test_almost_equal_repr(self):
comp = AlmostEqual(3.14159265, 3)
self.assertEquals(repr(comp), "AlmostEqual(value: 3.14159265, places: 3)")
def test_regex(self):
comp = Regex('[wf][io]{2}')
self.assertTrue( comp.test('fii') )
self.assertTrue( comp.test('woo') )
self.assertFalse( comp.test('fuu') )
def test_regex_repr(self):
comp = Regex('[wf][io]{2}')
self.assertEquals(repr(comp), "Regex(pattern: [wf][io]{2}, flags: 0)")
def test_any(self):
comp = Any(1,2.3,str)
self.assertTrue( comp.test(1) )
self.assertTrue( comp.test(2.3) )
self.assertFalse( comp.test(4) )
def test_any_repr(self):
comp = Any(1,2,3,str)
if sys.version_info.major==2:
self.assertEquals(repr(comp), "Any([1, 2, 3, Any([IsA(str), Is(<type 'str'>)])])")
else:
self.assertEquals(repr(comp), "Any([1, 2, 3, Any([IsA(str), Is(<class 'str'>)])])")
def test_in(self):
comp = In(['foo', 'bar'])
self.assertTrue( comp.test('foo') )
self.assertTrue( comp.test('bar') )
self.assertFalse( comp.test('none') )
def test_in_repr(self):
comp = In(['foo', 'bar'])
self.assertEqual(repr(comp), "In(['foo', 'bar'])")
def test_contains(self):
comp = Contains('foo')
self.assertTrue( comp.test('foobar') )
self.assertTrue( comp.test(['foo','bar']) )
self.assertTrue( comp.test({'foo':'bar'}) )
self.assertFalse( comp.test('feet') )
def test_contains_repr(self):
comp = Contains("foo")
self.assertEqual(repr(comp), "Contains('foo')")
def test_all(self):
comp = All(IsA(bytearray), Equals('foo'.encode('ascii')))
self.assertTrue( comp.test(bytearray('foo'.encode('ascii'))) )
self.assertFalse( comp.test('foo') )
self.assertEquals( 'foo'.encode('ascii'), bytearray('foo'.encode('ascii')) )
def test_all_repr(self):
comp = All(IsA(bytearray), Equals('foobar'))
self.assertEqual(repr(comp), "All([IsA(bytearray), 'foobar'])")
def test_not(self):
comp = Not( Any(1,3) )
self.assertTrue( comp.test(2) )
self.assertFalse( comp.test(1) )
self.assertFalse( comp.test(3) )
def test_no_repr(self):
comp = Not(Any(1,3))
self.assertEqual(repr(comp), "Not([Any([1, 3])])")
def test_function(self):
r = [True,False]
comp = Function(lambda arg: r[arg])
self.assertTrue( comp.test(0) )
self.assertFalse( comp.test(1) )
def test_function_repr(self):
func = lambda arg: True
comp = Function(func)
self.assertEqual(repr(comp), "Function(%s)" % str(func))
def test_ignore(self):
comp = Ignore()
self.assertTrue( comp.test('srsly?') )
def test_ignore_repr(self):
comp = Ignore()
self.assertEqual(repr(comp), "Ignore()")
def test_variable(self):
comp = Variable('foo')
self.assertEquals( 0, len(Variable._cache) )
self.assertTrue( comp.test('bar') )
self.assertEquals( 1, len(Variable._cache) )
self.assertTrue( comp.test('bar') )
self.assertFalse( comp.test('bar2') )
self.assertTrue( Variable('foo').test('bar') )
self.assertFalse( Variable('foo').test('bar2') )
self.assertEquals( 1, len(Variable._cache) )
self.assertEquals( 'bar', comp.value )
self.assertEquals( 'bar', Variable('foo').value )
v = Variable('foo2')
self.assertEquals( 1, len(Variable._cache) )
v.test('dog')
self.assertEquals( 'dog', v.value )
self.assertEquals( 2, len(Variable._cache) )
Variable.clear()
self.assertEquals( 0, len(Variable._cache) )
def test_variable_repr(self):
v = Variable('foo')
self.assertEquals( repr(v), "Variable('foo')" )
def test_like_init(self):
c = Like({'foo':'bar'})
self.assertEquals( {'foo':'bar'}, c._src )
c = Like(['foo', 'bar'])
self.assertEquals( ['foo','bar'], c._src )
def test_like_test(self):
c = Like({'foo':'bar'})
self.assertTrue( c.test({'foo':'bar'}) )
self.assertTrue( c.test({'foo':'bar', 'cat':'dog'}) )
self.assertFalse( c.test({'foo':'barf'}) )
c = Like(['foo','bar'])
self.assertTrue( c.test(['foo','bar']) )
self.assertTrue( c.test(['foo','bar','cat','dog']) )
self.assertFalse( c.test(['foo','barf']) )
def test_like_repr(self):
c = Like({'foo':'bar'})
self.assertEquals( repr(c), "Like({'foo': 'bar'})" )
|
data/Lukasa/hyper/hyper/compat.py
|
"""
hyper/compat
~~~~~~~~~~~~
Normalizes the Python 2/3 API for internal use.
"""
from contextlib import contextmanager
import sys
import zlib
try:
from . import ssl_compat
except ImportError:
ssl_compat = None
_ver = sys.version_info
is_py2 = _ver[0] == 2
is_py2_7_9_or_later = _ver[0:3] >= (2, 7, 9)
is_py3 = _ver[0] == 3
is_py3_3 = is_py3 and _ver[1] == 3
@contextmanager
def ignore_missing():
try:
yield
except (AttributeError, NotImplementedError):
pass
if is_py2:
if is_py2_7_9_or_later:
import ssl
else:
ssl = ssl_compat
from urllib import urlencode
from urlparse import urlparse, urlsplit
from itertools import imap
def to_byte(char):
return ord(char)
def decode_hex(b):
return b.decode('hex')
def write_to_stdout(data):
sys.stdout.write(data + '\n')
sys.stdout.flush()
def zlib_compressobj(level=6, method=zlib.DEFLATED, wbits=15, memlevel=8,
strategy=zlib.Z_DEFAULT_STRATEGY):
return zlib.compressobj(level, method, wbits, memlevel, strategy)
unicode = unicode
bytes = str
elif is_py3:
from urllib.parse import urlencode, urlparse, urlsplit
imap = map
def to_byte(char):
return char
def decode_hex(b):
return bytes.fromhex(b)
def write_to_stdout(data):
sys.stdout.buffer.write(data + b'\n')
sys.stdout.buffer.flush()
zlib_compressobj = zlib.compressobj
if is_py3_3:
ssl = ssl_compat
else:
import ssl
unicode = str
bytes = bytes
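# --- Usage sketch (not part of the original module) ---
# The shims above give byte-level code a single spelling on both
# interpreters; for example, both of these hold on Python 2 and Python 3:
#
# data = decode_hex('01ff')              # b'\x01\xff'
# [to_byte(c) for c in data] == [1, 255]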
|
data/MostAwesomeDude/construct/construct/formats/filesystem/__init__.py
|
"""
file systems on-disk formats (ext2, fat32, ntfs, ...)
and related disk formats (mbr, ...)
"""
|
data/MirantisWorkloadMobility/CloudFerry/tests/lib/os/actions/test_converter_volume_to_image.py
|
import mock
from cloudferry.lib.os.actions import convert_volume_to_image
from cloudferry.lib.utils import utils
from tests import test
class ConverterVolumeToImageTest(test.TestCase):
def setUp(self):
super(ConverterVolumeToImageTest, self).setUp()
self.fake_src_cloud = mock.Mock()
self.fake_storage = mock.Mock()
self.fake_storage.deploy = mock.Mock()
self.fake_storage.upload_volume_to_image.return_value = (
'resp', 'image_id')
self.fake_storage.get_backend.return_value = 'ceph'
self.fake_image = mock.Mock()
self.fake_image.wait_for_status = mock.Mock()
self.fake_image.get_image_by_id_converted = mock.Mock()
self.fake_image.get_image_by_id_converted.return_value = {
'images': {
'image_id': {'image': 'image_body', 'meta': {}}}}
self.fake_image.patch_image = mock.Mock()
self.fake_src_cloud.resources = {'storage': self.fake_storage,
'image': self.fake_image}
self.fake_volumes_info = {
'volumes': {
'id1': {
'volume': {
'id': 'id1',
'display_name': 'dis1',
},
'meta': {
'image': 'image',
},
}},
}
self.fake_dst_cloud = mock.Mock()
self.fake_config = utils.ext_dict(migrate=utils.ext_dict(
{'disk_format': 'qcow',
'container_format': 'bare'}))
self.fake_init = {
'src_cloud': self.fake_src_cloud,
'dst_cloud': self.fake_dst_cloud,
'cfg': self.fake_config
}
def test_action(self):
fake_action = convert_volume_to_image.ConvertVolumeToImage(
self.fake_init,
cloud='src_cloud')
res = fake_action.run(self.fake_volumes_info)
self.assertEqual('image_body',
res['images_info']['images']['image_id']['image'])
self.assertEqual('dis1',
res['images_info']['images']['image_id']['meta'][
'volume']['display_name'])
|
data/acil-bwh/SlicerCIP/Scripted/attic/PicasaSnap/gdata/alt/app_engine.py
|
"""Provides functions to persist serialized auth tokens in the datastore.
The get_token and set_token functions should be used in conjunction with
gdata.gauth's token_from_blob and token_to_blob to allow auth token objects
to be reused across requests. It is up to your own code to ensure that the
token keys are unique.
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
from google.appengine.ext import db
from google.appengine.api import memcache
class Token(db.Model):
"""Datastore Model which stores a serialized auth token."""
t = db.BlobProperty()
def get_token(unique_key):
"""Searches for a stored token with the desired key.
Checks memcache and then the datastore if required.
Args:
unique_key: str which uniquely identifies the desired auth token.
Returns:
A string encoding the auth token data. Use gdata.gauth.token_from_blob to
convert back into a usable token object. None if the token was not found
in memcache or the datastore.
"""
token_string = memcache.get(unique_key)
if token_string is None:
token = Token.get_by_key_name(unique_key)
if token is None:
return None
return token.t
return token_string
def set_token(unique_key, token_str):
"""Saves the serialized auth token in the datastore.
The token is also stored in memcache to speed up retrieval on a cache hit.
Args:
unique_key: The unique name for this token as a string. It is up to your
code to ensure that this token value is unique in your application.
      Previous values will be silently overwritten.
token_str: A serialized auth token as a string. I expect that this string
will be generated by gdata.gauth.token_to_blob.
Returns:
    True if the token was stored successfully, False if the token could not be
safely cached (if an old value could not be cleared). If the token was
set in memcache, but not in the datastore, this function will return None.
However, in that situation an exception will likely be raised.
Raises:
Datastore exceptions may be raised from the App Engine SDK in the event of
failure.
"""
result = memcache.set(unique_key, token_str)
if not result:
result = memcache.delete(unique_key)
if result == 0:
return False
if Token(key_name=unique_key, t=token_str).put():
return True
return None
def delete_token(unique_key):
memcache.delete(unique_key)
Token(key_name=unique_key).delete()
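# --- Usage sketch (hypothetical handler code; token_to_blob and
# token_from_blob live in gdata.gauth, as noted in the module docstring) ---
#
# import gdata.gauth
# blob = get_token('user:12345')
# token = gdata.gauth.token_from_blob(blob) if blob else None
# ... use (and possibly refresh) the token ...
# set_token('user:12345', gdata.gauth.token_to_blob(token))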
|
data/StackStorm/st2/st2tests/st2tests/actions.py
|
from unittest2 import TestCase
from st2actions.runners.utils import get_action_class_instance
from st2tests.mocks.action import MockActionWrapper
from st2tests.mocks.action import MockActionService
__all__ = [
'BaseActionTestCase'
]
class BaseActionTestCase(TestCase):
"""
Base class for Python runner action tests.
"""
action_cls = None
def setUp(self):
super(BaseActionTestCase, self).setUp()
class_name = self.action_cls.__name__
action_wrapper = MockActionWrapper(pack='tests', class_name=class_name)
self.action_service = MockActionService(action_wrapper=action_wrapper)
def get_action_instance(self, config=None):
"""
Retrieve instance of the action class.
"""
instance = get_action_class_instance(action_cls=self.action_cls,
config=config,
action_service=self.action_service)
return instance
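# --- Usage sketch (hypothetical; `MyEchoAction` stands in for a real
# Python-runner action class from a pack) ---
#
# class MyEchoActionTestCase(BaseActionTestCase):
#     action_cls = MyEchoAction
#
#     def test_run(self):
#         action = self.get_action_instance(config={'greeting': 'hi'})
#         self.assertEqual(action.run(name='st2'), 'hi st2')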
|
data/Theano/Theano/theano/tensor/nnet/Conv3D.py
|
from __future__ import absolute_import, print_function, division
import numpy as N
from six.moves import xrange
import theano
from theano.tensor import basic as T
from theano.tensor.blas_headers import blas_header_text, blas_header_version
from theano.tensor.blas import ldflags
from theano.misc import strutil
from theano.gradient import grad_undefined
class Conv3D(theano.Op):
"""
3D `convolution` of multiple filters on a minibatch.
Notes
-----
Does not flip the kernel, moves kernel with a user specified stride.
"""
__props__ = ()
def c_code_cache_version(self):
return (3, blas_header_version())
def make_node(self, V, W, b, d):
"""
Parameters
----------
V
Visible unit, input(batch,row,column,time,in channel)
W
Weights, filter(out channel,row,column,time,in channel)
b
bias, shape == (W.shape[0],)
d
strides when moving the filter over the input(dx,dy,dt)
"""
V_ = T.as_tensor_variable(V)
W_ = T.as_tensor_variable(W)
b_ = T.as_tensor_variable(b)
d_ = T.as_tensor_variable(d)
bcast = (V_.broadcastable[0], False, False, False, W_.broadcastable[0])
node = theano.Apply(self, inputs=[V_, W_, b_, d_],
outputs=[T.TensorType(V_.dtype, bcast)()])
return node
def grad(self, inputs, output_gradients):
V, W, b, d = inputs
dCdH, = output_gradients
dCdV = theano.tensor.nnet.convTransp3D(
W, T.zeros_like(V[0, 0, 0, 0, :]), d, dCdH, V.shape[1:4])
dCdV = T.patternbroadcast(dCdV, V.broadcastable)
WShape = W.shape
dCdW = theano.tensor.nnet.convGrad3D(V, d, WShape, dCdH)
dCdW = T.patternbroadcast(dCdW, W.broadcastable)
dCdb = T.sum(dCdH, axis=(0, 1, 2, 3))
dCdb = T.patternbroadcast(dCdb, b.broadcastable)
dCdd = grad_undefined(
self, 3, inputs[3],
"The gradient of Conv3D with respect to the convolution"
" stride is undefined because Conv3D is only defined for"
" integer strides.")
if 'name' in dir(dCdH) and dCdH.name is not None:
dCdH_name = dCdH.name
else:
dCdH_name = 'anon_dCdH'
if 'name' in dir(V) and V.name is not None:
V_name = V.name
else:
V_name = 'anon_V'
if 'name' in dir(W) and W.name is not None:
W_name = W.name
else:
W_name = 'anon_W'
if 'name' in dir(b) and b.name is not None:
b_name = b.name
else:
b_name = 'anon_b'
dCdV.name = 'Conv3D_dCdV(dCdH=' + dCdH_name + ',V=' + V_name + ')'
dCdW.name = ('Conv3D_dCdW(dCdH=' + dCdH_name + ',V=' + V_name +
',W=' + W_name + ')')
dCdb.name = ('Conv3D_dCdb(dCdH=' + dCdH_name + ',V=' + V_name +
',W=' + W_name + ',b=' + b_name + ')')
return [dCdV, dCdW, dCdb, dCdd]
def perform(self, node, inputs, output_storage):
V, W, b, d = inputs
output_storage[0][0] = computeH(V, W, b, d)
def infer_shape(self, node, input_shapes):
V, W, b, d = node.inputs
V_shape, W_shape, b_shape, d_shape = input_shapes
dr = d[0]
dc = d[1]
dt = d[2]
batch_size = V_shape[0]
output_channels = W_shape[0]
vidHeight = V_shape[1]
filterHeight = W_shape[1]
vidWidth = V_shape[2]
filterWidth = W_shape[2]
vidDur = V_shape[3]
filterDur = W_shape[3]
output_height = ((vidHeight - filterHeight) // dr) + 1
output_width = ((vidWidth - filterWidth) // dc) + 1
output_dur = ((vidDur - filterDur) // dt) + 1
rval = (batch_size, output_height, output_width, output_dur, output_channels)
return [rval]
def c_support_code(self):
return blas_header_text()
def c_libraries(self):
return ldflags()
def c_compile_args(self):
flags = ldflags(libs=False, flags=True)
return flags
def c_lib_dirs(self):
return ldflags(libs=False, libs_dir=True)
def c_header_dirs(self):
return ldflags(libs=False, include_dir=True)
def c_code(self, node, nodename, inputs, outputs, sub):
V, W, b, d = inputs
fail = sub['fail']
H = outputs[0]
codeSource = """
///////////// < code generated by Conv3D >
//printf("\t\t\t\tConv3D c code\\n");
//Check dimensionality of inputs
if (PyArray_NDIM(%(W)s) != 5)
{
PyErr_Format(PyExc_ValueError, "Conv3D: W must be a 5 dimensional tensor");
%(fail)s
}
if (PyArray_NDIM(%(V)s) != 5)
{
PyErr_Format(PyExc_ValueError, "Conv3D: V must be a 5 dimensional tensor");
%(fail)s
}
if (PyArray_NDIM(%(b)s) != 1)
{
PyErr_Format(PyExc_ValueError,"Conv3D: b must be a vector.");
%(fail)s
}
if (PyArray_NDIM(%(d)s) != 1)
{
PyErr_Format(PyExc_ValueError,"Conv3D: d must be a vector.");
%(fail)s
}
if (PyArray_DIMS(%(d)s)[0] != 3)
{
PyErr_Format(PyExc_ValueError,"Conv3D: 3 stride length arguments expected (row, col, time) but %%li were given", (long)PyArray_DIMS(%(d)s)[0]);
%(fail)s
}
//Read and check sizes of inputs
{ // exta scope so error handler jumps don't cause errors
const int batchSize = PyArray_DIMS(%(V)s)[0];
const int outputChannels = PyArray_DIMS(%(W)s)[0];
const int inputChannels = PyArray_DIMS(%(V)s)[4];
if (PyArray_DIMS(%(W)s)[4] != inputChannels)
{
PyErr_Format(PyExc_ValueError, "Conv3D: W operates on a %%ld channel image but the image has %%d channels. Overall shape of input: (%%ld,%%ld,%%ld,%%ld,%%ld)", (long)PyArray_DIMS(%(W)s)[4], inputChannels, (long)PyArray_DIMS(%(V)s)[0], (long)PyArray_DIMS(%(V)s)[1], (long)PyArray_DIMS(%(V)s)[2], (long)PyArray_DIMS(%(V)s)[3], (long)PyArray_DIMS(%(V)s)[4]);
%(fail)s
}
if (PyArray_DIMS(%(b)s)[0] != outputChannels)
{
PyErr_Format(PyExc_ValueError, "Conv3D: b adds to a(n) %%ld channel output image but the output has %%d channels", (long)PyArray_DIMS(%(b)s)[0], outputChannels);
%(fail)s
}
{ //extra scope so error handler jumps don't cause errors
const int filterHeight = PyArray_DIMS(%(W)s)[1];
const int filterWidth = PyArray_DIMS(%(W)s)[2];
const int filterDur = PyArray_DIMS(%(W)s)[3];
const int vidHeight = PyArray_DIMS(%(V)s)[1];
const int vidWidth = PyArray_DIMS(%(V)s)[2];
const int vidDur = PyArray_DIMS(%(V)s)[3];\
if (vidHeight < filterHeight)
{
PyErr_Format(PyExc_ValueError, "W has a height of %%i but V is only %%i pixels tall",filterHeight,vidHeight);
%(fail)s
}
{ // extra scope so fail works
if (vidWidth < filterWidth)
{
PyErr_Format(PyExc_ValueError, "W has a width of %%i but V is only %%i pixels wide",filterWidth,vidWidth);
%(fail)s
}
{ // extra scope so fail works
if (vidDur < filterDur)
{
PyErr_Format(PyExc_ValueError, "W has a duration of %%i but V is only %%i pixels long",filterDur,vidDur);
%(fail)s
}
{ // extra scope so fail works
//Read and check stride arguments
const int dr = *(dtype_%(d)s*) PyArray_GETPTR1(%(d)s,0);
const int dc = *(dtype_%(d)s*) PyArray_GETPTR1(%(d)s,1);
const int dt = *(dtype_%(d)s*) PyArray_GETPTR1(%(d)s,2);
if (dr <= 0 || dc <= 0 || dt <= 0)
{
PyErr_Format(PyExc_ValueError,"Conv3D: Strides must all be positive but are %%i, %%i, %%i",dr,dc,dt);
%(fail)s
}
{ // extra scope so fail works
//Make correctly sized output
const long long outputHeight = int( (vidHeight - filterHeight) / dr )+1;
const long long outputWidth = int( (vidWidth - filterWidth) / dc )+1;
const long long outputDur = int( (vidDur - filterDur) / dt ) +1;
npy_intp dims[5];
dims[0] = batchSize;
dims[4] = outputChannels;
dims[1] = outputHeight;
dims[2] = outputWidth;
dims[3] = outputDur;
if(!(%(H)s) || PyArray_DIMS(%(H)s)[0]!=dims[0] ||
PyArray_DIMS(%(H)s)[1]!=dims[1] ||
PyArray_DIMS(%(H)s)[2]!=dims[2] ||
PyArray_DIMS(%(H)s)[3]!=dims[3] ||
PyArray_DIMS(%(H)s)[4]!=dims[4]){
Py_XDECREF(%(H)s);
%(H)s = (PyArrayObject *) PyArray_SimpleNew(5, dims, PyArray_DESCR(%(V)s)->type_num);
if (!(%(H)s)) {
PyErr_Format(PyExc_MemoryError,"Conv3D: Could not allocate output.");
%(fail)s
}
}
{ // extra scope so fail works
const int ws0 = PyArray_STRIDES(%(W)s)[0];
const int ws1 = PyArray_STRIDES(%(W)s)[1];
const int ws2 = PyArray_STRIDES(%(W)s)[2];
const int vs1 = PyArray_STRIDES(%(V)s)[1];
const int ws4 = PyArray_STRIDES(%(W)s)[4];
const int vs4 = PyArray_STRIDES(%(V)s)[4];
const int ws3 = PyArray_STRIDES(%(W)s)[3];
const int vs3 = PyArray_STRIDES(%(V)s)[3];
const int vs2 = PyArray_STRIDES(%(V)s)[2];
const int bs = PyArray_STRIDES(%(b)s)[0];
const int hs4 = PyArray_STRIDES(%(H)s)[4];
// Compute H
//H[i,j,x,y,t] = b_j + sum_k sum_l sum_m sum_z W[j,z,k,l,m] V[i,z, dr*r+k,dc*c+l,dt*t+m]
//TODO: add special cases
// ex: filterDur == 1 && batchSize == 1 && dt = 1 (for SFA)
// ex: inputChannels == 1 """
VV, WV, bv, dv = node.inputs
HV = node.outputs[0]
if (theano.config.blas.ldflags and
VV.dtype == WV.dtype and HV.dtype == VV.dtype):
if VV.dtype == 'float64':
gemv = 'dgemv_'
elif VV.dtype == 'float32':
gemv = 'sgemv_'
else:
                raise Exception('Unrecognized dtype for convolution ' + VV.dtype)
codeSource += """
if (inputChannels > 20 && outputChannels > 20 && ws4 == sizeof(ELEM_AT(%(W)s,0)))
{
//std::cout << "lots of channels special case code" << std::endl;
const blas_type constant_one = 1.0;
char N = 'T';
int ws0e = ws0 / sizeof(ELEM_AT(%(W)s,0));
int vs4e = vs4 / sizeof(ELEM_AT(%(V)s,4));
int hs4e = hs4 / sizeof(ELEM_AT(%(H)s,4));
//special case code for the "lots of channels" case
//uses a BLAS matrix vector multiply to compute the contribute for
//all channels of an input pixel to all channels of an output pixel
//simultaneously
long long Hpos = 0;
long long Vpos = 0;
for (int i = 0; i < batchSize; i++) {
long long Hposi = Hpos;
long long Vposi = Vpos;
for (int r = 0; r < outputHeight; r++) {
long long Hposr = Hpos;
long long Vposr = Vpos;
for (int c = 0; c < outputWidth; c++) {
long long Hposc = Hpos;
long long Vposc = Vpos;
for (int t = 0; t < outputDur; t++) {
long long Hpost = Hpos;
long long Vpost = Vpos;
//of the loops so far, j should be the innermost, because
//each loop through j visits the same elements of V
//this implies that the last index of H should be the j index
//since V and H should have the same format, this means
//z should be the last index in v, and therefore the innermost
//of the next set of for loops
int Wpos = 0;
int bPos = 0;
long long Hposj = Hpos;
for (int j = 0; j < outputChannels; j++) {
// H[i,r,c,t,j] = b[j]
ELEM_AT(%(H)s,Hposj) = ELEM_AT(%(b)s,bPos);
Hposj += hs4;
bPos += bs;
}
dtype_%(H)s * writePos = & ELEM_AT(%(H)s,Hpos);
for (int k =0; k < filterHeight; k++) {
int Wposk = Wpos;
long long Vposk = Vpos;
for (int l = 0; l < filterWidth; l++) {
int Wposl = Wpos;
long long Vposl = Vpos;
for (int m = 0; m < filterDur; m++) {
//H[i,r,c,t,:] += N.dot(W[:,k,l,m,:],V[i,dr*r+k,dc*c+l,dt*t+m,:])
//note: changing the weights so that outputChannels and inputChannels were the last two rather than
//the first and last elements did not speed this up, even for extremely large input sizes
%(gemv)s(&N, & inputChannels, & outputChannels,
&constant_one, & ELEM_AT( %(W)s , Wpos),& ws0e,
& ELEM_AT(%(V)s, Vpos),& vs4e, &constant_one,
writePos,& hs4e);
Wpos += ws3;
Vpos += vs3;
} // close m
Wpos = Wposl + ws2;
Vpos = Vposl + vs2;
} //close l
Wpos = Wposk + PyArray_STRIDES(%(W)s)[1];
Vpos = Vposk + PyArray_STRIDES(%(V)s)[1];
} //close k
Hpos = Hpost + PyArray_STRIDES(%(H)s)[3];
Vpos = Vpost + vs3 * dt;
} //close t
Hpos = Hposc + PyArray_STRIDES(%(H)s)[2];
Vpos = Vposc + vs2 * dc;
} //close c
Hpos = Hposr + PyArray_STRIDES(%(H)s)[1];
Vpos = Vposr + PyArray_STRIDES(%(V)s)[1] * dr;
} //closes r
Hpos = Hposi + PyArray_STRIDES(%(H)s)[0];
Vpos = Vposi + PyArray_STRIDES(%(V)s)[0];
} //closes i
} //closes "lots of channels" special case code
else
"""
codeSource += """
{
//General case code
//std::cout << "general case code" << std::endl;
long long Hpos = 0;
long long Vpos = 0;
for (int i = 0; i < batchSize; i++) {
long long Hposi = Hpos;
long long Vposi = Vpos;
for (int r = 0; r < outputHeight; r++) {
long long Hposr = Hpos;
long long Vposr = Vpos;
for (int c = 0; c < outputWidth; c++) {
long long Hposc = Hpos;
long long Vposc = Vpos;
for (int t = 0; t < outputDur; t++) {
long long Hpost = Hpos;
long long Vpost = Vpos;
//of the loops so far, j should be the innermost, because
//each loop through j visits the same elements of V
//this implies that the last index of H should be the j index
//since V and H should have the same format, this means
//z should be the last index in v, and therefore the innermost
//of the next set of for loops
int Wpos = 0;
int bPos = 0;
for (int j = 0; j < outputChannels; j++) {
long long Hposj = Hpos;
long long Vposj = Vpos;
int Wposj = Wpos;
// H[i,r,c,t,j] = b[j]
dtype_%(H)s & writePos = ELEM_AT(%(H)s,Hpos);
writePos = ELEM_AT(%(b)s,bPos);
for (int k =0; k < filterHeight; k++) {
int Wposk = Wpos;
long long Vposk = Vpos;
for (int l = 0; l < filterWidth; l++) {
int Wposl = Wpos;
long long Vposl = Vpos;
for (int m = 0; m < filterDur; m++) {
int Wposm = Wpos;
long long Vposm = Vpos;
for (int z = 0; z < inputChannels; z++) {
//H[i,r,c,t,j] += W[j,z,k,l,m] * V[i,dr*r+k, dc*c+l, dt*t+m,z]
writePos += ELEM_AT(%(W)s,Wpos) * ELEM_AT(%(V)s,Vpos);
Wpos += ws4;
Vpos += vs4;
} // close z
Wpos = Wposm + ws3;
Vpos = Vposm + vs3;
} // close m
Wpos = Wposl + ws2;
Vpos = Vposl + vs2;
} //close l
Wpos = Wposk + PyArray_STRIDES(%(W)s)[1];
Vpos = Vposk + PyArray_STRIDES(%(V)s)[1];
} //close k
bPos += bs;
Wpos = Wposj + ws0;
Hpos = Hposj + hs4;
Vpos = Vposj;
//std::cout << "incremented Wpos by " << ws0 << std::endl;
//std::cout << "incremented Hpos by " << hs4 << std::endl;
} //close j
Hpos = Hpost + PyArray_STRIDES(%(H)s)[3];
Vpos = Vpost + vs3 * dt;
} //close t
Hpos = Hposc + PyArray_STRIDES(%(H)s)[2];
Vpos = Vposc + vs2 * dc;
} //close c
Hpos = Hposr + PyArray_STRIDES(%(H)s)[1];
Vpos = Vposr + PyArray_STRIDES(%(V)s)[1] * dr;
} //closes r
Hpos = Hposi + PyArray_STRIDES(%(H)s)[0];
Vpos = Vposi + PyArray_STRIDES(%(V)s)[0];
} //closes i
} //closes general case code
}}}}}}} //extra scope so error handler jumps don't cross declarations
///////////// < /code generated by Conv3D >
"""
return strutil.render_string(codeSource, locals())
_conv3D = Conv3D()
def conv3D(V, W, b, d):
"""
3D "convolution" of multiple filters on a minibatch.
(does not flip the kernel, moves kernel with a user specified stride)
Parameters
----------
V
Visible unit, input.
Dimensions: (batch, row, column, time, in channel).
W
Weights, filter.
Dimensions: (out channel, row, column, time ,in channel).
b
Bias, shape == (W.shape[0],).
d
        Strides when moving the filter over the input (dx, dy, dt).
Notes
-----
The order of dimensions does not correspond to the one in `conv2d`.
This is for optimization.
The GPU implementation is very slow. You should use
:func:`conv3d2d <theano.tensor.nnet.conv3d2d.conv3d>` or
:func:`conv3d_fft <theano.sandbox.cuda.fftconv.conv3d_fft>` for a
GPU graph instead.
See Also
--------
Someone made a script that shows how to swap the axes
between both 3d convolution implementations in Theano. See
the last `attachment <https://groups.google.com/d/msg/theano-users/1S9_bZgHxVw/0cQR9a4riFUJ>`_
"""
return _conv3D(V, W, b, d)
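# --- Hedged usage sketch (not part of the original module) ---
# Symbolic wiring for conv3D under assumed imports; the shapes and the
# ivector stride type are illustrative only, following the docstring layout:
#   import theano.tensor as T
#   V = T.TensorType('float32', (False,) * 5)('V')  # (batch, row, col, time, in)
#   W = T.TensorType('float32', (False,) * 5)('W')  # (out, row, col, time, in)
#   b = T.vector('b')
#   d = T.ivector('d')                              # strides (dr, dc, dt)
#   H = conv3D(V, W, b, d)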
def computeH(V, W, b, d):
assert len(W.shape) == 5
assert len(V.shape) == 5
    assert len(b.shape) == 1, b.shape
assert len(d) == 3
batchSize = V.shape[0]
outputChannels = W.shape[0]
inputChannels = V.shape[4]
if W.shape[4] != inputChannels:
raise Exception("W.shape[4] = " + str(W.shape[4]) + " but inputChannels = " + str(inputChannels))
filterHeight = W.shape[1]
filterWidth = W.shape[2]
filterDur = W.shape[3]
vidHeight = V.shape[1]
vidWidth = V.shape[2]
vidDur = V.shape[3]
assert vidHeight >= filterHeight
assert vidWidth >= filterWidth
assert vidDur >= filterDur
dx, dy, dt = d
assert dx > 0
assert dy > 0
assert dt > 0
outputHeight = int((vidHeight - filterHeight) / dx) + 1
outputWidth = int((vidWidth - filterWidth) / dy) + 1
outputDur = int((vidDur - filterDur) / dt) + 1
H = N.zeros((batchSize, outputHeight,
outputWidth, outputDur, outputChannels), dtype=V.dtype)
for i in xrange(0, H.shape[0]):
for j in xrange(0, H.shape[4]):
for x in xrange(0, H.shape[1]):
for y in xrange(0, H.shape[2]):
for t in xrange(0, H.shape[3]):
H[i, x, y, t, j] = b[j]
for k in xrange(0, filterHeight):
for l in xrange(0, filterWidth):
for m in xrange(0, filterDur):
for z in xrange(0, inputChannels):
w = W[j, k, l, m, z]
v = V[i, d[0] * x + k, d[1] * y + l, d[2] * t + m, z]
H[i, x, y, t, j] += w * v
return H
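# --- Hedged shape check (not part of the original module) ---
# Exercises the stride arithmetic above on toy dimensions; numpy is the only
# assumption beyond what computeH already requires.
if __name__ == "__main__":
    import numpy
    V = numpy.zeros((1, 5, 5, 5, 2))  # batch=1, 5x5x5 video, 2 input channels
    W = numpy.zeros((3, 2, 2, 2, 2))  # 3 output channels, 2x2x2 filters
    b = numpy.zeros(3)
    H = computeH(V, W, b, (1, 1, 1))
    # (vid - filter) // stride + 1 = (5 - 2) // 1 + 1 = 4 along each spatial axis
    assert H.shape == (1, 4, 4, 4, 3)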
|
data/OctavianLee/Pywechat/pywechat/services/basic.py
|
import json
import time
import requests
from pywechat.excepts import WechatError
class Basic(object):
"""The basic class of all services.
Attributes:
app_id: the app id of a wechat account.
app_secret: the app secret of a wechat account.
        access_token: the access token requested from wechat.
        token_expires_at: the time at which the access token expires.
"""
def __init__(self, app_id, app_secret):
"""Initializes the service."""
self.__app_id = app_id
self.__app_secret = app_secret
        self.__access_token = None
        self.__token_expires_at = None
        # The property sees an empty cache and fetches a fresh token.
        self.__access_token = self.access_token
@property
def access_token(self):
'''Gets the access token.'''
if self.__access_token and self.__token_expires_at:
if self.__token_expires_at - time.time() > 60:
return self.__access_token
self._grant_access_token()
return self.__access_token
def _send_request(self, method, url, **kwargs):
"""Sends a request to the server.
Args:
            method: the HTTP method of the request ('get', 'post', etc).
url: the request's url.
            kwargs: extra keyword arguments passed through to requests.
Returns:
            the json data returned by the server.
Raises:
            WechatError: raised if the response contains an error.
"""
if not kwargs.get('params'):
kwargs['params'] = {
"access_token": self.access_token
}
if kwargs.get('data'):
data = json.dumps(kwargs['data']).encode('utf-8')
kwargs["data"] = data
        response = requests.request(
            method=method,
            url=url,
            **kwargs
        )
        response.raise_for_status()
        json_data = response.json()
self._check_wechat_error(json_data)
return json_data
@classmethod
def _check_wechat_error(cls, json_data):
"""Check whether the data from the plaform of wechat is an error.
Args:
json_data: the json data gained from the wechat.
Raises:
            WechatError: raised if the response contains an error.
"""
errcode = json_data.get('errcode')
if errcode and errcode != 0:
raise WechatError(errcode, json_data.get('errmsg'))
def _grant_access_token(self):
"""Gets the access token from wechat.
Public account can use this method with APPID and APPSecret to gain
the access token.
Link:
https://mp.weixin.qq.com/wiki/11/0e4b294685f817b95cbed85ba5e82b8f.html
Returns:
            the json data. Example:
{"access_token":"ACCESS_TOKEN","expires_in":7200}
Raises:
            WechatError: raised if the response contains an error.
"""
url = 'https://api.weixin.qq.com/cgi-bin/token'
params = {
"grant_type": "client_credential",
"appid": self.__app_id,
"secret": self.__app_secret
}
json_data = self._send_request('get', url, params=params)
self.__access_token = json_data.get('access_token')
self.__token_expires_at = int(
time.time()) + json_data.get('expires_in')
return json_data
def _get_wechat_server_ips(self):
"""Gets the ip list from wechat.
        For security reasons, callers may need the list of wechat server IP
        addresses in order to whitelist them.
Link:
https://mp.weixin.qq.com/wiki/0/2ad4b6bfd29f30f71d39616c2a0fcedc.html
Returns:
            the json data. Example:
{
"ip_list":["127.0.0.1","127.0.0.1"]
}
Raises:
            WechatError: raised if the response contains an error.
"""
url = "https://api.weixin.qq.com/cgi-bin/getcallbackip"
params = {
"access_token": self.access_token
}
json_data = self._send_request('get', url, params=params)
return json_data
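# --- Hedged usage sketch (not part of the original module) ---
# Exercises the error-checking convention offline: a non-zero "errcode"
# raises WechatError, while a zero or absent one passes silently.
if __name__ == "__main__":
    Basic._check_wechat_error({"errcode": 0, "errmsg": "ok"})
    try:
        Basic._check_wechat_error({"errcode": 40013, "errmsg": "invalid appid"})
    except WechatError:
        print("caught WechatError as expected")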
|
data/KKBOX/mass/setup.py
|
import os
from setuptools import setup, find_packages
with open(os.path.join(os.path.dirname(__file__), 'requirements.txt')) as f:
required = f.read().splitlines()
setup(
name='mass',
version='0.1.0',
description='Computer Farm Management',
author='',
author_email='',
packages=find_packages(exclude=['docs', 'contrib', 'tests*']),
include_package_data=True,
setup_requires=[
'versiontools >= 1.8',
],
install_requires=required,
entry_points={
'console_scripts': [
'mass = mass.cli:cli',
],
},
)
del required
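# Hedged usage note (not part of the original setup script): with this layout
# a development install is typically `pip install -e .`, which also exposes
# the `mass` console script declared in entry_points above.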
|
data/Hyphen-ated/RebirthItemTracker/src/item_tracker.py
|
""" This module handles everything related to the tracker behaviour. """
import json
import time
import urllib2
import logging
from view_controls.view import DrawingTool, Event
from game_objects.item import Item
from game_objects.state import TrackerState, TrackerStateEncoder
from log_parser import LogParser
from options import Options
class IsaacTracker(object):
""" The main class of the program """
def __init__(self, logging_level=logging.INFO, read_timer=1):
self.read_timer = read_timer
self.file_prefix = "../"
self.log = logging.getLogger("tracker")
self.log.addHandler(logging.FileHandler(self.file_prefix + "tracker_log.txt", mode='w'))
self.log.setLevel(logging_level)
with open(self.file_prefix + "items.json", "r") as items_file:
Item.items_info = json.load(items_file)
with open(self.file_prefix + 'version.txt', 'r') as f:
self.tracker_version = f.read()
Options().load_options(self.file_prefix + "options.json")
def __del__(self):
Options().save_options(self.file_prefix + "options.json")
def check_for_update(self):
""" Returns text to put in the title bar """
try:
latest = "https://api.github.com/repos/Hyphen-ated/RebirthItemTracker/releases/latest"
github_info_json = urllib2.urlopen(latest).read()
info = json.loads(github_info_json)
latest_version = info["name"]
title_text = " v" + self.tracker_version
if latest_version != self.tracker_version:
title_text += " (new version available)"
return title_text
except Exception as e:
self.log.debug("Failed to find update info: " + e.message)
return ""
def run(self):
""" The main routine which controls everything """
update_notifier = self.check_for_update()
framecount = 0
drawing_tool = DrawingTool(self.file_prefix)
drawing_tool.set_window_title(update_notifier)
parser = LogParser(self.file_prefix, self.tracker_version)
opt = Options()
log = logging.getLogger("tracker")
event_result = None
state = None
read_from_server = opt.read_from_server
write_to_server = opt.write_to_server
state_version = -1
twitch_username = None
new_states_queue = []
screen_error_message = None
while event_result != Event.DONE:
event_result = drawing_tool.handle_events()
if opt.read_from_server != read_from_server\
or opt.twitch_name != twitch_username:
twitch_username = opt.twitch_name
read_from_server = opt.read_from_server
new_states_queue = []
if read_from_server:
state_version = -1
state = None
drawing_tool.set_window_title(update_notifier, watching_player=twitch_username, updates_queued=len(new_states_queue))
else:
drawing_tool.set_window_title(update_notifier)
if opt.write_to_server and opt.write_to_server != write_to_server:
write_to_server = True
drawing_tool.set_window_title(update_notifier, uploading=True)
if not opt.write_to_server:
write_to_server = False
if opt.read_from_server:
update_timer = 2
else:
update_timer = self.read_timer
if event_result == Event.OPTIONS_UPDATE:
framecount = 0
screen_error_message = None
if state is not None:
state.modified = True
if (framecount % int(Options().framerate_limit * update_timer) == 0):
if opt.read_from_server:
base_url = opt.trackerserver_url + "/tracker/api/user/" + opt.twitch_name
json_dict = None
try:
json_version = urllib2.urlopen(base_url + "/version").read()
if int(json_version) > state_version:
json_state = urllib2.urlopen(base_url).read()
json_dict = json.loads(json_state)
new_state = TrackerState.from_json(json_dict)
if new_state is None:
raise Exception
state_version = int(json_version)
new_states_queue.append((state_version, new_state))
drawing_tool.set_window_title(update_notifier, watching_player=twitch_username, updates_queued=len(new_states_queue), read_delay=opt.read_delay)
except Exception:
state = None
log.error("Couldn't load state from server")
import traceback
log.error(traceback.format_exc())
if json_dict is not None:
their_version = ""
if "tracker_version" in json_dict:
their_version = json_dict["tracker_version"]
else:
their_version = "0.10-beta1"
if their_version != self.tracker_version:
screen_error_message = "They are using tracker version " + their_version + " but you have " + self.tracker_version
else:
force_draw = state and state.modified
state = parser.parse()
if force_draw:
state.modified = True
if write_to_server and not opt.trackerserver_authkey:
screen_error_message = "Your authkey is blank. Get a new authkey in the options menu and paste it into the authkey text field."
if state is not None and write_to_server and state.modified and screen_error_message is None:
opener = urllib2.build_opener(urllib2.HTTPHandler)
put_url = opt.trackerserver_url + "/tracker/api/update/" + opt.trackerserver_authkey
json_string = json.dumps(state, cls=TrackerStateEncoder, sort_keys=True)
request = urllib2.Request(put_url,
data=json_string)
request.add_header('Content-Type', 'application/json')
request.get_method = lambda: 'PUT'
try:
result = opener.open(request)
result_json = json.loads(result.read())
updated_user = result_json["updated_user"]
if updated_user is None:
screen_error_message = "The server didn't recognize you. Try getting a new authkey in the options menu."
else:
screen_error_message = None
except Exception as e:
import traceback
errmsg = traceback.format_exc()
log.error("ERROR: Couldn't send item info to server")
log.error(errmsg)
screen_error_message = "ERROR: Couldn't send item info to server, check tracker_log.txt"
if len(new_states_queue) > 0:
(state_timestamp, new_state) = new_states_queue[0]
current_timestamp = int(time.time())
if current_timestamp - state_timestamp >= opt.read_delay or state is None:
state = new_state
new_states_queue.pop(0)
drawing_tool.set_window_title(update_notifier, watching_player=twitch_username, updates_queued=len(new_states_queue), read_delay=opt.read_delay)
if state is None and screen_error_message is None:
if read_from_server:
screen_error_message = "Unable to read state from server. Please verify your options setup and tracker_log.txt"
else:
screen_error_message = "log.txt not found. Put the RebirthItemTracker folder inside the isaac folder, next to log.txt"
if screen_error_message is not None:
drawing_tool.write_error_message(screen_error_message)
else:
drawing_tool.draw_state(state)
drawing_tool.tick()
framecount += 1
drawing_tool.save_window_position()
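# --- Hedged sketch (not part of the tracker) ---
# Isolates the read-delay policy used inside run(): a queued (timestamp,
# state) pair is applied once it is at least `read_delay` seconds old, or
# immediately when there is no current state yet. Names are illustrative.
def _apply_delayed_state(state, new_states_queue, read_delay, now):
    if new_states_queue:
        state_timestamp, new_state = new_states_queue[0]
        if now - state_timestamp >= read_delay or state is None:
            state = new_state
            new_states_queue.pop(0)
    return state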
def main():
""" Main """
try:
rt = IsaacTracker()
rt.run()
except Exception:
import traceback
errmsg = traceback.format_exc()
print(errmsg)
logging.getLogger("tracker").error(errmsg)
if __name__ == "__main__":
main()
|
data/StackStorm/st2/st2reactor/tests/unit/test_hash_partitioner.py
|
import math
from random_words import RandomWords
from st2reactor.container.hash_partitioner import HashPartitioner, Range
from st2tests import config
from st2tests import DbTestCase
from st2tests.fixturesloader import FixturesLoader
PACK = 'generic'
FIXTURES_1 = {
'sensors': ['sensor1.yaml', 'sensor2.yaml', 'sensor3.yaml']
}
class HashPartitionerTest(DbTestCase):
models = None
@classmethod
def setUpClass(cls):
super(HashPartitionerTest, cls).setUpClass()
cls.models = FixturesLoader().save_fixtures_to_db(
fixtures_pack=PACK, fixtures_dict=FIXTURES_1)
config.parse_args()
def test_full_range_hash_partitioner(self):
partitioner = HashPartitioner('node1', 'MIN..MAX')
sensors = partitioner.get_sensors()
self.assertEqual(len(sensors), 3, 'Expected all sensors')
def test_multi_range_hash_partitioner(self):
range_third = int(Range.RANGE_MAX_VALUE / 3)
range_two_third = range_third * 2
hash_ranges = \
'MIN..{range_third}|{range_third}..{range_two_third}|{range_two_third}..MAX'.format(
range_third=range_third, range_two_third=range_two_third)
partitioner = HashPartitioner('node1', hash_ranges)
sensors = partitioner.get_sensors()
self.assertEqual(len(sensors), 3, 'Expected all sensors')
def test_split_range_hash_partitioner(self):
range_mid = int(Range.RANGE_MAX_VALUE / 2)
partitioner = HashPartitioner('node1', 'MIN..%s' % range_mid)
sensors1 = partitioner.get_sensors()
partitioner = HashPartitioner('node2', '%s..MAX' % range_mid)
sensors2 = partitioner.get_sensors()
self.assertEqual(len(sensors1) + len(sensors2), 3, 'Expected all sensors')
def test_hash_effectiveness(self):
range_third = int(Range.RANGE_MAX_VALUE / 3)
partitioner1 = HashPartitioner('node1', 'MIN..%s' % range_third)
partitioner2 = HashPartitioner('node2', '%s..%s' % (range_third, range_third + range_third))
        partitioner3 = HashPartitioner('node3', '%s..MAX' % (range_third + range_third))
refs_count = 1000
refs = self._generate_refs(count=refs_count)
p1_count = 0
p2_count = 0
p3_count = 0
for ref in refs:
if partitioner1._is_in_hash_range(ref):
p1_count += 1
if partitioner2._is_in_hash_range(ref):
p2_count += 1
if partitioner3._is_in_hash_range(ref):
p3_count += 1
self.assertEqual(p1_count + p2_count + p3_count, refs_count,
'Sum should equal all sensors.')
mean = refs_count / 3
        variance = float((p1_count - mean)**2 + (p2_count - mean)**2 + (p3_count - mean)**2) / 3
sd = math.sqrt(variance)
self.assertTrue(sd / mean <= 0.2, 'Some values deviate too much from the mean.')
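    # Hedged worked numbers for the tolerance above: with refs_count=1000 the
    # per-partition mean is ~333.3, so sd / mean <= 0.2 permits a standard
    # deviation of roughly 67 refs across the three hash ranges.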
def _generate_refs(self, count=10):
random_word_count = int(math.sqrt(count)) + 1
words = RandomWords().random_words(count=random_word_count)
x_index = 0
y_index = 0
while count > 0:
yield '%s.%s' % (words[x_index], words[y_index])
if y_index < len(words) - 1:
y_index += 1
else:
x_index += 1
y_index = 0
count -= 1
return
|
data/OrbitzWorldwide/droned/droned/services/janitizer.py
|
from kitt.interfaces import moduleProvides, IDroneDService
moduleProvides(IDroneDService)
from kitt.util import dictwrapper
import config
SERVICENAME = 'janitizer'
SERVICECONFIG = dictwrapper({
'JANITIZE': {
config.LOG_DIR: [
('.*.log.\d+.*', int(7*len(config.AUTOSTART_SERVICES))),
],
}
})
import os, re, time
from twisted.application.service import Service
from twisted.internet import defer, task
from droned.logging import logWithContext
from kitt.decorators import synchronizedDeferred, deferredAsThread
import copy
__doc__ = """
config [JANITIZE, AUTOSTART_SERVICES]
When properly configured, this service keeps the filesystem
cleaned up while running.
Example: keep the most recent 10 copies of files that match the pattern
Janitizer.garbage = {
'/tmp/example1/log/directory' : [
( 'foo_[a-z].+\.log.*', 10)
]
}
"""
log = logWithContext(type=SERVICENAME)
def ageCompare(f1, f2):
    """cmp()-style comparison of two files by modification time"""
    t1 = os.path.getmtime(f1)
    t2 = os.path.getmtime(f2)
    if t1 > t2: return 1
    if t1 == t2: return 0
    return -1
class Janitizer(Service):
    minute = property(lambda self: 60)
    hour = property(lambda self: 3600)
    day = property(lambda self: 86400)
    week = property(lambda self: 604800)
oldfiles = {}
watchDict = property(lambda s: SERVICECONFIG.wrapped.get('JANITIZE',{}))
busy = defer.DeferredLock()
def update(self, watchDict):
"""Inspects occurrence for a watchDict parameter and updates
the internal state of Janitizer
@param watchDict (dict)
return None
"""
tmp = copy.deepcopy(self.watchDict)
tmp.update(watchDict)
SERVICECONFIG.JANITIZE = tmp
@synchronizedDeferred(busy)
@deferredAsThread
def garbageCheck(self):
"""Check for file patterns that are removeable"""
watchDict = copy.deepcopy(self.watchDict)
for directory,garbageList in watchDict.iteritems():
if not os.path.exists(directory): continue
for pattern,limit in garbageList:
self.cleanupLinks(directory)
files = [os.path.join(directory,f) for f in os.listdir(directory) \
if re.search(pattern,f)]
files = sorted(files)
if len(files) > int(limit):
log('These files matched:\n\t%s' % '\n\t'.join(files))
while len(files) > int(limit):
oldfile = files.pop(0)
log('Deleting %s' % oldfile)
if os.path.islink(oldfile): continue
if os.path.isdir(oldfile):
for base, dirs, myfiles in os.walk(oldfile, topdown=False):
for name in myfiles:
os.remove(os.path.join(base, name))
for name in dirs:
os.rmdir(os.path.join(base, name))
os.rmdir(oldfile)
else: os.unlink(oldfile)
self.cleanupLinks(directory)
def cleanupLinks(self, directory):
"""cleans broken symlinks
@param directory: (string)
return list
"""
files = [os.path.join(directory,f) for f in os.listdir(directory)]
for f in files[:]:
if not os.path.exists(f):
log('Removing broken symlink %s' % f)
os.unlink(f)
files.remove(f)
return files
def clean_old_files(self, directory, age, recurse=True):
"""mark this directory for cleaning at a certain age
@param directory: (string)
@param age: (float)
@param recurse: (bool)
return None
"""
self.oldfiles[directory] = (age,recurse)
@synchronizedDeferred(busy)
@deferredAsThread
def clean_elderly(self):
"""clean old files in a thread"""
for directory in self.oldfiles:
self.recursive_clean(directory,*self.oldfiles[directory])
def recursive_clean(self, directory, age, recurse):
"""recusively clean a directory
@param directory: (string)
@param age: (float)
@param recurse: (bool)
return bool
"""
try: data = map(lambda n: os.path.join(directory,n), os.listdir(directory))
except:
log('could not find directory %s' % directory)
return
for node in data:
if os.path.isdir(node) and recurse:
empty = self.recursive_clean(node,age,recurse)
if empty:
try: os.rmdir(node)
except: log('could not remove directory: %s' % node)
continue
if os.path.isdir(node): continue
if (time.time() - os.stat(node).st_mtime) > age:
try: os.remove(node)
except: log('could not remove file: %s' % node)
return bool(os.listdir(directory))
def startService(self):
"""Start Janitizer Service"""
self.GARBAGE_CHECK = task.LoopingCall(self.garbageCheck)
self.ELDERLY_CHECK = task.LoopingCall(self.clean_elderly)
Service.startService(self)
self.GARBAGE_CHECK.start(self.minute * 20)
self.ELDERLY_CHECK.start(self.minute)
def stopService(self):
"""Stop All Janitizer Service"""
try:
if self.GARBAGE_CHECK.running:
self.GARBAGE_CHECK.stop()
if self.ELDERLY_CHECK.running:
self.ELDERLY_CHECK.stop()
except: pass
Service.stopService(self)
parentService = None
service = None
def update(watchDict):
global service
if not running():
raise AssertionError('janitizer service is not running')
return service.update(watchDict)
def install(_parentService):
global parentService
parentService = _parentService
def start():
global service
if not running():
service = Janitizer()
service.setName(SERVICENAME)
service.setServiceParent(parentService)
def stop():
global service
if running():
service.disownServiceParent()
service.stopService()
service = None
def running():
return bool(service) and service.running
__all__ = ['install', 'start', 'stop', 'running']
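# --- Hedged usage sketch (not part of the service) ---
# Mirrors the watchDict format documented above; the path and pattern are
# illustrative. Once the service is running:
#   update({'/tmp/example1/log/directory': [(r'foo_[a-z].+\.log.*', 10)]})
# keeps at most ten files matching that pattern in the directory.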
|
data/Sage-Bionetworks/synapsePythonClient/tests/unit/unit_test_Evaluation.py
|
from nose.tools import assert_raises
from synapseclient.evaluation import Evaluation, Submission
def test_Evaluation():
"""Test the construction and accessors of Evaluation objects."""
assert_raises(ValueError, Evaluation, name='foo', description='bar', status='BAH')
assert_raises(ValueError, Evaluation, name='foo', description='bar', status='OPEN', contentSource='a')
ev = Evaluation(name='foobar2', description='bar', status='OPEN', contentSource='syn1234')
assert(ev['name']==ev.name)
assert(ev['description']==ev.description)
assert(ev['status']==ev.status)
def test_Submission():
"""Test the construction and accessors of Evaluation objects."""
assert_raises(KeyError, Submission, foo='bar')
|
data/Yelp/yelp-python/yelp/obj/span.py
|
from yelp.obj.response_object import ResponseObject
class Span(ResponseObject):
_fields = [
'latitude_delta',
'longitude_delta'
]
|
data/Kami/python-yubico-client/yubico_client/yubico.py
|
import re
import os
import sys
import time
import hmac
import base64
import hashlib
import threading
import logging
import requests
from yubico_client.otp import OTP
from yubico_client.yubico_exceptions import (StatusCodeError,
InvalidClientIdError,
InvalidValidationResponse,
SignatureVerificationError)
from yubico_client.py3 import b
from yubico_client.py3 import urlencode
from yubico_client.py3 import unquote
logger = logging.getLogger('yubico.client')
COMMON_CA_LOCATIONS = [
'/usr/local/lib/ssl/certs/ca-certificates.crt',
'/usr/local/ssl/certs/ca-certificates.crt',
'/usr/local/share/curl/curl-ca-bundle.crt',
'/usr/local/etc/openssl/cert.pem',
'/opt/local/lib/ssl/certs/ca-certificates.crt',
'/opt/local/ssl/certs/ca-certificates.crt',
'/opt/local/share/curl/curl-ca-bundle.crt',
'/opt/local/etc/openssl/cert.pem',
'/usr/lib/ssl/certs/ca-certificates.crt',
'/usr/ssl/certs/ca-certificates.crt',
'/usr/share/curl/curl-ca-bundle.crt',
'/etc/ssl/certs/ca-certificates.crt',
'/etc/pki/tls/cert.pem',
'/etc/pki/CA/cacert.pem',
    r'C:\Windows\curl-ca-bundle.crt',
    r'C:\Windows\ca-bundle.crt',
    r'C:\Windows\cacert.pem'
]
DEFAULT_API_URLS = ('https://api.yubico.com/wsapi/2.0/verify',
'https://api2.yubico.com/wsapi/2.0/verify',
'https://api3.yubico.com/wsapi/2.0/verify',
'https://api4.yubico.com/wsapi/2.0/verify',
'https://api5.yubico.com/wsapi/2.0/verify')
DEFAULT_TIMEOUT = 10
DEFAULT_MAX_TIME_WINDOW = 5
BAD_STATUS_CODES = ['BAD_OTP', 'REPLAYED_OTP', 'BAD_SIGNATURE',
'MISSING_PARAMETER', 'OPERATION_NOT_ALLOWED',
'BACKEND_ERROR', 'NOT_ENOUGH_ANSWERS',
'REPLAYED_REQUEST']
class Yubico(object):
def __init__(self, client_id, key=None, verify_cert=True,
translate_otp=True, api_urls=DEFAULT_API_URLS,
ca_certs_bundle_path=None):
if ca_certs_bundle_path and \
not self._is_valid_ca_bundle_file(ca_certs_bundle_path):
raise ValueError(('Invalid value provided for ca_certs_bundle_path'
' argument'))
self.client_id = client_id
if key is not None:
key = base64.b64decode(key.encode('ascii'))
self.key = key
self.verify_cert = verify_cert
self.translate_otp = translate_otp
self.api_urls = self._init_request_urls(api_urls=api_urls)
self.ca_certs_bundle_path = ca_certs_bundle_path
def verify(self, otp, timestamp=False, sl=None, timeout=None,
return_response=False):
"""
Verify a provided OTP.
:param otp: OTP to verify.
:type otp: ``str``
:param timestamp: True to include request timestamp and session counter
in the response. Defaults to False.
:type timestamp: ``bool``
:param sl: A value indicating percentage of syncing required by client.
:type sl: ``int`` or ``str``
:param timeout: Number of seconds to wait for sync responses.
:type timeout: ``int``
:param return_response: True to return a response object instead of the
status code. Defaults to False.
:type return_response: ``bool``
        :return: True if the provided OTP is valid, False if the
REPLAYED_OTP status value is returned or the response message signature
verification failed and None for the rest of the status values.
"""
ca_bundle_path = self._get_ca_bundle_path()
otp = OTP(otp, self.translate_otp)
rand_str = b(os.urandom(30))
nonce = base64.b64encode(rand_str, b('xz'))[:25].decode('utf-8')
query_string = self.generate_query_string(otp.otp, nonce, timestamp,
sl, timeout)
threads = []
timeout = timeout or DEFAULT_TIMEOUT
for url in self.api_urls:
thread = URLThread('%s?%s' % (url, query_string), timeout,
self.verify_cert, ca_bundle_path)
thread.start()
threads.append(thread)
start_time = time.time()
while threads and (start_time + timeout) > time.time():
for thread in threads:
if not thread.is_alive():
if thread.exception:
raise thread.exception
elif thread.response:
status = self.verify_response(thread.response,
otp.otp, nonce,
return_response)
if status:
if return_response:
return status
else:
return True
threads.remove(thread)
time.sleep(0.1)
raise Exception('NO_VALID_ANSWERS')
def verify_multi(self, otp_list, max_time_window=DEFAULT_MAX_TIME_WINDOW,
sl=None, timeout=None):
"""
Verify a provided list of OTPs.
:param max_time_window: Maximum number of seconds which can pass
between the first and last OTP generation for
the OTP to still be considered valid.
:type max_time_window: ``int``
"""
otps = []
for otp in otp_list:
otps.append(OTP(otp, self.translate_otp))
if len(otp_list) < 2:
raise ValueError('otp_list needs to contain at least two OTPs')
device_ids = set()
for otp in otps:
device_ids.add(otp.device_id)
if len(device_ids) != 1:
raise Exception('OTPs contain different device ids')
for otp in otps:
response = self.verify(otp.otp, True, sl, timeout,
return_response=True)
if not response:
return False
otp.timestamp = int(response['timestamp'])
count = len(otps)
        delta = otps[count - 1].timestamp - otps[0].timestamp
        # YubiKey timestamps tick at 8 Hz; convert ticks to seconds
        delta = delta / 8
if delta < 0:
raise Exception('delta is smaller than zero. First OTP appears to '
'be older than the last one')
if delta > max_time_window:
raise Exception(('More than %s seconds have passed between '
'generating the first and the last OTP.') %
(max_time_window))
return True
def verify_response(self, response, otp, nonce, return_response=False):
"""
Returns True if the OTP is valid (status=OK) and return_response=False,
otherwise (return_response = True) it returns the server response as a
dictionary.
Throws an exception if the OTP is replayed, the server response message
verification failed or the client id is invalid, returns False
otherwise.
"""
try:
            # findall (not search) is needed here: search() with one group
            # always yields a single match, so the multi-status check below
            # would never fire
            status = re.findall(r'status=([A-Z0-9_]+)', response)
if len(status) > 1:
message = 'More than one status= returned. Possible attack!'
raise InvalidValidationResponse(message, response)
status = status[0]
except (AttributeError, IndexError):
return False
signature, parameters = \
self.parse_parameters_from_response(response)
if self.key:
generated_signature = \
self.generate_message_signature(parameters)
if signature != generated_signature:
logger.warn("signature mismatch for parameters=%r", parameters)
raise SignatureVerificationError(generated_signature,
signature)
param_dict = self.get_parameters_as_dictionary(parameters)
if 'otp' in param_dict and param_dict['otp'] != otp:
message = 'Unexpected OTP in response. Possible attack!'
raise InvalidValidationResponse(message, response, param_dict)
if 'nonce' in param_dict and param_dict['nonce'] != nonce:
message = 'Unexpected nonce in response. Possible attack!'
raise InvalidValidationResponse(message, response, param_dict)
if status == 'OK':
if return_response:
return param_dict
else:
return True
elif status == 'NO_SUCH_CLIENT':
raise InvalidClientIdError(self.client_id)
elif status == 'REPLAYED_OTP':
raise StatusCodeError(status)
return False
def generate_query_string(self, otp, nonce, timestamp=False, sl=None,
timeout=None):
"""
Returns a query string which is sent to the validation servers.
"""
data = [('id', self.client_id),
('otp', otp),
('nonce', nonce)]
if timestamp:
data.append(('timestamp', '1'))
if sl is not None:
if sl not in range(0, 101) and sl not in ['fast', 'secure']:
raise Exception('sl parameter value must be between 0 and '
'100 or string "fast" or "secure"')
data.append(('sl', sl))
if timeout:
data.append(('timeout', timeout))
query_string = urlencode(data)
if self.key:
            hmac_signature = self.generate_message_signature(query_string)
            query_string += '&h=%s' % (hmac_signature.replace('+', '%2B'))
return query_string
def generate_message_signature(self, query_string):
"""
Returns a HMAC-SHA-1 signature for the given query string.
http://goo.gl/R4O0E
"""
pairs = query_string.split('&')
pairs = [pair.split('=', 1) for pair in pairs]
pairs_sorted = sorted(pairs)
pairs_string = '&' . join(['=' . join(pair) for pair in pairs_sorted])
digest = hmac.new(self.key, b(pairs_string), hashlib.sha1).digest()
signature = base64.b64encode(digest).decode('utf-8')
return signature
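    # Hedged worked example for the signing scheme above: the query string
    # "otp=xyz&id=1&nonce=abc" is split into pairs, sorted to id, nonce, otp,
    # re-joined as "id=1&nonce=abc&otp=xyz", HMAC-SHA-1 signed with the
    # decoded API key, and the base64 digest is what generate_query_string()
    # appends as the h= parameter.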
def parse_parameters_from_response(self, response):
"""
Returns a response signature and query string generated from the
server response. 'h' aka signature argument is stripped from the
returned query string.
"""
lines = response.splitlines()
pairs = [line.strip().split('=', 1) for line in lines if '=' in line]
pairs = sorted(pairs)
signature = ([unquote(v) for k, v in pairs if k == 'h'] or [None])[0]
query_string = '&' . join([k + '=' + v for k, v in pairs if k != 'h'])
return (signature, query_string)
def get_parameters_as_dictionary(self, query_string):
""" Returns query string parameters as a dictionary. """
pairs = (x.split('=', 1) for x in query_string.split('&'))
return dict((k, unquote(v)) for k, v in pairs)
def _init_request_urls(self, api_urls):
"""
Returns a list of the API URLs.
"""
if not isinstance(api_urls, (str, list, tuple)):
raise TypeError('api_urls needs to be string or iterable!')
if isinstance(api_urls, str):
api_urls = (api_urls,)
api_urls = list(api_urls)
for url in api_urls:
if not url.startswith('http://') and \
not url.startswith('https://'):
raise ValueError(('URL "%s" contains an invalid or missing'
' scheme' % (url)))
return list(api_urls)
def _get_ca_bundle_path(self):
"""
Return a path to the CA bundle which is used for verifying the hosts
SSL certificate.
"""
if self.ca_certs_bundle_path:
return self.ca_certs_bundle_path
for file_path in COMMON_CA_LOCATIONS:
if self._is_valid_ca_bundle_file(file_path=file_path):
return file_path
return None
def _is_valid_ca_bundle_file(self, file_path):
return os.path.exists(file_path) and os.path.isfile(file_path)
class URLThread(threading.Thread):
def __init__(self, url, timeout, verify_cert, ca_bundle_path=None):
super(URLThread, self).__init__()
self.url = url
self.timeout = timeout
self.verify_cert = verify_cert
self.ca_bundle_path = ca_bundle_path
self.exception = None
self.request = None
self.response = None
def run(self):
logger.debug('Sending HTTP request to %s (thread=%s)' % (self.url,
self.name))
verify = self.verify_cert
if self.ca_bundle_path is not None:
verify = self.ca_bundle_path
            logger.debug('Using custom CA bundle: %s' % (self.ca_bundle_path))
try:
self.request = requests.get(url=self.url, timeout=self.timeout,
verify=verify)
self.response = self.request.content.decode('utf-8')
except requests.exceptions.SSLError:
e = sys.exc_info()[1]
self.exception = e
self.response = None
except Exception:
e = sys.exc_info()[1]
logger.error('Failed to retrieve response: ' + str(e))
self.response = None
args = (self.url, self.name, self.response)
logger.debug('Received response from %s (thread=%s): %s' % args)
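# --- Hedged offline sketch (not part of the module) ---
# Client id and key are placeholders; only the signed query string is built,
# no request is sent to the validation servers.
if __name__ == "__main__":
    client = Yubico('12345', base64.b64encode(b('secret key')).decode('ascii'))
    print(client.generate_query_string('c' * 44, 'askjdnkajsndjkasndkjsnad'))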
|
data/Robpol86/colorclass/colorclass/parse.py
|
"""Parse color markup tags into ANSI escape sequences."""
import re
from colorclass.codes import ANSICodeMapping, BASE_CODES
CODE_GROUPS = (
tuple(set(str(i) for i in BASE_CODES.values() if i and (40 <= i <= 49 or 100 <= i <= 109))),
tuple(set(str(i) for i in BASE_CODES.values() if i and (30 <= i <= 39 or 90 <= i <= 99))),
('1', '22'), ('2', '22'), ('3', '23'), ('4', '24'), ('5', '25'), ('6', '26'), ('7', '27'), ('8', '28'), ('9', '29'),
)
RE_ANSI = re.compile(r'(\033\[([\d;]+)m)')
RE_COMBINE = re.compile(r'\033\[([\d;]+)m\033\[([\d;]+)m')
RE_SPLIT = re.compile(r'(\033\[[\d;]+m)')
def prune_overridden(ansi_string):
"""Remove color codes that are rendered ineffective by subsequent codes in one escape sequence then sort codes.
:param str ansi_string: Incoming ansi_string with ANSI color codes.
:return: Color string with pruned color sequences.
:rtype: str
"""
multi_seqs = set(p for p in RE_ANSI.findall(ansi_string) if ';' in p[1])
for escape, codes in multi_seqs:
r_codes = list(reversed(codes.split(';')))
try:
r_codes = r_codes[:r_codes.index('0') + 1]
except ValueError:
pass
for group in CODE_GROUPS:
for pos in reversed([i for i, n in enumerate(r_codes) if n in group][1:]):
r_codes.pop(pos)
reduced_codes = ';'.join(sorted(r_codes, key=int))
if codes != reduced_codes:
ansi_string = ansi_string.replace(escape, '\033[' + reduced_codes + 'm')
return ansi_string
def parse_input(tagged_string, disable_colors):
"""Perform the actual conversion of tags to ANSI escaped codes.
Provides a version of the input without any colors for len() and other methods.
:param str tagged_string: The input unicode value.
:param bool disable_colors: Strip all colors in both outputs.
:return: 2-item tuple. First item is the parsed output. Second item is a version of the input without any colors.
:rtype: tuple
"""
codes = ANSICodeMapping(tagged_string)
output_colors = getattr(tagged_string, 'value_colors', tagged_string)
for tag, replacement in (('{' + k + '}', '' if v is None else '\033[%dm' % v) for k, v in codes.items()):
output_colors = output_colors.replace(tag, replacement)
output_no_colors = RE_ANSI.sub('', output_colors)
if disable_colors:
return output_no_colors, output_no_colors
while True:
simplified = RE_COMBINE.sub(r'\033[\1;\2m', output_colors)
if simplified == output_colors:
break
output_colors = simplified
output_colors = prune_overridden(output_colors)
previous_escape = None
segments = list()
for item in (i for i in RE_SPLIT.split(output_colors) if i):
if RE_SPLIT.match(item):
if item != previous_escape:
segments.append(item)
previous_escape = item
else:
segments.append(item)
output_colors = ''.join(segments)
return output_colors, output_no_colors
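# --- Hedged usage sketch (not part of the module) ---
# Within a single escape sequence, a foreground code overridden by a later
# one in the same group is pruned: red (31) followed by green (32) collapses
# to just green.
if __name__ == "__main__":
    assert prune_overridden('\033[31;32mhi\033[0m') == '\033[32mhi\033[0m'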
|
data/IanLewis/kay/kay/lib/werkzeug/datastructures.py
|
"""
werkzeug.datastructures
~~~~~~~~~~~~~~~~~~~~~~~
This module provides mixins and classes with an immutable interface.
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import codecs
import mimetypes
from werkzeug._internal import _proxy_repr, _missing, _empty_stream
_locale_delim_re = re.compile(r'[_-]')
def is_immutable(self):
raise TypeError('%r objects are immutable' % self.__class__.__name__)
def iter_multi_items(mapping):
"""Iterates over the items of a mapping yielding keys and values
without dropping any from more complex structures.
"""
if isinstance(mapping, MultiDict):
for item in mapping.iteritems(multi=True):
yield item
elif isinstance(mapping, dict):
for key, value in mapping.iteritems():
if isinstance(value, (tuple, list)):
for value in value:
yield key, value
else:
yield key, value
else:
for item in mapping:
yield item
class ImmutableListMixin(object):
"""Makes a :class:`list` immutable.
.. versionadded:: 0.5
:private:
"""
def __reduce_ex__(self, protocol):
return type(self), (list(self),)
def __delitem__(self, key):
is_immutable(self)
def __delslice__(self, i, j):
is_immutable(self)
def __iadd__(self, other):
is_immutable(self)
__imul__ = __iadd__
def __setitem__(self, key, value):
is_immutable(self)
def __setslice__(self, i, j, value):
is_immutable(self)
def append(self, item):
is_immutable(self)
remove = append
def extend(self, iterable):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, index=-1):
is_immutable(self)
def reverse(self):
is_immutable(self)
def sort(self, cmp=None, key=None, reverse=None):
is_immutable(self)
class ImmutableList(ImmutableListMixin, list):
"""An immutable :class:`list`.
.. versionadded:: 0.5
:private:
"""
__repr__ = _proxy_repr(list)
class ImmutableDictMixin(object):
"""Makes a :class:`dict` immutable.
.. versionadded:: 0.5
:private:
"""
def __reduce_ex__(self, protocol):
return type(self), (dict(self),)
def setdefault(self, key, default=None):
is_immutable(self)
def update(self, *args, **kwargs):
is_immutable(self)
def pop(self, key, default=None):
is_immutable(self)
def popitem(self):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
def __delitem__(self, key):
is_immutable(self)
def clear(self):
is_immutable(self)
class ImmutableMultiDictMixin(ImmutableDictMixin):
"""Makes a :class:`MultiDict` immutable.
.. versionadded:: 0.5
:private:
"""
def __reduce_ex__(self, protocol):
return type(self), (self.items(multi=True),)
def add(self, key, value):
is_immutable(self)
def popitemlist(self):
is_immutable(self)
def poplist(self, key):
is_immutable(self)
def setlist(self, key, new_list):
is_immutable(self)
def setlistdefault(self, key, default_list=None):
is_immutable(self)
class UpdateDictMixin(object):
"""Makes dicts call `self.on_update` on modifications.
.. versionadded:: 0.5
:private:
"""
on_update = None
def calls_update(name):
def oncall(self, *args, **kw):
rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
if self.on_update is not None:
self.on_update(self)
return rv
oncall.__name__ = name
return oncall
__setitem__ = calls_update('__setitem__')
__delitem__ = calls_update('__delitem__')
clear = calls_update('clear')
pop = calls_update('pop')
popitem = calls_update('popitem')
setdefault = calls_update('setdefault')
update = calls_update('update')
del calls_update
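# Hedged note on the pattern above: calls_update() wraps each mutating dict
# method so that self.on_update(self) fires after the underlying mutation
# returns; the factory itself is deleted once the wrappers are bound.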
class TypeConversionDict(dict):
"""Works like a regular dict but the :meth:`get` method can perform
type conversions. :class:`MultiDict` and :class:`CombinedMultiDict`
are subclasses of this class and provide the same feature.
.. versionadded:: 0.5
"""
def get(self, key, default=None, type=None):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = TypeConversionDict(foo='42', bar='blub')
>>> d.get('foo', type=int)
42
>>> d.get('bar', -1, type=int)
-1
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
"""
try:
rv = self[key]
if type is not None:
rv = type(rv)
except (KeyError, ValueError):
rv = default
return rv
class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
"""Works like a :class:`TypeConversionDict` but does not support
modifications.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return TypeConversionDict(self)
def __copy__(self):
return self
class MultiDict(TypeConversionDict):
"""A :class:`MultiDict` is a dictionary subclass customized to deal with
multiple values for the same key which is for example used by the parsing
functions in the wrappers. This is necessary because some HTML form
elements pass multiple values for the same key.
:class:`MultiDict` implements all standard dictionary methods.
Internally, it saves all values for a key as a list, but the standard dict
access methods will only return the first value for a key. If you want to
gain access to the other values, too, you have to use the `list` methods as
explained below.
Basic Usage:
>>> d = MultiDict([('a', 'b'), ('a', 'c')])
>>> d
MultiDict([('a', 'b'), ('a', 'c')])
>>> d['a']
'b'
>>> d.getlist('a')
['b', 'c']
>>> 'a' in d
True
It behaves like a normal dict thus all dict functions will only return the
first value when multiple values for one key are found.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
A :class:`MultiDict` can be constructed from an iterable of
``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
onwards some keyword parameters.
:param mapping: the initial value for the :class:`MultiDict`. Either a
regular dict, an iterable of ``(key, value)`` tuples
or `None`.
"""
KeyError = None
def __init__(self, mapping=None):
if isinstance(mapping, MultiDict):
dict.__init__(self, ((k, l[:]) for k, l in mapping.iterlists()))
elif isinstance(mapping, dict):
tmp = {}
for key, value in mapping.iteritems():
if isinstance(value, (tuple, list)):
value = list(value)
else:
value = [value]
tmp[key] = value
dict.__init__(self, tmp)
else:
tmp = {}
for key, value in mapping or ():
tmp.setdefault(key, []).append(value)
dict.__init__(self, tmp)
def __getstate__(self):
return dict(self.lists())
def __setstate__(self, value):
dict.clear(self)
dict.update(self, value)
def __iter__(self):
return self.iterkeys()
def __getitem__(self, key):
"""Return the first data value for this key;
raises KeyError if not found.
:param key: The key to be looked up.
:raise KeyError: if the key does not exist.
"""
if key in self:
return dict.__getitem__(self, key)[0]
raise self.KeyError(key)
def __setitem__(self, key, value):
"""Like :meth:`add` but removes an existing key first.
:param key: the key for the value.
:param value: the value to set.
"""
dict.__setitem__(self, key, [value])
def add(self, key, value):
"""Adds a new value for the key.
.. versionadded:: 0.6
:param key: the key for the value.
:param value: the value to add.
"""
dict.setdefault(self, key, []).append(value)
def getlist(self, key, type=None):
"""Return the list of items for a given key. If that key is not in the
`MultiDict`, the return value will be an empty list. Just as `get`
`getlist` accepts a `type` parameter. All items will be converted
with the callable defined there.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
"""
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return list(rv)
result = []
for item in rv:
try:
result.append(type(item))
except ValueError:
pass
return result
def setlist(self, key, new_list):
"""Remove the old values for a key and add new ones. Note that the list
you pass the values in will be shallow-copied before it is inserted in
the dictionary.
>>> d = MultiDict()
>>> d.setlist('foo', ['1', '2'])
>>> d['foo']
'1'
>>> d.getlist('foo')
['1', '2']
:param key: The key for which the values are set.
:param new_list: An iterable with the new values for the key. Old values
are removed first.
"""
dict.__setitem__(self, key, list(new_list))
def setdefault(self, key, default=None):
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key not in self:
self[key] = default
else:
default = self[key]
return default
def setlistdefault(self, key, default_list=None):
"""Like `setdefault` but sets multiple values. The list returned
is not a copy, but the list that is actually used internally. This
means that you can put new values into the dict by appending items
to the list:
>>> d = MultiDict({"foo": 1})
>>> d.setlistdefault("foo").extend([2, 3])
>>> d.getlist("foo")
[1, 2, 3]
:param key: The key to be looked up.
        :param default_list: An iterable of default values. It is either copied
(in case it was a list) or converted into a list
before returned.
:return: a :class:`list`
"""
if key not in self:
default_list = list(default_list or ())
dict.__setitem__(self, key, default_list)
else:
default_list = dict.__getitem__(self, key)
return default_list
def items(self, multi=False):
"""Return a list of ``(key, value)`` pairs.
:param multi: If set to `True` the list returned will have a
pair for each value of each key. Otherwise it
will only contain pairs for the first value of
each key.
:return: a :class:`list`
"""
return list(self.iteritems(multi))
def lists(self):
"""Return a list of ``(key, value)`` pairs, where values is the list of
all values associated with the key.
:return: a :class:`list`
"""
return list(self.iterlists())
def values(self):
"""Returns a list of the first value on every key's value list.
:return: a :class:`list`.
"""
return [self[key] for key in self.iterkeys()]
def listvalues(self):
"""Return a list of all values associated with a key. Zipping
:meth:`keys` and this is the same as calling :meth:`lists`:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> zip(d.keys(), d.listvalues()) == d.lists()
True
:return: a :class:`list`
"""
return list(self.iterlistvalues())
def iteritems(self, multi=False):
"""Like :meth:`items` but returns an iterator."""
for key, values in dict.iteritems(self):
if multi:
for value in values:
yield key, value
else:
yield key, values[0]
def iterlists(self):
"""Return a list of all values associated with a key.
:return: a class:`list`
"""
for key, values in dict.iteritems(self):
yield key, list(values)
def itervalues(self):
"""Like :meth:`values` but returns an iterator."""
for values in dict.itervalues(self):
yield values[0]
def iterlistvalues(self):
"""like :meth:`listvalues` but returns an iterator."""
for values in dict.itervalues(self):
yield list(values)
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self)
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first value for each key.
:return: a :class:`dict`
"""
if flat:
return dict(self.iteritems())
return dict(self.lists())
def update(self, other_dict):
"""update() extends rather than replaces existing key lists."""
for key, value in iter_multi_items(other_dict):
MultiDict.add(self, key, value)
def pop(self, key, default=_missing):
"""Pop the first item for a list on the dict. Afterwards the
key is removed from the dict, so additional values are discarded:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> d.pop("foo")
1
>>> "foo" in d
False
:param key: the key to pop.
:param default: if provided the value to return if the key was
not in the dictionary.
"""
try:
return dict.pop(self, key)[0]
except KeyError, e:
if default is not _missing:
return default
raise self.KeyError(str(e))
def popitem(self):
"""Pop an item from the dict."""
try:
item = dict.popitem(self)
return (item[0], item[1][0])
except KeyError, e:
raise self.KeyError(str(e))
def poplist(self, key):
"""Pop the list for a key from the dict. If the key is not in the dict
an empty list is returned.
.. versionchanged:: 0.5
           If the key no longer exists an empty list is returned instead of
raising an error.
"""
return dict.pop(self, key, [])
def popitemlist(self):
"""Pop a ``(key, list)`` tuple from the dict."""
try:
return dict.popitem(self)
except KeyError, e:
raise self.KeyError(str(e))
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.items(multi=True))
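# Hedged note on to_dict() above: flat=True keeps only the first value per
# key while flat=False preserves them all, e.g.
#   MultiDict([('a', 1), ('a', 2)]).to_dict()            -> {'a': 1}
#   MultiDict([('a', 1), ('a', 2)]).to_dict(flat=False)  -> {'a': [1, 2]}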
class _omd_bucket(object):
"""Wraps values in the :class:`OrderedMultiDict`. This makes it
possible to keep an order over multiple different keys. It requires
a lot of extra memory and slows down access a lot, but makes it
possible to access elements in O(1) and iterate in O(n).
"""
__slots__ = ('prev', 'key', 'value', 'next')
def __init__(self, omd, key, value):
self.prev = omd._last_bucket
self.key = key
self.value = value
self.next = None
if omd._first_bucket is None:
omd._first_bucket = self
if omd._last_bucket is not None:
omd._last_bucket.next = self
omd._last_bucket = self
def unlink(self, omd):
if self.prev:
self.prev.next = self.next
if self.next:
self.next.prev = self.prev
if omd._first_bucket is self:
omd._first_bucket = self.next
if omd._last_bucket is self:
omd._last_bucket = self.prev
class OrderedMultiDict(MultiDict):
"""Works like a regular :class:`MultiDict` but preserves the
order of the fields. To convert the ordered multi dict into a
list you can use the :meth:`items` method and pass it ``multi=True``.
In general an :class:`OrderedMultiDict` is an order of magnitude
slower than a :class:`MultiDict`.
.. admonition:: note
Due to a limitation in Python you cannot convert an ordered
multi dict into a regular dict by using ``dict(multidict)``.
Instead you have to use the :meth:`to_dict` method, otherwise
the internal bucket objects are exposed.
"""
KeyError = None
def __init__(self, mapping=None):
dict.__init__(self)
self._first_bucket = self._last_bucket = None
if mapping is not None:
OrderedMultiDict.update(self, mapping)
def __eq__(self, other):
if not isinstance(other, MultiDict):
return NotImplemented
if isinstance(other, OrderedMultiDict):
iter1 = self.iteritems(multi=True)
iter2 = other.iteritems(multi=True)
try:
for k1, v1 in iter1:
k2, v2 = iter2.next()
if k1 != k2 or v1 != v2:
return False
except StopIteration:
return False
try:
iter2.next()
except StopIteration:
return True
return False
if len(self) != len(other):
return False
for key, values in self.iterlists():
if other.getlist(key) != values:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __reduce_ex__(self, protocol):
return type(self), (self.items(multi=True),)
def __getstate__(self):
return self.items(multi=True)
def __setstate__(self, values):
dict.clear(self)
for key, value in values:
self.add(key, value)
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)[0].value
raise self.KeyError(key)
def __setitem__(self, key, value):
self.poplist(key)
self.add(key, value)
def __delitem__(self, key):
self.pop(key)
def iterkeys(self):
return (key for key, value in self.iteritems())
def itervalues(self):
return (value for key, value in self.iteritems())
def iteritems(self, multi=False):
ptr = self._first_bucket
if multi:
while ptr is not None:
yield ptr.key, ptr.value
ptr = ptr.next
else:
returned_keys = set()
while ptr is not None:
if ptr.key not in returned_keys:
returned_keys.add(ptr.key)
yield ptr.key, ptr.value
ptr = ptr.next
def iterlists(self):
returned_keys = set()
ptr = self._first_bucket
while ptr is not None:
if ptr.key not in returned_keys:
yield ptr.key, self.getlist(ptr.key)
returned_keys.add(ptr.key)
ptr = ptr.next
def iterlistvalues(self):
for key, values in self.iterlists():
yield values
def add(self, key, value):
dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))
def getlist(self, key, type=None):
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return [x.value for x in rv]
result = []
for item in rv:
try:
result.append(type(item.value))
except ValueError:
pass
return result
def setlist(self, key, new_list):
self.poplist(key)
for value in new_list:
self.add(key, value)
def setlistdefault(self, key, default_list=None):
raise TypeError('setlistdefault is unsupported for '
'ordered multi dicts')
def update(self, mapping):
for key, value in iter_multi_items(mapping):
OrderedMultiDict.add(self, key, value)
def poplist(self, key):
buckets = dict.pop(self, key, ())
for bucket in buckets:
bucket.unlink(self)
return [x.value for x in buckets]
def pop(self, key, default=_missing):
try:
buckets = dict.pop(self, key)
except KeyError, e:
if default is not _missing:
return default
raise self.KeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return buckets[0].value
def popitem(self):
try:
key, buckets = dict.popitem(self)
except KeyError, e:
raise self.KeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return key, buckets[0].value
def popitemlist(self):
try:
key, buckets = dict.popitem(self)
except KeyError, e:
raise self.KeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return key, [x.value for x in buckets]
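# Hedged note on the ordering guarantee above: iteration follows insertion
# order across keys, e.g. OrderedMultiDict([('a', 1), ('b', 2), ('a', 3)])
# yields ('a', 1), ('b', 2), ('a', 3) from iteritems(multi=True).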
def _options_header_vkw(value, kw):
if not kw:
return value
return dump_options_header(value, dict((k.replace('_', '-'), v)
for k, v in kw.items()))
class Headers(object):
"""An object that stores some headers. It has a dict-like interface
but is ordered and can store the same keys multiple times.
This data structure is useful if you want a nicer way to handle WSGI
headers which are stored as tuples in a list.
From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
and will render a page for a ``400 BAD REQUEST`` if caught in a
catch-all for HTTP exceptions.
Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
class, with the exception of `__getitem__`. :mod:`wsgiref` will return
`None` for ``headers['missing']``, whereas :class:`Headers` will raise
a :class:`KeyError`.
To create a new :class:`Headers` object pass it a list or dict of headers
which are used as default values. This does not reuse the list passed
to the constructor for internal usage. To create a :class:`Headers`
object that uses as internal storage the list or list-like object you
can use the :meth:`linked` class method.
:param defaults: The list of default values for the :class:`Headers`.
"""
KeyError = None
def __init__(self, defaults=None, _list=None):
if _list is None:
_list = []
self._list = _list
if defaults is not None:
if isinstance(defaults, (list, Headers)):
self._list.extend(defaults)
else:
self.extend(defaults)
@classmethod
def linked(cls, headerlist):
"""Create a new :class:`Headers` object that uses the list of headers
passed as internal storage:
>>> headerlist = [('Content-Length', '40')]
>>> headers = Headers.linked(headerlist)
>>> headers['Content-Type'] = 'text/html'
>>> headerlist
[('Content-Length', '40'), ('Content-Type', 'text/html')]
:param headerlist: The list of headers the class is linked to.
:return: new linked :class:`Headers` object.
"""
return cls(_list=headerlist)
def __getitem__(self, key, _index_operation=True):
if _index_operation:
if isinstance(key, (int, long)):
return self._list[key]
elif isinstance(key, slice):
return self.__class__(self._list[key])
ikey = key.lower()
for k, v in self._list:
if k.lower() == ikey:
return v
raise self.KeyError(key)
def __eq__(self, other):
return other.__class__ is self.__class__ and \
set(other._list) == set(self._list)
def __ne__(self, other):
return not self.__eq__(other)
def get(self, key, default=None, type=None):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = Headers([('Content-Length', '42')])
>>> d.get('Content-Length', type=int)
42
If a headers object is bound you must not add unicode strings
because no encoding takes place.
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
"""
try:
rv = self.__getitem__(key, _index_operation=False)
except KeyError:
return default
if type is None:
return rv
try:
return type(rv)
except ValueError:
return default
def getlist(self, key, type=None):
"""Return the list of items for a given key. If that key is not in the
:class:`Headers`, the return value will be an empty list. Just as
:meth:`get` :meth:`getlist` accepts a `type` parameter. All items will
be converted with the callable defined there.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
"""
ikey = key.lower()
result = []
for k, v in self:
if k.lower() == ikey:
if type is not None:
try:
v = type(v)
except ValueError:
continue
result.append(v)
return result
def get_all(self, name):
"""Return a list of all the values for the named field.
This method is compatible with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.get_all` method.
"""
return self.getlist(name)
def iteritems(self, lower=False):
for key, value in self:
if lower:
key = key.lower()
yield key, value
def iterkeys(self, lower=False):
for key, _ in self.iteritems(lower):
yield key
def itervalues(self):
for _, value in self.iteritems():
yield value
def keys(self, lower=False):
return list(self.iterkeys(lower))
def values(self):
return list(self.itervalues())
def items(self, lower=False):
return list(self.iteritems(lower))
def extend(self, iterable):
"""Extend the headers with a dict or an iterable yielding keys and
values.
"""
if isinstance(iterable, dict):
for key, value in iterable.iteritems():
if isinstance(value, (tuple, list)):
for v in value:
self.add(key, v)
else:
self.add(key, value)
else:
for key, value in iterable:
self.add(key, value)
def __delitem__(self, key, _index_operation=True):
if _index_operation and isinstance(key, (int, long, slice)):
del self._list[key]
return
key = key.lower()
new = []
for k, v in self._list:
if k.lower() != key:
new.append((k, v))
self._list[:] = new
def remove(self, key):
"""Remove a key.
:param key: The key to be removed.
"""
return self.__delitem__(key, _index_operation=False)
def pop(self, key=None, default=_missing):
"""Removes and returns a key or index.
        :param key: The key to be popped. If this is an integer the item at
                    that position is removed; if it's a string the value for
                    that key is removed. If the key is omitted or `None` the
                    last item is removed.
:return: an item.
"""
if key is None:
return self._list.pop()
if isinstance(key, (int, long)):
return self._list.pop(key)
try:
rv = self[key]
self.remove(key)
except KeyError:
if default is not _missing:
return default
raise
return rv
def popitem(self):
"""Removes a key or index and returns a (key, value) item."""
return self.pop()
def __contains__(self, key):
"""Check if a key is present."""
try:
self.__getitem__(key, _index_operation=False)
except KeyError:
return False
return True
has_key = __contains__
def __iter__(self):
"""Yield ``(key, value)`` tuples."""
return iter(self._list)
def __len__(self):
return len(self._list)
def add(self, _key, _value, **kw):
"""Add a new header tuple to the list.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes::
>>> d = Headers()
>>> d.add('Content-Type', 'text/plain')
>>> d.add('Content-Disposition', 'attachment', filename='foo.png')
The keyword argument dumping uses :func:`dump_options_header`
behind the scenes.
.. versionadded:: 0.4.1
keyword arguments were added for :mod:`wsgiref` compatibility.
"""
self._list.append((_key, _options_header_vkw(_value, kw)))
def add_header(self, _key, _value, **_kw):
"""Add a new header tuple to the list.
An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.add_header` method.
"""
self.add(_key, _value, **_kw)
def clear(self):
"""Clears all headers."""
del self._list[:]
def set(self, _key, _value, **kw):
"""Remove all header tuples for `key` and add a new one. The newly
added key either appears at the end of the list if there was no
entry or replaces the first one.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes. See :meth:`add` for
more information.
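        A short illustration (the header values are made up)::
            >>> d = Headers([('X-Token', 'a'), ('X-Token', 'b')])
            >>> d.set('X-Token', 'c')
            >>> d.getlist('X-Token')
            ['c']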
.. versionchanged:: 0.6.1
:meth:`set` now accepts the same arguments as :meth:`add`.
:param key: The key to be inserted.
:param value: The value to be inserted.
"""
lc_key = _key.lower()
_value = _options_header_vkw(_value, kw)
for idx, (old_key, old_value) in enumerate(self._list):
if old_key.lower() == lc_key:
self._list[idx] = (_key, _value)
break
else:
return self.add(_key, _value)
self._list[idx + 1:] = [(k, v) for k, v in self._list[idx + 1:]
if k.lower() != lc_key]
def setdefault(self, key, value):
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key in self:
return self[key]
self.set(key, value)
return value
def __setitem__(self, key, value):
"""Like :meth:`set` but also supports index/slice based setting."""
if isinstance(key, (slice, int, long)):
self._list[key] = value
else:
self.set(key, value)
def to_list(self, charset='utf-8'):
"""Convert the headers into a list and converts the unicode header
items to the specified charset.
:return: list
"""
result = []
for k, v in self:
if isinstance(v, unicode):
v = v.encode(charset)
else:
v = str(v)
result.append((k, v))
return result
def copy(self):
return self.__class__(self._list)
def __copy__(self):
return self.copy()
def __str__(self, charset='utf-8'):
"""Returns formatted headers suitable for HTTP transmission."""
strs = []
for key, value in self.to_list(charset):
strs.append('%s: %s' % (key, value))
strs.append('\r\n')
return '\r\n'.join(strs)
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
list(self)
)
class ImmutableHeadersMixin(object):
"""Makes a :class:`Headers` immutable.
.. versionadded:: 0.5
:private:
"""
def __delitem__(self, key):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
set = __setitem__
def add(self, item):
is_immutable(self)
remove = add_header = add
def extend(self, iterable):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, index=-1):
is_immutable(self)
def popitem(self):
is_immutable(self)
def setdefault(self, key, default):
is_immutable(self)
class EnvironHeaders(ImmutableHeadersMixin, Headers):
"""Read only version of the headers from a WSGI environment. This
provides the same interface as `Headers` and is constructed from
a WSGI environment.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
HTTP exceptions.
"""
def __init__(self, environ):
self.environ = environ
@classmethod
def linked(cls, environ):
raise TypeError('%r object is always linked to environment, '
'no separate initializer' % cls.__name__)
def __eq__(self, other):
return self.environ is other.environ
def __getitem__(self, key, _index_operation=False):
key = key.upper().replace('-', '_')
if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
return self.environ[key]
return self.environ['HTTP_' + key]
def __len__(self):
return len(list(iter(self)))
def __iter__(self):
for key, value in self.environ.iteritems():
if key.startswith('HTTP_') and key not in \
('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
yield key[5:].replace('_', '-').title(), value
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield key.replace('_', '-').title(), value
def copy(self):
raise TypeError('cannot create %r copies' % self.__class__.__name__)
class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
"""A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
instances as sequence and it will combine the return values of all wrapped
dicts:
>>> from werkzeug import MultiDict, CombinedMultiDict
>>> post = MultiDict([('foo', 'bar')])
>>> get = MultiDict([('blub', 'blah')])
>>> combined = CombinedMultiDict([get, post])
>>> combined['foo']
'bar'
>>> combined['blub']
'blah'
    This works for all read operations; methods that would change the data
    raise a `TypeError`, as modification is not possible.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
"""
def __reduce_ex__(self, protocol):
return type(self), (self.dicts,)
def __init__(self, dicts=None):
self.dicts = dicts or []
@classmethod
def fromkeys(cls):
raise TypeError('cannot create %r instances by fromkeys' %
cls.__name__)
def __getitem__(self, key):
for d in self.dicts:
if key in d:
return d[key]
raise self.KeyError(key)
def get(self, key, default=None, type=None):
for d in self.dicts:
if key in d:
if type is not None:
try:
return type(d[key])
except ValueError:
continue
return d[key]
return default
def getlist(self, key, type=None):
rv = []
for d in self.dicts:
rv.extend(d.getlist(key, type))
return rv
def keys(self):
rv = set()
for d in self.dicts:
rv.update(d.keys())
return list(rv)
def iteritems(self, multi=False):
found = set()
for d in self.dicts:
for key, value in d.iteritems(multi):
if multi:
yield key, value
elif key not in found:
found.add(key)
yield key, value
def itervalues(self):
for key, value in self.iteritems():
yield value
def values(self):
return list(self.itervalues())
def items(self, multi=False):
return list(self.iteritems(multi))
def iterlists(self):
rv = {}
for d in self.dicts:
for key, values in d.iterlists():
rv.setdefault(key, []).extend(values)
return rv.iteritems()
def lists(self):
return list(self.iterlists())
    def iterlistvalues(self):
        # lists() yields (key, values) tuples, so the value lists are at
        # index 1 of each tuple
        return (x[1] for x in self.lists())
def listvalues(self):
return list(self.iterlistvalues())
def iterkeys(self):
return iter(self.keys())
__iter__ = iterkeys
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self.dicts[:])
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
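        For example (the contents are illustrative)::
            >>> cd = CombinedMultiDict([MultiDict([('foo', 'bar')])])
            >>> cd.to_dict()
            {'foo': 'bar'}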
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first item for each key.
:return: a :class:`dict`
"""
rv = {}
for d in reversed(self.dicts):
rv.update(d.to_dict(flat))
return rv
def __len__(self):
return len(self.keys())
def __contains__(self, key):
for d in self.dicts:
if key in d:
return True
return False
has_key = __contains__
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.dicts)
class FileMultiDict(MultiDict):
"""A special :class:`MultiDict` that has convenience methods to add
files to it. This is used for :class:`EnvironBuilder` and generally
useful for unittesting.
.. versionadded:: 0.5
"""
def add_file(self, name, file, filename=None, content_type=None):
"""Adds a new file to the dict. `file` can be a file name or
a :class:`file`-like or a :class:`FileStorage` object.
:param name: the name of the field.
:param file: a filename or :class:`file`-like object
:param filename: an optional filename
:param content_type: an optional content type
"""
if isinstance(file, FileStorage):
self[name] = file
return
if isinstance(file, basestring):
if filename is None:
filename = file
file = open(file, 'rb')
if filename and content_type is None:
content_type = mimetypes.guess_type(filename)[0] or \
'application/octet-stream'
self[name] = FileStorage(file, filename, name, content_type)
class ImmutableDict(ImmutableDictMixin, dict):
"""An immutable :class:`dict`.
.. versionadded:: 0.5
"""
__repr__ = _proxy_repr(dict)
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return dict(self)
def __copy__(self):
return self
class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
"""An immutable :class:`MultiDict`.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return MultiDict(self)
def __copy__(self):
return self
class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
"""An immutable :class:`OrderedMultiDict`.
.. versionadded:: 0.6
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return OrderedMultiDict(self)
def __copy__(self):
return self
class Accept(ImmutableList):
"""An :class:`Accept` object is just a list subclass for lists of
``(value, quality)`` tuples. It is automatically sorted by quality.
    All :class:`Accept` objects work like a list but provide extra
functionality for working with the data. Containment checks are
normalized to the rules of that header:
>>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
>>> a.best
'ISO-8859-1'
>>> 'iso-8859-1' in a
True
>>> 'UTF8' in a
True
>>> 'utf7' in a
False
To get the quality for an item you can use normal item lookup:
>>> print a['utf-8']
0.7
>>> a['utf7']
0
.. versionchanged:: 0.5
:class:`Accept` objects are forced immutable now.
"""
def __init__(self, values=()):
if values is None:
list.__init__(self)
self.provided = False
elif isinstance(values, Accept):
self.provided = values.provided
list.__init__(self, values)
else:
self.provided = True
values = [(a, b) for b, a in values]
values.sort()
values.reverse()
list.__init__(self, [(a, b) for b, a in values])
def _value_matches(self, value, item):
"""Check if a value matches a given accept item."""
return item == '*' or item.lower() == value.lower()
def __getitem__(self, key):
"""Besides index lookup (getting item n) you can also pass it a string
to get the quality for the item. If the item is not in the list, the
returned quality is ``0``.
"""
if isinstance(key, basestring):
return self.quality(key)
return list.__getitem__(self, key)
def quality(self, key):
"""Returns the quality of the key.
.. versionadded:: 0.6
In previous versions you had to use the item-lookup syntax
(eg: ``obj[key]`` instead of ``obj.quality(key)``)
"""
for item, quality in self:
if self._value_matches(key, item):
return quality
return 0
def __contains__(self, value):
for item, quality in self:
if self._value_matches(value, item):
return True
return False
def __repr__(self):
return '%s([%s])' % (
self.__class__.__name__,
', '.join('(%r, %s)' % (x, y) for x, y in self)
)
def index(self, key):
"""Get the position of an entry or raise :exc:`ValueError`.
:param key: The key to be looked up.
.. versionchanged:: 0.5
This used to raise :exc:`IndexError`, which was inconsistent
with the list API.
"""
if isinstance(key, basestring):
for idx, (item, quality) in enumerate(self):
if self._value_matches(key, item):
return idx
raise ValueError(key)
return list.index(self, key)
def find(self, key):
"""Get the position of an entry or return -1.
:param key: The key to be looked up.
"""
try:
return self.index(key)
except ValueError:
return -1
def values(self):
"""Return a list of the values, not the qualities."""
return list(self.itervalues())
def itervalues(self):
"""Iterate over all values."""
for item in self:
yield item[0]
def to_header(self):
"""Convert the header set into an HTTP header string."""
result = []
for value, quality in self:
if quality != 1:
value = '%s;q=%s' % (value, quality)
result.append(value)
return ','.join(result)
def __str__(self):
return self.to_header()
def best_match(self, matches, default=None):
"""Returns the best match from a list of possible matches based
on the quality of the client. If two items have the same quality,
the one is returned that comes first.
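        For instance (the values shown are illustrative)::
            >>> accept = Accept([('text/html', 1), ('application/xml', 0.9)])
            >>> accept.best_match(['application/xml', 'text/html'])
            'text/html'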
:param matches: a list of matches to check for
:param default: the value that is returned if none match
"""
best_quality = -1
result = default
for server_item in matches:
for client_item, quality in self:
if quality <= best_quality:
break
if self._value_matches(client_item, server_item):
best_quality = quality
result = server_item
return result
@property
def best(self):
"""The best match as value."""
if self:
return self[0][0]
class MIMEAccept(Accept):
"""Like :class:`Accept` but with special methods and behavior for
mimetypes.
"""
def _value_matches(self, value, item):
def _normalize(x):
x = x.lower()
return x == '*' and ('*', '*') or x.split('/', 1)
if '/' not in value:
raise ValueError('invalid mimetype %r' % value)
value_type, value_subtype = _normalize(value)
if value_type == '*' and value_subtype != '*':
raise ValueError('invalid mimetype %r' % value)
if '/' not in item:
return False
item_type, item_subtype = _normalize(item)
if item_type == '*' and item_subtype != '*':
return False
return (
(item_type == item_subtype == '*' or
value_type == value_subtype == '*') or
(item_type == value_type and (item_subtype == '*' or
value_subtype == '*' or
item_subtype == value_subtype))
)
@property
def accept_html(self):
"""True if this object accepts HTML."""
return (
'text/html' in self or
'application/xhtml+xml' in self or
self.accept_xhtml
)
@property
def accept_xhtml(self):
"""True if this object accepts XHTML."""
return (
'application/xhtml+xml' in self or
'application/xml' in self
)
class LanguageAccept(Accept):
"""Like :class:`Accept` but with normalization for languages."""
def _value_matches(self, value, item):
def _normalize(language):
return _locale_delim_re.split(language.lower())
return item == '*' or _normalize(value) == _normalize(item)
class CharsetAccept(Accept):
"""Like :class:`Accept` but with normalization for charsets."""
def _value_matches(self, value, item):
def _normalize(name):
try:
return codecs.lookup(name).name
except LookupError:
return name.lower()
return item == '*' or _normalize(value) == _normalize(item)
def cache_property(key, empty, type):
"""Return a new property object for a cache header. Useful if you
want to add support for a cache extension in a subclass."""
return property(lambda x: x._get_cache_value(key, empty, type),
lambda x, v: x._set_cache_value(key, v, type),
lambda x: x._del_cache_value(key),
'accessor for %r' % key)
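# A minimal subclassing sketch; ``community`` is a hypothetical cache
# extension directive, not one defined by RFC 2616:
#
#     class CommunityCacheControl(ResponseCacheControl):
#         community = cache_property('community', None, None)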
class _CacheControl(UpdateDictMixin, dict):
"""Subclass of a dict that stores values for a Cache-Control header. It
has accessors for all the cache-control directives specified in RFC 2616.
The class does not differentiate between request and response directives.
    Because the cache-control directives in the HTTP header use dashes, the
    Python descriptors use underscores instead.
To get a header of the :class:`CacheControl` object again you can convert
the object into a string or call the :meth:`to_header` method. If you plan
to subclass it and add your own items have a look at the sourcecode for
that class.
.. versionchanged:: 0.4
Setting `no_cache` or `private` to boolean `True` will set the implicit
none-value which is ``*``:
>>> cc = ResponseCacheControl()
>>> cc.no_cache = True
>>> cc
<ResponseCacheControl 'no-cache'>
>>> cc.no_cache
'*'
>>> cc.no_cache = None
>>> cc
<ResponseCacheControl ''>
In versions before 0.5 the behavior documented here affected the now
no longer existing `CacheControl` class.
"""
no_cache = cache_property('no-cache', '*', None)
no_store = cache_property('no-store', None, bool)
max_age = cache_property('max-age', -1, int)
no_transform = cache_property('no-transform', None, None)
def __init__(self, values=(), on_update=None):
dict.__init__(self, values or ())
self.on_update = on_update
self.provided = values is not None
def _get_cache_value(self, key, empty, type):
"""Used internally by the accessor properties."""
if type is bool:
return key in self
if key in self:
value = self[key]
if value is None:
return empty
elif type is not None:
try:
value = type(value)
except ValueError:
pass
return value
def _set_cache_value(self, key, value, type):
"""Used internally by the accessor properties."""
if type is bool:
if value:
self[key] = None
else:
self.pop(key, None)
else:
if value is None:
                self.pop(key, None)
elif value is True:
self[key] = None
else:
self[key] = value
def _del_cache_value(self, key):
"""Used internally by the accessor properties."""
if key in self:
del self[key]
def to_header(self):
"""Convert the stored values into a cache control header."""
return dump_header(self)
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.to_header()
)
class RequestCacheControl(ImmutableDictMixin, _CacheControl):
"""A cache control for requests. This is immutable and gives access
to all the request-relevant cache control headers.
To get a header of the :class:`RequestCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
max_stale = cache_property('max-stale', '*', int)
min_fresh = cache_property('min-fresh', '*', int)
no_transform = cache_property('no-transform', None, None)
only_if_cached = cache_property('only-if-cached', None, bool)
class ResponseCacheControl(_CacheControl):
"""A cache control for responses. Unlike :class:`RequestCacheControl`
this is mutable and gives access to response-relevant cache control
headers.
To get a header of the :class:`ResponseCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
public = cache_property('public', None, bool)
private = cache_property('private', '*', None)
must_revalidate = cache_property('must-revalidate', None, bool)
proxy_revalidate = cache_property('proxy-revalidate', None, bool)
s_maxage = cache_property('s-maxage', None, None)
_CacheControl.cache_property = staticmethod(cache_property)
class CallbackDict(UpdateDictMixin, dict):
"""A dict that calls a function passed every time something is changed.
The function is passed the dict instance.
"""
def __init__(self, initial=None, on_update=None):
dict.__init__(self, initial or ())
self.on_update = on_update
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
dict.__repr__(self)
)
class HeaderSet(object):
"""Similar to the :class:`ETags` class this implements a set-like structure.
Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
content-language headers.
If not constructed using the :func:`parse_set_header` function the
instantiation works like this:
>>> hs = HeaderSet(['foo', 'bar', 'baz'])
>>> hs
HeaderSet(['foo', 'bar', 'baz'])
"""
def __init__(self, headers=None, on_update=None):
self._headers = list(headers or ())
self._set = set([x.lower() for x in self._headers])
self.on_update = on_update
def add(self, header):
"""Add a new header to the set."""
self.update((header,))
def remove(self, header):
"""Remove a header from the set. This raises an :exc:`KeyError` if the
header is not in the set.
.. versionchanged:: 0.5
In older versions a :exc:`IndexError` was raised instead of a
:exc:`KeyError` if the object was missing.
:param header: the header to be removed.
"""
        key = header.lower()
        if key not in self._set:
            raise KeyError(header)
        self._set.remove(key)
        for idx, item in enumerate(self._headers):
            # compare case-insensitively against the normalized key
            if item.lower() == key:
                del self._headers[idx]
                break
if self.on_update is not None:
self.on_update(self)
def update(self, iterable):
"""Add all the headers from the iterable to the set.
:param iterable: updates the set with the items from the iterable.
"""
inserted_any = False
for header in iterable:
key = header.lower()
if key not in self._set:
self._headers.append(header)
self._set.add(key)
inserted_any = True
if inserted_any and self.on_update is not None:
self.on_update(self)
def discard(self, header):
"""Like :meth:`remove` but ignores errors.
:param header: the header to be discarded.
"""
try:
return self.remove(header)
except KeyError:
pass
def find(self, header):
"""Return the index of the header in the set or return -1 if not found.
:param header: the header to be looked up.
"""
header = header.lower()
for idx, item in enumerate(self._headers):
if item.lower() == header:
return idx
return -1
def index(self, header):
"""Return the index of the header in the set or raise an
:exc:`IndexError`.
:param header: the header to be looked up.
"""
rv = self.find(header)
if rv < 0:
raise IndexError(header)
return rv
def clear(self):
"""Clear the set."""
self._set.clear()
del self._headers[:]
if self.on_update is not None:
self.on_update(self)
def as_set(self, preserve_casing=False):
"""Return the set as real python set type. When calling this, all
the items are converted to lowercase and the ordering is lost.
:param preserve_casing: if set to `True` the items in the set returned
will have the original case like in the
:class:`HeaderSet`, otherwise they will
be lowercase.
"""
if preserve_casing:
return set(self._headers)
return set(self._set)
def to_header(self):
"""Convert the header set into an HTTP header string."""
return ', '.join(map(quote_header_value, self._headers))
def __getitem__(self, idx):
return self._headers[idx]
def __delitem__(self, idx):
rv = self._headers.pop(idx)
self._set.remove(rv.lower())
if self.on_update is not None:
self.on_update(self)
def __setitem__(self, idx, value):
old = self._headers[idx]
self._set.remove(old.lower())
self._headers[idx] = value
self._set.add(value.lower())
if self.on_update is not None:
self.on_update(self)
def __contains__(self, header):
return header.lower() in self._set
def __len__(self):
return len(self._set)
def __iter__(self):
return iter(self._headers)
def __nonzero__(self):
return bool(self._set)
def __str__(self):
return self.to_header()
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
self._headers
)
class ETags(object):
"""A set that can be used to check if one etag is present in a collection
of etags.
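    A small illustrative sketch; the etag value is made up::
        >>> etags = ETags(['0185e'])
        >>> etags.contains('0185e')
        True
        >>> etags.contains_weak('0185e')
        True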
"""
def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
self._strong = frozenset(not star_tag and strong_etags or ())
self._weak = frozenset(weak_etags or ())
self.star_tag = star_tag
def as_set(self, include_weak=False):
"""Convert the `ETags` object into a python set. Per default all the
weak etags are not part of this set."""
rv = set(self._strong)
if include_weak:
rv.update(self._weak)
return rv
def is_weak(self, etag):
"""Check if an etag is weak."""
return etag in self._weak
def contains_weak(self, etag):
"""Check if an etag is part of the set including weak and strong tags."""
return self.is_weak(etag) or self.contains(etag)
def contains(self, etag):
"""Check if an etag is part of the set ignoring weak tags."""
if self.star_tag:
return True
return etag in self._strong
def contains_raw(self, etag):
"""When passed a quoted tag it will check if this tag is part of the
set. If the tag is weak it is checked against weak and strong tags,
otherwise strong only."""
etag, weak = unquote_etag(etag)
if weak:
return self.contains_weak(etag)
return self.contains(etag)
def to_header(self):
"""Convert the etags set into a HTTP header string."""
if self.star_tag:
return '*'
return ', '.join(
['"%s"' % x for x in self._strong] +
['w/"%s"' % x for x in self._weak]
)
def __call__(self, etag=None, data=None, include_weak=False):
if [etag, data].count(None) != 1:
            raise TypeError('exactly one of etag or data must be provided')
if etag is None:
etag = generate_etag(data)
if include_weak:
if etag in self._weak:
return True
return etag in self._strong
def __nonzero__(self):
return bool(self.star_tag or self._strong)
def __str__(self):
return self.to_header()
def __iter__(self):
return iter(self._strong)
def __contains__(self, etag):
return self.contains(etag)
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
class Authorization(ImmutableDictMixin, dict):
"""Represents an `Authorization` header sent by the client. You should
not create this kind of object yourself but use it when it's returned by
the `parse_authorization_header` function.
This object is a dict subclass and can be altered by setting dict items
but it should be considered immutable as it's returned by the client and
not meant for modifications.
.. versionchanged:: 0.5
This object became immutable.
"""
def __init__(self, auth_type, data=None):
dict.__init__(self, data or {})
self.type = auth_type
username = property(lambda x: x.get('username'), doc='''
The username transmitted. This is set for both basic and digest
auth all the time.''')
password = property(lambda x: x.get('password'), doc='''
When the authentication type is basic this is the password
transmitted by the client, else `None`.''')
realm = property(lambda x: x.get('realm'), doc='''
This is the server realm sent back for HTTP digest auth.''')
nonce = property(lambda x: x.get('nonce'), doc='''
The nonce the server sent for digest auth, sent back by the client.
A nonce should be unique for every 401 response for HTTP digest
auth.''')
uri = property(lambda x: x.get('uri'), doc='''
The URI from Request-URI of the Request-Line; duplicated because
proxies are allowed to change the Request-Line in transit. HTTP
digest auth only.''')
nc = property(lambda x: x.get('nc'), doc='''
The nonce count value transmitted by clients if a qop-header is
also transmitted. HTTP digest auth only.''')
cnonce = property(lambda x: x.get('cnonce'), doc='''
If the server sent a qop-header in the ``WWW-Authenticate``
header, the client has to provide this value for HTTP digest auth.
See the RFC for more details.''')
response = property(lambda x: x.get('response'), doc='''
A string of 32 hex digits computed as defined in RFC 2617, which
proves that the user knows a password. Digest auth only.''')
opaque = property(lambda x: x.get('opaque'), doc='''
The opaque header from the server returned unchanged by the client.
It is recommended that this string be base64 or hexadecimal data.
Digest auth only.''')
@property
def qop(self):
"""Indicates what "quality of protection" the client has applied to
the message for HTTP digest auth."""
def on_update(header_set):
if not header_set and 'qop' in self:
del self['qop']
elif header_set:
self['qop'] = header_set.to_header()
return parse_set_header(self.get('qop'), on_update)
class WWWAuthenticate(UpdateDictMixin, dict):
"""Provides simple access to `WWW-Authenticate` headers."""
_require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm'])
def __init__(self, auth_type=None, values=None, on_update=None):
dict.__init__(self, values or ())
if auth_type:
self['__auth_type__'] = auth_type
self.on_update = on_update
def set_basic(self, realm='authentication required'):
"""Clear the auth info and enable basic auth."""
dict.clear(self)
dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
if self.on_update:
self.on_update(self)
def set_digest(self, realm, nonce, qop=('auth',), opaque=None,
algorithm=None, stale=False):
"""Clear the auth info and enable digest auth."""
d = {
'__auth_type__': 'digest',
'realm': realm,
'nonce': nonce,
'qop': dump_header(qop)
}
if stale:
d['stale'] = 'TRUE'
if opaque is not None:
d['opaque'] = opaque
if algorithm is not None:
d['algorithm'] = algorithm
dict.clear(self)
dict.update(self, d)
if self.on_update:
self.on_update(self)
def to_header(self):
"""Convert the stored values into a WWW-Authenticate header."""
d = dict(self)
auth_type = d.pop('__auth_type__', None) or 'basic'
return '%s %s' % (auth_type.title(), ', '.join([
'%s=%s' % (key, quote_header_value(value,
allow_token=key not in self._require_quoting))
for key, value in d.iteritems()
]))
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.to_header()
)
def auth_property(name, doc=None):
"""A static helper function for subclasses to add extra authentication
system properties onto a class::
class FooAuthenticate(WWWAuthenticate):
special_realm = auth_property('special_realm')
For more information have a look at the sourcecode to see how the
regular properties (:attr:`realm` etc.) are implemented.
"""
def _set_value(self, value):
if value is None:
self.pop(name, None)
else:
self[name] = str(value)
return property(lambda x: x.get(name), _set_value, doc=doc)
def _set_property(name, doc=None):
def fget(self):
def on_update(header_set):
if not header_set and name in self:
del self[name]
elif header_set:
self[name] = header_set.to_header()
return parse_set_header(self.get(name), on_update)
return property(fget, doc=doc)
type = auth_property('__auth_type__', doc='''
The type of the auth mechanism. HTTP currently specifies
`Basic` and `Digest`.''')
realm = auth_property('realm', doc='''
A string to be displayed to users so they know which username and
password to use. This string should contain at least the name of
the host performing the authentication and might additionally
indicate the collection of users who might have access.''')
domain = _set_property('domain', doc='''
A list of URIs that define the protection space. If a URI is an
absolute path, it is relative to the canonical root URL of the
server being accessed.''')
nonce = auth_property('nonce', doc='''
A server-specified data string which should be uniquely generated
each time a 401 response is made. It is recommended that this
string be base64 or hexadecimal data.''')
opaque = auth_property('opaque', doc='''
A string of data, specified by the server, which should be returned
by the client unchanged in the Authorization header of subsequent
requests with URIs in the same protection space. It is recommended
that this string be base64 or hexadecimal data.''')
algorithm = auth_property('algorithm', doc='''
A string indicating a pair of algorithms used to produce the digest
and a checksum. If this is not present it is assumed to be "MD5".
If the algorithm is not understood, the challenge should be ignored
(and a different one used, if there is more than one).''')
qop = _set_property('qop', doc='''
A set of quality-of-privacy directives such as auth and auth-int.''')
def _get_stale(self):
val = self.get('stale')
if val is not None:
return val.lower() == 'true'
def _set_stale(self, value):
if value is None:
self.pop('stale', None)
else:
self['stale'] = value and 'TRUE' or 'FALSE'
stale = property(_get_stale, _set_stale, doc='''
A flag, indicating that the previous request from the client was
rejected because the nonce value was stale.''')
del _get_stale, _set_stale
auth_property = staticmethod(auth_property)
del _set_property
class FileStorage(object):
"""The :class:`FileStorage` class is a thin wrapper over incoming files.
It is used by the request object to represent uploaded files. All the
attributes of the wrapper stream are proxied by the file storage so
it's possible to do ``storage.read()`` instead of the long form
``storage.stream.read()``.
"""
def __init__(self, stream=None, filename=None, name=None,
content_type='application/octet-stream', content_length=-1,
headers=None):
self.name = name
self.stream = stream or _empty_stream
self.filename = filename or getattr(stream, 'name', None)
self.content_type = content_type
self.content_length = content_length
if headers is None:
headers = Headers()
self.headers = headers
def save(self, dst, buffer_size=16384):
"""Save the file to a destination path or file object. If the
destination is a file object you have to close it yourself after the
call. The buffer size is the number of bytes held in memory during
the copy process. It defaults to 16KB.
For secure file saving also have a look at :func:`secure_filename`.
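        A typical call might look like this, where ``f`` stands for any
        :class:`FileStorage` instance and the destination path is
        illustrative::
            >>> f.save('/var/uploads/report.pdf')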
:param dst: a filename or open file object the uploaded file
is saved to.
:param buffer_size: the size of the buffer. This works the same as
the `length` parameter of
:func:`shutil.copyfileobj`.
"""
from shutil import copyfileobj
close_dst = False
if isinstance(dst, basestring):
            dst = open(dst, 'wb')
close_dst = True
try:
copyfileobj(self.stream, dst, buffer_size)
finally:
if close_dst:
dst.close()
def close(self):
"""Close the underlying file if possible."""
try:
self.stream.close()
except:
pass
def __nonzero__(self):
return bool(self.filename)
def __getattr__(self, name):
return getattr(self.stream, name)
def __iter__(self):
return iter(self.readline, '')
def __repr__(self):
return '<%s: %r (%r)>' % (
self.__class__.__name__,
self.filename,
self.content_type
)
from werkzeug.http import dump_options_header, dump_header, generate_etag, \
quote_header_value, parse_set_header, unquote_etag
from werkzeug.exceptions import BadRequest
for _cls in MultiDict, OrderedMultiDict, CombinedMultiDict, Headers, \
EnvironHeaders:
_cls.KeyError = BadRequest.wrap(KeyError, _cls.__name__ + '.KeyError')
del _cls
|
data/Zeeker/sublime-SessionManager/json/decoder.py
|
import json
import sublime
from ..modules import session
def _objectify(s):
if isinstance(s, str):
return json.loads(s)
return s
class SessionDecoder(json.JSONDecoder):
def decode(self, s):
o = _objectify(s)
try:
name = o["name"]
windows = [
WindowDecoder.decode(self, w) for w in o["windows"]
]
        except KeyError:
pass
else:
return session.Session(name, windows)
return json.JSONDecoder.decode(self, o)
class WindowDecoder(json.JSONDecoder):
def decode(self, s):
o = _objectify(s)
try:
project = o["project"]
project_path = o["project_path"]
views = [
ViewDecoder.decode(self, view) for view in o["views"]
]
        except KeyError:
pass
else:
return session.Window(project, project_path, views)
return json.JSONDecoder.decode(self, o)
class ViewDecoder(json.JSONDecoder):
def decode(self, s):
o = _objectify(s)
try:
file_path = o["file_path"]
active = o["active"]
sel_regions = [
RegionDecoder.decode(self, region) for region in o["sel_regions"]
]
visible_region = RegionDecoder.decode(self, o["visible_region"])
        except KeyError:
pass
else:
return session.View(file_path, active, sel_regions, visible_region)
return json.JSONDecoder.decode(self, o)
class RegionDecoder(json.JSONDecoder):
def decode(self, s):
o = _objectify(s)
try:
a = o[0]
b = o[1]
except IndexError:
pass
else:
return sublime.Region(a, b)
return json.JSONDecoder.decode(self, o)
|
data/ManiacalLabs/BiblioPixel/bibliopixel/drivers/network.py
|
from driver_base import DriverBase
import socket
import sys
import time
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import log
class CMDTYPE:
SETUP_DATA = 1
PIXEL_DATA = 2
BRIGHTNESS = 3
class RETURN_CODES:
SUCCESS = 255
ERROR = 0
ERROR_SIZE = 1
ERROR_UNSUPPORTED = 2
class DriverNetwork(DriverBase):
"""Driver for communicating with another device on the network."""
def __init__(self, num=0, width=0, height=0, host="localhost", port=3142):
super(DriverNetwork, self).__init__(num, width, height)
self._host = host
self._port = port
def _generateHeader(self, cmd, size):
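        # Build the 3-byte wire header: one command byte followed by the
        # payload size as a 16-bit little-endian integer.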
packet = bytearray()
packet.append(cmd)
packet.append(size & 0xFF)
packet.append(size >> 8)
return packet
def _connect(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self._host, self._port))
return s
except socket.gaierror:
error = "Unable to connect to or resolve host: {}".format(
self._host)
log.error(error)
raise IOError(error)
def update(self, data):
try:
s = self._connect()
count = self.bufByteCount
packet = self._generateHeader(CMDTYPE.PIXEL_DATA, count)
packet.extend(data)
s.sendall(packet)
resp = ord(s.recv(1))
s.close()
if resp != RETURN_CODES.SUCCESS:
log.warning("Bytecount mismatch! %s", resp)
except Exception as e:
log.exception(e)
error = "Problem communicating with network receiver!"
log.error(error)
raise IOError(error)
def setMasterBrightness(self, brightness):
packet = self._generateHeader(CMDTYPE.BRIGHTNESS, 1)
packet.append(brightness)
s = self._connect()
s.sendall(packet)
resp = ord(s.recv(1))
if resp != RETURN_CODES.SUCCESS:
return False
else:
return True
MANIFEST = [
{
"id": "network",
"class": DriverNetwork,
"type": "driver",
"display": "Network",
"desc": "Sends pixel data over the network to a reciever.",
"params": [{
"id": "num",
"label": "
"type": "int",
"default": 0,
"min": 0,
"help": "Total pixels in display. May use Width AND Height instead."
}, {
"id": "width",
"label": "Width",
"type": "int",
"default": 0,
"min": 0,
"help": "Width of display. Set if using a matrix."
}, {
"id": "height",
"label": "Height",
"type": "int",
"default": 0,
"min": 0,
"help": "Height of display. Set if using a matrix."
}, {
"id": "host",
"label": "Pixel Size",
"type": "str",
"default": "localhost",
"help": "Receiver host to connect to."
}, {
"id": "port",
"label": "Port",
"type": "int",
"default": 3142,
"help": "Port to connect to."
}]
}
]
|
data/JRBANCEL/Chromagnon/chromagnon/csvOutput.py
|
"""
CSV Output Module
"""
import csv
import sys
def csvOutput(queryResult, separator=',', quote='"'):
"""
Display the data according to csv format
"""
csvWriter = csv.writer(sys.stdout, delimiter=separator, quotechar=quote,
quoting=csv.QUOTE_MINIMAL)
for line in queryResult:
csvWriter.writerow(line)
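# Usage sketch; the rows shown here are illustrative:
#
#     csvOutput([('url', 'hits'), ('http://example.com/', 3)])
#
# writes one CSV-formatted line per row to stdout.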
|
data/Koed00/django-q/django_q/management/commands/qcluster.py
|
from optparse import make_option
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext as _
from django_q.cluster import Cluster
class Command(BaseCommand):
help = _("Starts a Django Q Cluster.")
option_list = BaseCommand.option_list + (
make_option('--run-once',
action='store_true',
dest='run_once',
default=False,
help='Run once and then stop.'),
)
def handle(self, *args, **options):
q = Cluster()
q.start()
if options.get('run_once', False):
q.stop()
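# Invocation sketch (assumes django_q is installed and configured in the
# project's settings):
#
#     $ python manage.py qcluster
#     $ python manage.py qcluster --run-once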
|
data/PyHDI/veriloggen/tests/extension/pipeline_/draw_graph/test_pipeline_draw_graph.py
|
from __future__ import absolute_import
from __future__ import print_function
import pipeline_draw_graph
expected_verilog = """
module test
(
);
reg CLK;
reg RST;
reg [32-1:0] x;
reg vx;
wire rx;
reg [32-1:0] y;
reg vy;
wire ry;
wire [32-1:0] z;
wire vz;
reg rz;
blinkled
uut
(
.CLK(CLK),
.RST(RST),
.x(x),
.vx(vx),
.rx(rx),
.y(y),
.vy(vy),
.ry(ry),
.z(z),
.vz(vz),
.rz(rz)
);
reg reset_done;
initial begin
$dumpfile("uut.vcd");
$dumpvars(0, uut);
end
initial begin
CLK = 0;
    forever begin
      #5 CLK = !CLK;
    end
end
initial begin
RST = 0;
reset_done = 0;
x = 0;
y = 0;
vx = 0;
vy = 0;
rz = 0;
    #100;
    RST = 1;
    #100;
    RST = 0;
    #1000;
    reset_done = 1;
    @(posedge CLK);
    #10000;
    $finish;
end
reg [32-1:0] _tmp_0;
reg [32-1:0] _tmp_1;
reg [32-1:0] _tmp_2;
reg [32-1:0] xfsm;
localparam xfsm_init = 0;
localparam xfsm_1 = 1;
localparam xfsm_2 = 2;
always @(posedge CLK) begin
if(RST) begin
xfsm <= xfsm_init;
_tmp_0 <= 0;
end else begin
case(xfsm)
xfsm_init: begin
vx <= 0;
if(reset_done) begin
xfsm <= xfsm_1;
end
end
xfsm_1: begin
vx <= 1;
if(rx) begin
x <= x + 1;
end
if(rx) begin
_tmp_0 <= _tmp_0 + 1;
end
if((_tmp_0 == 10) && rx) begin
xfsm <= xfsm_2;
end
end
xfsm_2: begin
vx <= 0;
end
endcase
end
end
reg [32-1:0] yfsm;
localparam yfsm_init = 0;
localparam yfsm_1 = 1;
localparam yfsm_2 = 2;
always @(posedge CLK) begin
if(RST) begin
yfsm <= yfsm_init;
_tmp_1 <= 0;
end else begin
case(yfsm)
yfsm_init: begin
vy <= 0;
if(reset_done) begin
yfsm <= yfsm_1;
end
end
yfsm_1: begin
vy <= 1;
if(ry) begin
y <= y + 2;
end
if(ry) begin
_tmp_1 <= _tmp_1 + 1;
end
if((_tmp_1 == 10) && ry) begin
yfsm <= yfsm_2;
end
end
yfsm_2: begin
vy <= 0;
end
endcase
end
end
reg [32-1:0] zfsm;
localparam zfsm_init = 0;
localparam zfsm_1 = 1;
localparam zfsm_2 = 2;
localparam zfsm_3 = 3;
localparam zfsm_4 = 4;
localparam zfsm_5 = 5;
localparam zfsm_6 = 6;
localparam zfsm_7 = 7;
localparam zfsm_8 = 8;
localparam zfsm_9 = 9;
localparam zfsm_10 = 10;
localparam zfsm_11 = 11;
localparam zfsm_12 = 12;
localparam zfsm_13 = 13;
always @(posedge CLK) begin
if(RST) begin
zfsm <= zfsm_init;
end else begin
case(zfsm)
zfsm_init: begin
rz <= 0;
if(reset_done) begin
zfsm <= zfsm_1;
end
end
zfsm_1: begin
zfsm <= zfsm_2;
end
zfsm_2: begin
if(vz) begin
rz <= 1;
end
if(vz) begin
zfsm <= zfsm_3;
end
end
zfsm_3: begin
rz <= 0;
zfsm <= zfsm_4;
end
zfsm_4: begin
rz <= 0;
zfsm <= zfsm_5;
end
zfsm_5: begin
rz <= 0;
zfsm <= zfsm_6;
end
zfsm_6: begin
rz <= 0;
zfsm <= zfsm_7;
end
zfsm_7: begin
rz <= 0;
zfsm <= zfsm_8;
end
zfsm_8: begin
rz <= 0;
zfsm <= zfsm_9;
end
zfsm_9: begin
rz <= 0;
zfsm <= zfsm_10;
end
zfsm_10: begin
rz <= 0;
zfsm <= zfsm_11;
end
zfsm_11: begin
rz <= 0;
zfsm <= zfsm_12;
end
zfsm_12: begin
rz <= 0;
zfsm <= zfsm_13;
end
zfsm_13: begin
zfsm <= zfsm_2;
end
endcase
end
end
always @(posedge CLK) begin
if(reset_done) begin
if(vx && rx) begin
$display("x=%d", x);
end
if(vy && ry) begin
$display("y=%d", y);
end
if(vz && rz) begin
$display("z=%d", z);
end
end
end
endmodule
module blinkled
(
input CLK,
input RST,
input [32-1:0] x,
input vx,
output rx,
input [32-1:0] y,
input vy,
output ry,
output [32-1:0] z,
output vz,
input rz
);
assign rx = (_df_ready_0 || !_df_valid_0) && (vx && vy);
assign ry = (_df_ready_0 || !_df_valid_0) && (vx && vy);
reg [32-1:0] _df_data_0;
reg _df_valid_0;
wire _df_ready_0;
assign _df_ready_0 = rz;
assign z = _df_data_0;
assign vz = _df_valid_0;
always @(posedge CLK) begin
if(RST) begin
_df_data_0 <= 0;
_df_valid_0 <= 0;
end else begin
if(vx && vy && (rx && ry) && (_df_ready_0 || !_df_valid_0)) begin
_df_data_0 <= x + y;
end
if(_df_valid_0 && _df_ready_0) begin
_df_valid_0 <= 0;
end
if(rx && ry && (_df_ready_0 || !_df_valid_0)) begin
_df_valid_0 <= vx && vy;
end
end
end
endmodule
"""
def test():
test_module = pipeline_draw_graph.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
|
data/Mimino666/langdetect/langdetect/detector_factory.py
|
import os
from os import path
import sys
try:
import simplejson as json
except ImportError:
import json
from .detector import Detector
from .lang_detect_exception import ErrorCode, LangDetectException
from .utils.lang_profile import LangProfile
class DetectorFactory(object):
'''
Language Detector Factory Class.
    This class manages the initialization and construction of Detector
    instances. Before using the language detection library,
    load profiles with DetectorFactory.load_profile(str)
    and set initialization parameters.
    To detect the language of a text,
    construct a Detector instance via DetectorFactory.create().
See also Detector's sample code.
'''
seed = None
def __init__(self):
self.word_lang_prob_map = {}
self.langlist = []
def load_profile(self, profile_directory):
list_files = os.listdir(profile_directory)
if not list_files:
raise LangDetectException(ErrorCode.NeedLoadProfileError, 'Not found profile: ' + profile_directory)
langsize, index = len(list_files), 0
for filename in list_files:
if filename.startswith('.'):
continue
filename = path.join(profile_directory, filename)
if not path.isfile(filename):
continue
f = None
try:
if sys.version_info[0] < 3:
f = open(filename, 'r')
else:
f = open(filename, 'r', encoding='utf-8')
json_data = json.load(f)
profile = LangProfile(**json_data)
self.add_profile(profile, index, langsize)
index += 1
except IOError:
raise LangDetectException(ErrorCode.FileLoadError, 'Cannot open "%s"' % filename)
except:
raise LangDetectException(ErrorCode.FormatError, 'Profile format error in "%s"' % filename)
finally:
if f:
f.close()
def load_json_profile(self, json_profiles):
langsize, index = len(json_profiles), 0
if langsize < 2:
raise LangDetectException(ErrorCode.NeedLoadProfileError, 'Need more than 2 profiles.')
for json_profile in json_profiles:
try:
json_data = json.loads(json_profile)
profile = LangProfile(**json_data)
self.add_profile(profile, index, langsize)
index += 1
except:
raise LangDetectException(ErrorCode.FormatError, 'Profile format error.')
def add_profile(self, profile, index, langsize):
lang = profile.name
if lang in self.langlist:
raise LangDetectException(ErrorCode.DuplicateLangError, 'Duplicate the same language profile.')
self.langlist.append(lang)
for word in profile.freq:
if word not in self.word_lang_prob_map:
self.word_lang_prob_map[word] = [0.0] * langsize
length = len(word)
if 1 <= length <= 3:
prob = 1.0 * profile.freq.get(word) / profile.n_words[length - 1]
self.word_lang_prob_map[word][index] = prob
def clear(self):
self.langlist = []
self.word_lang_prob_map = {}
def create(self, alpha=None):
'''Construct Detector instance with smoothing parameter.'''
detector = self._create_detector()
if alpha is not None:
detector.set_alpha(alpha)
return detector
def _create_detector(self):
if not self.langlist:
raise LangDetectException(ErrorCode.NeedLoadProfileError, 'Need to load profiles.')
return Detector(self)
def set_seed(self, seed):
self.seed = seed
def get_lang_list(self):
return list(self.langlist)
PROFILES_DIRECTORY = path.join(path.dirname(__file__), 'profiles')
_factory = None
def init_factory():
global _factory
if _factory is None:
_factory = DetectorFactory()
_factory.load_profile(PROFILES_DIRECTORY)
def detect(text):
init_factory()
detector = _factory.create()
detector.append(text)
return detector.detect()
def detect_langs(text):
init_factory()
detector = _factory.create()
detector.append(text)
return detector.get_probabilities()
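# Usage sketch; the input text and results are illustrative, and the actual
# output depends on the bundled language profiles:
#
#     >>> detect('This is a pen.')
#     'en'
#     >>> detect_langs('This is a pen.')
#     [en:0.9999...]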
|
data/DamnWidget/anaconda/anaconda_lib/jedi/utils.py
|
"""
Utilities for end-users.
"""
from __future__ import absolute_import
import __main__
from collections import namedtuple
import re
import os
import sys
from jedi import Interpreter
from jedi.api.helpers import completion_parts
from jedi.parser.user_context import UserContext
def setup_readline(namespace_module=__main__):
"""
Install Jedi completer to :mod:`readline`.
This function setups :mod:`readline` to use Jedi in Python interactive
shell. If you want to use a custom ``PYTHONSTARTUP`` file (typically
``$HOME/.pythonrc.py``), you can add this piece of code::
try:
from jedi.utils import setup_readline
setup_readline()
except ImportError:
print("Jedi is not installed, falling back to readline")
try:
import readline
import rlcompleter
readline.parse_and_bind("tab: complete")
except ImportError:
print("Readline is not installed either. No tab completion is enabled.")
This will fallback to the readline completer if Jedi is not installed.
The readline completer will only complete names in the global namespace,
so for example::
ran<TAB>
will complete to ``range``
with both Jedi and readline, but::
range(10).cou<TAB>
will show complete to ``range(10).count`` only with Jedi.
You'll also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to
your shell profile (usually ``.bash_profile`` or ``.profile`` if you use
bash).
"""
class JediRL(object):
def complete(self, text, state):
"""
            This completion machinery is pretty weird; a generator would make
            a lot more sense, but probably due to backwards compatibility
            this is still the way it works.
The only important part is stuff in the ``state == 0`` flow,
everything else has been copied from the ``rlcompleter`` std.
library module.
"""
if state == 0:
sys.path.insert(0, os.getcwd())
try:
interpreter = Interpreter(text, [namespace_module.__dict__])
path = UserContext(text, (1, len(text))).get_path_until_cursor()
path, dot, like = completion_parts(path)
before = text[:len(text) - len(like)]
completions = interpreter.completions()
finally:
sys.path.pop(0)
self.matches = [before + c.name_with_symbols for c in completions]
try:
return self.matches[state]
except IndexError:
return None
try:
import readline
except ImportError:
print("Module readline not available.")
else:
readline.set_completer(JediRL().complete)
readline.parse_and_bind("tab: complete")
readline.parse_and_bind("set completion-ignore-case on")
readline.parse_and_bind("set show-all-if-unmodified")
readline.parse_and_bind("set show-all-if-ambiguous on")
readline.parse_and_bind("set completion-prefix-display-length 2")
readline.set_completer_delims('')
def version_info():
"""
Returns a namedtuple of Jedi's version, similar to Python's
``sys.version_info``.
"""
Version = namedtuple('Version', 'major, minor, micro')
from jedi import __version__
    tupl = re.findall(r'[a-z]+|\d+', __version__)
return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)])
|
data/adieu/djangoappengine/tests/transactions.py
|
from .testmodels import EmailModel
from django.db.models import F
from django.test import TestCase
class TransactionTest(TestCase):
emails = ['app-engine@scholardocs.com', 'sharingan@uchias.com',
'rinnengan@sage.de', 'rasengan@naruto.com']
def setUp(self):
EmailModel(email=self.emails[0], number=1).save()
EmailModel(email=self.emails[0], number=2).save()
EmailModel(email=self.emails[1], number=3).save()
def test_update(self):
self.assertEqual(2, len(EmailModel.objects.all().filter(
email=self.emails[0])))
self.assertEqual(1, len(EmailModel.objects.all().filter(
email=self.emails[1])))
EmailModel.objects.all().filter(email=self.emails[0]).update(
email=self.emails[1])
self.assertEqual(0, len(EmailModel.objects.all().filter(
email=self.emails[0])))
self.assertEqual(3, len(EmailModel.objects.all().filter(
email=self.emails[1])))
def test_f_object_updates(self):
self.assertEqual(1, len(EmailModel.objects.all().filter(
number=1)))
self.assertEqual(1, len(EmailModel.objects.all().filter(
number=2)))
EmailModel.objects.all().filter(email=self.emails[0]).update(number=
F('number') + F('number'))
self.assertEqual(1, len(EmailModel.objects.all().filter(
number=2)))
self.assertEqual(1, len(EmailModel.objects.all().filter(
number=4)))
EmailModel.objects.all().filter(email=self.emails[1]).update(number=
F('number') + 10, email=self.emails[0])
self.assertEqual(1, len(EmailModel.objects.all().filter(number=13)))
self.assertEqual(self.emails[0], EmailModel.objects.all().get(number=13).
email)
EmailModel.objects.all().filter(number=13).update(number=
F('number')*(F('number') + 10) - 5, email=self.emails[0])
self.assertEqual(1, len(EmailModel.objects.all().filter(number=294)))
|
data/PublicMapping/DistrictBuilder/django/publicmapping/redistricting/views.py
|
"""
Django views used by the redistricting application.
The methods in redistricting.views define the views used to interact with
the models in the redistricting application. Each method relates to one
type of output url. There are views that return GeoJSON, JSON, and HTML.
This file is part of The Public Mapping Project
https://github.com/PublicMapping/
License:
Copyright 2010-2012 Micah Altman, Michael McDonald
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author:
Andrew Jennings, David Zwarg, Kenny Shepard
"""
from django.http import *
from django.core import serializers
from django.core.exceptions import ValidationError, SuspiciousOperation, ObjectDoesNotExist
from django.db import IntegrityError, connection, transaction
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.core.context_processors import csrf
from django.contrib.comments.models import Comment
from django.contrib.comments.forms import CommentForm
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.sessions.models import Session
from django.contrib.sessions.backends.db import SessionStore
from django.contrib.gis.geos.collections import MultiPolygon
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.gis.gdal import *
from django.contrib.gis.gdal.libgdal import lgdal
from django.contrib.sites.models import Site
from django.contrib import humanize
from django.template import loader, Context as DjangoContext, RequestContext
from django.utils import simplejson as json, translation
from django.utils.translation import ugettext as _, ungettext as _n
from django.template.defaultfilters import slugify, force_escape
from django.conf import settings
from tagging.utils import parse_tag_input
from tagging.models import Tag, TaggedItem
from datetime import datetime, time, timedelta
from decimal import *
from functools import wraps
from redistricting.calculators import *
from redistricting.models import *
from redistricting.tasks import *
import random, string, math, types, copy, time, threading, traceback, os
import commands, sys, tempfile, csv, hashlib, inflect, logging
import ModestMaps
from PIL import Image, ImageChops, ImageMath
import urllib, urllib2
from xhtml2pdf.pisa import CreatePDF
import StringIO
logger = logging.getLogger(__name__)
UNASSIGNED_DISTRICT_ID = 0
def using_unique_session(u):
"""
A test to determine if the user of the application is using a unique
session. Each user is permitted one unique session (one session in the
django_session table that has not yet expired). If the user exceeds
this quota, this test fails, and the user will get bounced to the login
url.
Parameters:
u - The user. May be anonymous or registered.
Returns:
True - the user is an AnonymousUser or the number of sessions open
by the user is only 1 (one must be open to make the request)
False - the user is registered and has more than one open session.
"""
if u.is_anonymous() or u.is_superuser:
return True
sessions = Session.objects.all()
count = 0
for session in sessions:
try:
decoded = session.get_decoded()
if '_auth_user_id' in decoded and decoded['_auth_user_id'] == u.id:
if 'activity_time' in decoded and decoded['activity_time'] < datetime.now():
Session.objects.filter(session_key=session.session_key).delete()
else:
count += 1
except SuspiciousOperation:
logger.debug("SuspiciousOperation caught while checking the number of sessions a user has open. Session key: %s", session.session_key)
for session in sessions:
try:
decoded = session.get_decoded()
if '_auth_user_id' in decoded and decoded['_auth_user_id'] == u.id:
websession = SessionStore(session_key=session.session_key)
websession['count'] = count
websession.save()
except SuspiciousOperation:
logger.debug("SuspiciousOperation caught while setting the session count on all user sessions. Session key: %s", session.session_key)
return (count <= 1)
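# Illustrative usage (a sketch mirroring the decorators used further below):
# views that must enforce the one-session-per-user rule can be wrapped with
# Django's user_passes_test, e.g.:
#
#   @user_passes_test(using_unique_session)
#   def someview(request, planid):
#       ...
#
# 'someview' is a hypothetical name; viewplan() and editplan() below are the
# concrete examples in this module.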
def unique_session_or_json_redirect(function):
"""
A decorator method. Any method that accepts this decorator
should have an HttpRequest as a parameter called "request".
That request will be checked for a unique session. If the
test passes, the original method is returned. If the session
is not unique, then a JSON response is returned and the
client is redirected to log off.
"""
def decorator(request, *args, **kwargs) :
def return_nonunique_session_result():
status = { 'success': False }
status['message'] = _(
"The current user may only have one session open at a time.")
status['redirect'] = '/?msg=logoff'
return HttpResponse(json.dumps(status),mimetype='application/json')
if not using_unique_session(request.user):
return return_nonunique_session_result()
else:
return function(request, *args, **kwargs)
return wraps(function)(decorator)
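# Illustrative usage (sketch): stack this decorator under @login_required so
# the JSON logoff redirect is only produced for authenticated users, e.g.:
#
#   @login_required
#   @unique_session_or_json_redirect
#   def someplanview(request, planid):
#       ...
#
# 'someplanview' is hypothetical; unloadplan() and copyplan() below follow
# exactly this pattern.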
def is_session_available(req):
"""
Determine if a session is available. This is similar to a user test,
but requires access to the user's session, so it cannot be used in the
user_passes_test decorator.
Parameters:
req - The HttpRequest object, with user and session information.
"""
if req.user.is_superuser or req.user.is_staff:
return True
sessions = Session.objects.filter(expire_date__gt=datetime.now())
count = 0
for session in sessions:
try:
decoded = session.get_decoded()
if (not req.user.is_anonymous()) and 'activity_time' in decoded and decoded['activity_time'] > datetime.now():
count += 1
except SuspiciousOperation:
logger.debug("SuspiciousOperation caught while checking the last activity time in a user's session. Session key: %s", session.session_key)
avail = count < settings.CONCURRENT_SESSIONS
req.session['avail'] = avail
return avail
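# Sketch of the gate above, assuming settings.CONCURRENT_SESSIONS = 5: once
# five sessions show a recent 'activity_time', is_session_available() returns
# False and the calling view (e.g. viewplan below) redirects to '/'.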
def note_session_activity(req):
"""
Add a session 'timeout' whenever a user performs an action. This is
required to keep dormant (not yet expired, but inactive) sessions
from maxing out the concurrent session limit.
Parameters:
req - An HttpRequest, with a session attribute
"""
window = timedelta(0,0,0,0,settings.SESSION_TIMEOUT)
req.session['activity_time'] = datetime.now() + window
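# Note on the timedelta above: the fifth positional argument is minutes, so
# the expression is equivalent to timedelta(minutes=settings.SESSION_TIMEOUT).
# A sketch of the stored value, assuming SESSION_TIMEOUT = 15:
#
#   req.session['activity_time'] = datetime.now() + timedelta(minutes=15)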
@login_required
def unloadplan(request, planid):
"""
Unload a plan.
This view is called anytime a plan is unloaded. Example: navigating
away from the page, or selecting a new plan. This method allows
for any required plan cleanup such as purging temporary versions.
Parameters:
request -- The HttpRequest, which includes the user.
planid -- The plan to unload.
Returns:
A JSON HttpResponse which includes a status.
"""
note_session_activity(request)
status = { 'success': False }
ps = Plan.objects.filter(pk=planid)
if len(ps) > 0:
p = ps[0]
if not can_copy(request.user, p):
status['message'] = _("User %(user)s doesn't have permission to unload this plan") % {'user':request.user.username}
return HttpResponse(json.dumps(status),mimetype='application/json')
if settings.MAX_UNDOS_AFTER_EDIT > 0:
p.purge_beyond_nth_step(settings.MAX_UNDOS_AFTER_EDIT)
status['success'] = True
return HttpResponse(json.dumps(status),mimetype='application/json')
@login_required
@unique_session_or_json_redirect
def copyplan(request, planid):
"""
Copy a plan to a new, editable plan.
This view is called by the plan chooser and the share plan tab. These
actions take a template or shared plan, and copy the plan without its
history into an editable plan in the current user's account.
Parameters:
request -- The HttpRequest, which includes the user.
planid -- The original plan to copy.
Returns:
A JSON HttpResponse which includes either an error message or the
copied plan ID.
"""
note_session_activity(request)
if not is_plan_ready(planid):
return HttpResponseRedirect('/')
status = { 'success': False }
p = Plan.objects.get(pk=planid)
if not can_copy(request.user, p):
status['message'] = _("User %(username)s doesn't have permission to " \
"copy this model" % {'username': request.user.username})
return HttpResponse(json.dumps(status),mimetype='application/json')
newname = p.name + " " + str(random.random())
if (request.method == "POST" ):
newname = request.POST["name"][0:200]
shared = request.POST.get("shared", False)
plan_copy = Plan.objects.filter(name=newname, owner=request.user, legislative_body=p.legislative_body)
if len(plan_copy) > 0:
status['message'] = _("You already have a plan named that. " \
"Please pick a unique name.")
return HttpResponse(json.dumps(status),mimetype='application/json')
plan_copy = Plan(name=newname, owner=request.user, is_shared=shared, legislative_body=p.legislative_body, processing_state=ProcessingState.READY)
plan_copy.create_unassigned = False
plan_copy.save()
districts = p.get_districts_at_version(p.version, include_geom=True)
for district in districts:
district_copy = copy.copy(district)
district_copy.id = None
district_copy.version = 0
district_copy.is_locked = False
district_copy.plan = plan_copy
try:
district_copy.save()
except Exception as inst:
status["message"] = _("Could not save district copies")
status["exception"] = inst.message
return HttpResponse(json.dumps(status),mimetype='application/json')
district_copy.clone_relations_from(district)
data = serializers.serialize("json", [ plan_copy ])
return HttpResponse(data, mimetype='application/json')
@login_required
@unique_session_or_json_redirect
def scoreplan(request, planid):
"""
Validate a plan to allow for it to be shown in the leaderboard
Parameters:
request -- The HttpRequest, which includes the user.
planid -- The plan to score.
Returns:
A JSON HttpResponse which includes a status, and if applicable,
a reason why the plan couldn't be validated
"""
note_session_activity(request)
status = { 'success': False }
plan = Plan.objects.get(pk=planid)
    criteria_list = ValidationCriteria.objects.filter(legislative_body=plan.legislative_body)
    status['success'] = True
    for criterion in criteria_list:
        # Reset score each iteration so a failed compute() doesn't leave the
        # name unbound (or carrying a stale value from the previous pass).
        score = None
        try:
            score = ComputedPlanScore.compute(criterion.function, plan)
        except:
            logger.debug(traceback.format_exc())
        if not score or not score['value']:
            status['success'] = False
            status['message'] = '<p>%s</p><p>%s</p>' % (criterion.get_short_label(), criterion.get_long_description() or criterion.function.get_long_description())
            break
    if status['success']:
        status['message'] = _("Validation successful")
        plan.is_valid = True
        plan.save()
return HttpResponse(json.dumps(status),mimetype='application/json')
def get_user_info(user):
"""
Get extended user information for the current user.
Parameters:
user -- The user attached to the HttpRequest
Returns:
A dict with user information, including profile information.
"""
if user.is_anonymous():
return None
profile = user.get_profile()
return {
'username':user.username,
'email':user.email,
'password_hint':profile.pass_hint,
'firstname':user.first_name,
'lastname':user.last_name,
'organization':profile.organization,
'id':user.id
}
def commonplan(request, planid):
"""
A common method that gets the same data structures for viewing
and editing. This method is called by the viewplan and editplan
views.
Parameters:
request -- An HttpRequest
planid -- The plan ID to fetch.
Returns:
A python dict with common plan attributes set to the plan's values.
"""
note_session_activity(request)
plan = Plan.objects.filter(id=planid)
if plan.count() == 1:
plan = plan[0]
plan.edited = getutc(plan.edited)
levels = plan.legislative_body.get_geolevels()
districts = plan.get_districts_at_version(plan.version,include_geom=False)
editable = can_edit(request.user, plan)
default_demo = plan.legislative_body.get_default_subject()
max_dists = plan.legislative_body.max_districts
body_member_short_label = plan.legislative_body.get_short_label()
body_member_long_label = plan.legislative_body.get_label()
body_members = plan.legislative_body.get_members_label()
reporting_template = 'bard_%s.html' % plan.legislative_body.name if not plan.is_community() else None
index = body_member_short_label.find('%(district_id)s')
if index >= 0:
body_member_short_label = body_member_short_label[0:index]
index = body_member_long_label.find('%(district_id)s')
if index >= 0:
body_member_long_label = body_member_long_label[0:index]
if not editable and not can_view(request.user, plan):
plan = {}
tags = []
calculator_reports = []
else:
tags = Tag.objects.filter(name__startswith='type=').order_by('id').values_list('name',flat=True)
tags = map(lambda x:x[5:], tags)
calculator_reports = []
if settings.REPORTS_ENABLED == 'CALC':
report_displays = ScoreDisplay.objects.filter(name="%s_reports" % plan.legislative_body.name)
if len(report_displays) > 0:
calculator_reports = map(lambda p: {
'title': p.__unicode__(),
'functions': map(lambda f: {
'label': f.get_label(),
'id': f.id
}, p.score_functions.all().filter(selectable_bodies=plan.legislative_body))
}, report_displays[0].scorepanel_set.all().order_by('position'))
else:
plan = {}
levels = list()
districts = {}
editable = False
default_demo = None
max_dists = 0
body_member_short_label = ''
body_member_long_label = _('District') + ' '
body_members = _n('district','districts',2)
reporting_template = None
tags = []
calculator_reports = []
demos = Subject.objects.all().order_by('sort_key')[0:3]
layers = []
snaplayers = []
    # Default in case no legislative body below yields an extent.
    study_area_extent = None
    if len(levels) > 0:
study_area_extent = list(levels[0].geounit_set.extent(field_name='simple'))
else:
for lb in LegislativeBody.objects.all():
biglevel = lb.get_geolevels()[0]
if biglevel.geounit_set.count() > 0:
study_area_extent = biglevel.geounit_set.extent(field_name='simple')
break
for level in levels:
snaplayers.append({
'geolevel': level.id,
'level': level.name,
'layer': 'simple_' + level.name,
'long_description': level.get_long_description(),
'min_zoom': level.min_zoom
})
default_selected = False
for demo in demos:
isdefault = str((not default_demo is None) and (demo.id == default_demo.id)).lower()
if isdefault == 'true':
default_selected = True
layers.append({
'id':demo.id,
'text':demo.get_short_label(),
'value':demo.name,
'isdefault':isdefault,
'isdisplayed':str(demo.is_displayed).lower()
})
if default_demo and not default_selected:
layers.insert( 0, {
'id':default_demo.id,
'text':default_demo.get_short_label(),
'value':default_demo.name,
'isdefault':str(True).lower(),
'isdisplayed':str(default_demo.is_displayed).lower()
})
    if hasattr(settings, 'MAP_SERVER_PROTOCOL'):
mapserver_protocol = settings.MAP_SERVER_PROTOCOL
else:
mapserver_protocol = ''
short_label = body_member_short_label.strip().lower()
long_label = body_member_long_label.strip().lower()
has_regions = Region.objects.all().count() > 1
bodies = LegislativeBody.objects.all().order_by('region__sort_key','sort_key')
l_bodies = [b for b in bodies if b in [sd.legislative_body for sd in ScoreDisplay.objects.filter(is_page=True)]]
try:
loader.get_template(reporting_template)
except:
reporting_template = None
return RequestContext(request, {
'bodies': bodies,
'has_regions': has_regions,
'leaderboard_bodies': l_bodies,
'plan': plan,
'districts': districts,
'mapserver': settings.MAP_SERVER,
'mapserver_protocol': mapserver_protocol,
'basemaps': settings.BASE_MAPS,
'namespace': settings.MAP_SERVER_NS,
'ns_href': settings.MAP_SERVER_NSHREF,
'feature_limit': settings.FEATURE_LIMIT,
'adjacency': settings.ADJACENCY,
'convexchoropleth': settings.CONVEX_CHOROPLETH,
'demographics': layers,
'snaplayers': snaplayers,
'unassigned_id': UNASSIGNED_DISTRICT_ID,
'is_registered': request.user.username != 'anonymous' and request.user.username != '',
'debugging_staff': settings.DEBUG and request.user.is_staff,
'userinfo': get_user_info(request.user),
'is_editable': editable,
'max_dists': max_dists + 1,
'ga_account': settings.GA_ACCOUNT,
'ga_domain': settings.GA_DOMAIN,
'body_member_short_label': short_label,
'body_member_long_label': long_label,
'body_members': body_members,
'reporting_template': reporting_template,
'study_area_extent': study_area_extent,
'has_leaderboard' : len(ScoreDisplay.objects.filter(is_page=True)) > 0,
'calculator_reports' : json.dumps(calculator_reports),
        'allow_email_submissions': hasattr(settings, 'EMAIL_SUBMISSION'),
'tags': tags,
'site': Site.objects.get_current(),
'plan_text': _("community map") if (plan and plan.is_community()) else _("plan"),
'language_code': translation.get_language(),
'LANGUAGES': settings.LANGUAGES
})
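# The RequestContext built above is consumed by the view functions below;
# a minimal sketch of the pattern (viewplan() is the concrete case):
#
#   return render_to_response('viewplan.html', commonplan(request, planid))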
def is_plan_ready(planid):
"""
Determines if a plan is in a Ready state
"""
planid = int(planid)
return planid == 0 or len(Plan.objects.filter(id=planid, processing_state=ProcessingState.READY)) > 0
@user_passes_test(using_unique_session)
def viewplan(request, planid):
"""
View a plan.
This template has no editing capability.
Parameters:
request -- An HttpRequest, which includes the current user.
planid -- The plan to view
Returns:
A rendered HTML page for viewing a plan.
"""
if not is_session_available(request) or not is_plan_ready(planid):
return HttpResponseRedirect('/')
if not request.user.is_anonymous() and (int(planid) == 0) and (settings.MAX_UNDOS_AFTER_EDIT > 0):
for p in Plan.objects.filter(owner=request.user):
p.purge_beyond_nth_step(settings.MAX_UNDOS_AFTER_EDIT)
return render_to_response('viewplan.html', commonplan(request, planid))
@user_passes_test(using_unique_session)
def editplan(request, planid):
"""
Edit a plan.
This template enables editing tools and functionality.
Parameters:
request -- An HttpRequest, which includes the current user.
planid -- The plan to edit.
Returns:
A rendered HTML page for editing a plan.
"""
if request.user.is_anonymous() or not is_session_available(request) or not is_plan_ready(planid):
return HttpResponseRedirect('/')
cfg = commonplan(request, planid)
    if not cfg['is_editable']:
return HttpResponseRedirect('/districtmapping/plan/%s/view/' % planid)
plan = Plan.objects.get(id=planid,owner=request.user)
cfg['dists_maxed'] = len(cfg['districts']) > plan.legislative_body.max_districts
cfg['available_districts'] = plan.get_available_districts()
if settings.MAX_UNDOS_AFTER_EDIT > 0:
plan.purge_beyond_nth_step(settings.MAX_UNDOS_AFTER_EDIT)
return render_to_response('editplan.html', cfg)
@user_passes_test(using_unique_session)
def printplan(request, planid):
"""
Print a static map of a plan.
This template renders a static HTML document for use with xhtml2pdf.
Parameters:
request -- An HttpRequest, which includes the current user.
planid -- The plan to edit.
Returns:
A rendered HTML page suitable for conversion to a PDF.
"""
if not is_session_available(request):
return HttpResponseRedirect('/')
cfg = commonplan(request, planid)
sha = hashlib.sha1()
sha.update(str(planid) + str(datetime.now()))
cfg['composite'] = '/reports/print-%s.jpg' % sha.hexdigest()
cfg['prefix'] = 'http://%s' % request.META['SERVER_NAME']
if request.method == 'POST':
if not 'bbox' in request.REQUEST or \
not 'geography_url' in request.REQUEST or \
not 'geography_lyr' in request.REQUEST or \
not 'district_url' in request.REQUEST or \
not 'district_lyr' in request.REQUEST:
logger.warning('Missing required "bbox", "geography_url", "geography_lyr", "district_url", or "districts_lyr" parameter.')
return HttpResponseRedirect('../view/')
height = 500*2
if 'height' in request.REQUEST:
height = int(request.REQUEST['height'])*2
width = 1024*2
if 'width' in request.REQUEST:
width = int(request.REQUEST['width'])*2
opacity = 0.8
if 'opacity' in request.REQUEST:
opacity = float(request.REQUEST['opacity'])
full_legend = json.loads(request.REQUEST['legend'])
cfg['geography_url'] = request.REQUEST['geography_url']
cfg['geography_lyr'] = request.REQUEST['geography_lyr']
cfg['district_url'] = request.REQUEST['district_url']
cfg['district_lyr'] = request.REQUEST['district_lyr']
cfg['geo_legend'] = full_legend['geo']
cfg['geo_legend_title'] = full_legend['geotitle']
cfg['dist_legend'] = full_legend['dist']
cfg['dist_legend_title'] = full_legend['disttitle']
cfg['plan'] = Plan.objects.get(id=int(request.REQUEST['plan_id']))
cfg['printed'] = datetime.now()
bbox = request.REQUEST['bbox'].split(',')
pt1 = Point(float(bbox[0]), float(bbox[1]), srid=3785)
pt1.transform(SpatialReference('EPSG:4326'))
ll = ModestMaps.Geo.Location(pt1.y, pt1.x)
pt2 = Point(float(bbox[2]), float(bbox[3]), srid=3785)
pt2.transform(SpatialReference('EPSG:4326'))
ur = ModestMaps.Geo.Location(pt2.y, pt2.x)
dims = ModestMaps.Core.Point(width, height)
provider = ModestMaps.OpenStreetMap.Provider()
basemap = ModestMaps.mapByExtent(provider, ll, ur, dims)
fullImg = basemap.draw()
provider = ModestMaps.WMS.Provider(cfg['geography_url'], {
'LAYERS':cfg['geography_lyr'],
'TRANSPARENT':'true',
'SRS': 'EPSG:3785',
'HEIGHT': 512,
'WIDTH': 512
})
overlayImg = ModestMaps.mapByExtent(provider, ll, ur, dims).draw()
maskImg = ImageChops.invert(overlayImg)
provider = ModestMaps.WMS.Provider(cfg['district_url'], {
'LAYERS':cfg['district_lyr'],
'TRANSPARENT':'false',
'SRS': 'EPSG:3785',
'SLD_BODY': request.REQUEST['district_sld'],
'HEIGHT': 512,
'WIDTH': 512
})
overlayImg = Image.blend(overlayImg, ModestMaps.mapByExtent(provider, ll, ur, dims).draw(), 0.5)
fullImg = Image.composite(fullImg, Image.blend(fullImg, overlayImg, opacity), maskImg)
provider = ModestMaps.WMS.Provider(cfg['district_url'], {
'LAYERS':cfg['district_lyr'],
'TRANSPARENT':'true',
'SRS': 'EPSG:3785',
'SLD_BODY': request.REQUEST['label_sld'],
'HEIGHT': 512,
'WIDTH': 512
})
overlayImg = ModestMaps.mapByExtent(provider, ll, ur, dims).draw()
maskImg = ImageChops.invert(overlayImg)
fullImg = Image.composite(fullImg, Image.blend(fullImg, overlayImg, opacity), maskImg)
fullImg.save(settings.WEB_TEMP + ('/print-%s.jpg' % sha.hexdigest()),'jpeg',quality=100)
t = loader.get_template('printplan.html')
page = t.render(DjangoContext(cfg))
result = StringIO.StringIO()
CreatePDF(page, result, show_error_as_pdf=True)
response = HttpResponse(result.getvalue(), mimetype='application/pdf')
response['Content-Disposition'] = 'attachment; filename=plan.pdf'
return response
else:
return HttpResponseRedirect('../view/')
@login_required
@unique_session_or_json_redirect
def createplan(request):
"""
Create a plan.
Create a plan from a POST request. This plan will be 'blank', and will
contain only the Unassigned district initially.
Parameters:
request -- An HttpRequest, which contains the current user.
Returns:
A JSON HttpResponse, including the new plan's information, or an
error describing why the plan could not be created.
"""
note_session_activity(request)
status = { 'success': False }
if request.method == "POST":
name = request.POST['name'][0:200]
body = LegislativeBody.objects.get(id=int(request.POST['legislativeBody']))
plan = Plan(name=name, owner=request.user, legislative_body=body, processing_state=ProcessingState.READY)
try:
plan.save()
status = serializers.serialize("json", [ plan ])
except:
status = { 'success': False, 'message': _("Couldn't save new plan") }
return HttpResponse(json.dumps(status),mimetype='application/json')
@unique_session_or_json_redirect
def uploadfile(request):
"""
Accept a block equivalency file, and create a plan based on that
file.
Parameters:
request -- An HttpRequest, with a file upload and plan name.
Returns:
A plan view, with additional information about the upload status.
"""
note_session_activity(request)
if request.user.is_anonymous():
return HttpResponseRedirect('/')
status = commonplan(request,0)
status['upload'] = True
status['upload_status'] = True
index_file = request.FILES.get('indexFile', False)
if not index_file:
status['upload_status'] = False
return render_to_response('viewplan.html', status)
else:
filename = index_file.name
if index_file.size > settings.MAX_UPLOAD_SIZE:
logger.error('File size exceeds allowable size.')
status['upload_status'] = False
return render_to_response('viewplan.html', status)
if not filename.endswith(('.csv','.zip')):
logger.error('Uploaded file must be ".csv" or ".zip".')
status['upload_status'] = False
elif request.POST['userEmail'] == '':
logger.error('No email provided for user notification.')
status['upload_status'] = False
else:
try:
dest = tempfile.NamedTemporaryFile(mode='wb+', delete=False)
for chunk in request.FILES['indexFile'].chunks():
dest.write(chunk)
dest.close()
if request.FILES['indexFile'].name.endswith('.zip'):
os.rename(dest.name, '%s%s' % (dest.name,'.zip'))
filename = '%s%s' % (dest.name,'.zip')
else:
filename = dest.name
except Exception as ex:
logger.error('Could not save uploaded file')
logger.error('Reason: %s', ex)
status['upload_status'] = False
return render_to_response('viewplan.html', status)
DistrictIndexFile.index2plan.delay(request.POST['txtNewName'], request.POST['legislativeBody'], filename, owner = request.user, template = False, purge = True, email = request.POST['userEmail'], language=translation.get_language())
return render_to_response('viewplan.html', status)
def generate_report_hash(qdict):
"""
Generate a hash based on the query items passed to this report request.
"""
params = qdict.get('popVar', ' ') + \
qdict.get('popVarExtra', ' ') + \
qdict.get('ratioVars[]', ' ') + \
qdict.get('splitVars', ' ') + \
qdict.get('blockLabelVar', 'CTID') + \
qdict.get('repCompactness', ' ') + \
qdict.get('repCompactnessExtra', ' ') + \
qdict.get('repSpatial', ' ') + \
qdict.get('repSpatialExtra', ' ')
sha = hashlib.sha1()
sha.update(params)
return sha.hexdigest()
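# Illustrative property of the hash (a sketch, not part of the view API):
# identical report parameters always map to the same stamp, so repeated
# polling requests for one report resolve to a single cached entry, e.g.:
#
#   generate_report_hash(qdict1) == generate_report_hash(qdict2)
#   # whenever qdict1 and qdict2 carry the same popVar, splitVars, etc.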
@unique_session_or_json_redirect
def getreport(request, planid):
"""
Get a BARD report.
This view will write out an HTML-formatted BARD report to the directory
given in the settings.
Parameters:
request -- An HttpRequest
planid -- The plan to be reported.
Returns:
The HTML for use as a preview in the web application, along with
the web address of the BARD report.
"""
note_session_activity(request)
status = { 'success': False }
try:
plan = Plan.objects.get(pk=planid)
except:
status['message'] = _('No plan with the given id')
return HttpResponse(json.dumps(status),mimetype='application/json')
if not can_view(request.user, plan):
status['message'] = _("User can't view the given plan")
return HttpResponse(json.dumps(status),mimetype='application/json')
    if settings.REPORTS_ENABLED is None:
status['message'] = _('Reports functionality is turned off.')
return HttpResponse(json.dumps(status),mimetype='application/json')
if request.method != 'POST':
status['message'] = _("Information for report wasn't sent via POST")
return HttpResponse(json.dumps(status),mimetype='application/json')
stamp = request.POST.get('stamp', generate_report_hash(request.POST))
rptstatus = PlanReport.checkreport(planid, stamp)
if rptstatus == 'ready':
status = {
'success': True,
'url': PlanReport.getreport(planid, stamp),
'retry': 0,
'message': _('Plan report is ready.'),
'stamp': stamp
}
elif rptstatus == 'busy':
status = {
'success': True,
'url': reverse(getreport, args=[planid]),
'retry': 10,
'message': _('Report is building.'),
'stamp': stamp
}
elif rptstatus == 'free':
status = {
'success': True,
'url': reverse(getreport, args=[planid]),
'retry': 10,
'message': _('Report generation started.'),
'stamp': stamp
}
req = {
'popVar': request.POST.get('popVar', ''),
'popVarExtra': request.POST.get('popVarExtra', ''),
'ratioVars[]': request.POST.getlist('ratioVars[]'),
'splitVars': request.POST.get('splitVars', ''),
'blockLabelVar': request.POST.get('blockLabelVar', 'CTID'),
'repComp': request.POST.get('repCompactness', ''),
'repCompExtra': request.POST.get('repCompactnessExtra', ''),
'repSpatial': request.POST.get('repSpatial', ''),
'repSpatialExtra': request.POST.get('repSpatialExtra', '')
}
PlanReport.markpending(planid, stamp)
PlanReport.createreport.delay(planid, stamp, req, language=translation.get_language())
else:
status['message'] = _(
'Unrecognized status when checking report status.')
return HttpResponse(json.dumps(status),mimetype='application/json')
@unique_session_or_json_redirect
def getcalculatorreport(request, planid):
"""
Get a report which is generated by using calculators.
This view will write out an HTML-formatted report to the directory
given in the settings.
Parameters:
request -- An HttpRequest
planid -- The plan to be reported.
Returns:
The HTML for use as a preview in the web application, along with
the web address of the report.
"""
note_session_activity(request)
status = { 'success': False }
try:
plan = Plan.objects.get(pk=planid)
except:
status['message'] = _('No plan with the given id')
return HttpResponse(json.dumps(status),mimetype='application/json')
if not can_view(request.user, plan):
status['message'] = _("User can't view the given plan")
return HttpResponse(json.dumps(status),mimetype='application/json')
if request.method != 'POST':
status['message'] = _("Information for report wasn't sent via POST")
return HttpResponse(json.dumps(status),mimetype='application/json')
function_ids = request.POST.get('functionIds', '')
sha = hashlib.sha1()
sha.update(function_ids)
stamp = request.POST.get('stamp', sha.hexdigest())
rptstatus = CalculatorReport.checkreport(planid, stamp)
if rptstatus == 'ready':
status = {
'success': True,
'url': CalculatorReport.getreport(planid, stamp),
'retry': 0,
'message': _('Plan report is ready.'),
'stamp': stamp
}
elif rptstatus == 'busy':
status = {
'success': True,
'url': reverse(getcalculatorreport, args=[planid]),
'retry': 5,
'message': _('Report is building.'),
'stamp': stamp
}
elif rptstatus == 'free':
status = {
'success': True,
'url': reverse(getcalculatorreport, args=[planid]),
'retry': 5,
'message': _('Report generation started.'),
'stamp': stamp
}
req = { 'functionIds': function_ids }
CalculatorReport.markpending(planid, stamp)
CalculatorReport.createcalculatorreport.delay(planid, stamp, req, language=translation.get_language())
else:
status['message'] = _(
'Unrecognized status when checking report status.')
return HttpResponse(json.dumps(status),mimetype='application/json')
@login_required
@unique_session_or_json_redirect
def newdistrict(request, planid):
"""
Create a new district.
The 'geolevel' parameter is required to create a new district. Geounits
may be added to this new district by setting the 'geounits' key in the
request.
Parameters:
request - An HttpRequest, with the current user.
planid - The plan id to which the district should be added.
Returns:
The new District's name and district_id.
"""
note_session_activity(request)
status = { 'success': False }
if len(request.REQUEST.items()) >= 3:
plan = Plan.objects.get(pk=planid, owner=request.user)
if 'geolevel' in request.REQUEST:
geolevel = request.REQUEST['geolevel']
else:
geolevel = None
if 'geounits' in request.REQUEST:
geounit_ids = string.split(request.REQUEST['geounits'], '|')
else:
geounit_ids = None
if 'district_id' in request.REQUEST:
district_id = int(request.REQUEST['district_id'])
else:
district_id = None
if 'district_short' in request.REQUEST:
district_short = request.REQUEST['district_short'][0:10]
elif not district_id is None:
district_short = plan.legislative_body.get_short_label() % {'district_id':district_id}
else:
district_short = None
if 'district_long' in request.REQUEST:
district_long = request.REQUEST['district_long'][0:256]
elif not district_id is None:
district_long = plan.legislative_body.get_label() % {'district_id':district_id}
else:
district_long = None
if 'version' in request.REQUEST:
version = request.REQUEST['version']
else:
version = plan.version
if geolevel and geounit_ids and district_id:
try:
fixed = plan.add_geounits((district_id, district_short, district_long,), geounit_ids, geolevel, version)
district = plan.district_set.filter(district_id=district_id,short_label=district_short,long_label=district_long)[0]
if plan.legislative_body.multi_members_allowed:
district.num_members = plan.legislative_body.min_multi_district_members
district.save()
ct = ContentType.objects.get(app_label='redistricting',model='district')
if 'comment' in request.POST and request.POST['comment'] != '':
comment = Comment(
object_pk=district.id,
content_type=ct,
site_id=Site.objects.get_current().id,
user_name=request.user.username,
user_email=request.user.email,
comment=request.POST['comment'])
comment.save()
if len(request.REQUEST.getlist('type[]')) > 0:
strtags = request.REQUEST.getlist('type[]')
for strtag in strtags:
if strtag == '':
continue
if strtag.count(' ') > 0:
strtag = '"type=%s"' % strtag
else:
strtag = 'type=%s' % strtag
Tag.objects.add_tag(district, strtag)
status['success'] = True
status['message'] = _('Created 1 new district')
plan = Plan.objects.get(pk=planid, owner=request.user)
status['edited'] = getutc(plan.edited).isoformat()
status['district_id'] = district_id
status['version'] = plan.version
except ValidationError:
status['message'] = _('Reached Max districts already')
except Exception, ex:
logger.warn('Error saving new district')
logger.debug('Reason: %s', ex)
status['message'] = _("Couldn't save new district.")
else:
        status['message'] = _('Must specify district_id, geolevel, ' \
            'and geounit ids for new district.')
return HttpResponse(json.dumps(status),mimetype='application/json')
@login_required
@unique_session_or_json_redirect
@transaction.commit_manually
def add_districts_to_plan(request, planid):
"""
This handler is used to paste existing districts from one
plan into another plan
Parameters:
request -- An HttpRequest object including a list of districtids and
a version
planid -- The plan into which to paste the districts
Returns:
Some JSON explaining the success or failure of the paste operation
"""
status = { 'success': False }
try:
plan = Plan.objects.get(pk=planid)
except:
status['message'] = _('No plan with the given id')
return HttpResponse(json.dumps(status),mimetype='application/json')
if not can_edit(request.user, plan):
status['message'] = _("User can't edit the given plan")
return HttpResponse(json.dumps(status),mimetype='application/json')
district_list = request.POST.getlist('districts[]')
if len(district_list) == 0:
status['message'] = _("No districts selected to add to the given plan")
return HttpResponse(json.dumps(status),mimetype='application/json')
else:
districts = District.objects.filter(id__in=district_list)
        version = int(request.POST.get('version', plan.version))
status['message'] = _('Going to merge %(number_of_merged_districts)d' \
' districts') % {'number_of_merged_districts': len(districts)}
allowed_districts = plan.get_available_districts(version=version)
        if len(districts) > allowed_districts:
            status['message'] = _('Tried to merge too many districts; ' \
                '%(allowed_districts)d slots left') % {'allowed_districts': allowed_districts}
            return HttpResponse(json.dumps(status),mimetype='application/json')
try:
results = plan.paste_districts(districts, version=version)
transaction.commit()
status['success'] = True
status['message'] = _('Merged %(num_merged_districts)d districts') % {'num_merged_districts': len(results)}
status['version'] = plan.version
except Exception as ex:
transaction.rollback()
status['message'] = str(ex)
status['exception'] = traceback.format_exc()
return HttpResponse(json.dumps(status),mimetype='application/json')
@login_required
@unique_session_or_json_redirect
@transaction.commit_manually
def assign_district_members(request, planid):
"""
This handler is used to assign members to districts
Parameters:
request -- An HttpRequest object including a version,
and a mapping of districtids to num_members
planid -- The plan into which to assign district members
Returns:
Some JSON explaining the success or failure of the paste operation
"""
status = { 'success': False }
try:
plan = Plan.objects.get(pk=planid)
except:
status['message'] = _('No plan with the given id')
return HttpResponse(json.dumps(status),mimetype='application/json')
if not can_edit(request.user, plan):
status['message'] = _("User can't edit the given plan")
return HttpResponse(json.dumps(status),mimetype='application/json')
leg_bod = plan.legislative_body
if (not leg_bod.multi_members_allowed):
status['message'] = _(
'Multi-members not allowed for this legislative body')
return HttpResponse(json.dumps(status),mimetype='application/json')
districts = request.POST.getlist('districts[]')
counts = request.POST.getlist('counts[]')
    version = int(request.POST.get('version', plan.version))
try:
changed = 0
for i in range(0, len(districts)):
id = int(districts[i])
count = int(counts[i])
district = District.objects.filter(plan=plan,district_id=id,version__lte=version).order_by('version').reverse()[0]
if district.num_members != count:
if (changed == 0):
if version != plan.version:
plan.purge(after=version)
plan.version = plan.version + 1
plan.save()
plan.update_num_members(district, count)
changed += 1
transaction.commit()
status['success'] = True
status['version'] = plan.version
status['modified'] = changed
status['message'] = _('Modified members for %(num_districts)d '
'districts') % {'num_districts': changed}
except Exception, ex:
transaction.rollback()
status['message'] = str(ex)
status['exception'] = traceback.format_exc()
logger.warn('Could not assign district members')
logger.debug('Reason: %s', ex)
return HttpResponse(json.dumps(status),mimetype='application/json')
@login_required
@unique_session_or_json_redirect
def combine_districts(request, planid):
"""
    Take the contents of one district and add them to another district
"""
status = { 'success': False }
try:
plan = Plan.objects.get(pk=planid)
except:
status['message'] = _('No plan with the given id')
return HttpResponse(json.dumps(status),mimetype='application/json')
if not can_edit(request.user, plan):
status['message'] = _("User can't edit the given plan")
return HttpResponse(json.dumps(status),mimetype='application/json')
version = int(request.POST.get('version', plan.version))
from_id = int(request.POST.get('from_district_id', -1))
    to_id = int(request.POST.get('to_district_id', -1))
try:
all_districts = plan.get_districts_at_version(version, include_geom=True)
        from_districts = filter(lambda d: d.district_id == from_id, all_districts)
        to_district = filter(lambda d: d.district_id == to_id, all_districts)[0]
locked = to_district.is_locked
for district in from_districts:
if district.is_locked:
locked = True
if locked:
status['message'] = _("Can't combine locked districts")
return HttpResponse(json.dumps(status),mimetype='application/json')
result = plan.combine_districts(to_district, from_districts, version=version)
if result[0] == True:
status['success'] = True
status['message'] = _('Successfully combined districts')
status['version'] = result[1]
except Exception, ex:
status['message'] = _('Could not combine districts')
status['exception'] = traceback.format_exc()
logger.warn('Could not combine districts')
logger.debug('Reason: %s', ex)
return HttpResponse(json.dumps(status),mimetype='application/json')
@login_required
@unique_session_or_json_redirect
def fix_unassigned(request, planid):
"""
    Assign unassigned base geounits that are fully contained within
    or adjacent to another district
"""
status = { 'success': False }
try:
plan = Plan.objects.get(pk=planid)
except:
status['message'] = _('No plan with the given id')
return HttpResponse(json.dumps(status),mimetype='application/json')
if not can_edit(request.user, plan):
status['message'] = _("User can't edit the given plan")
return HttpResponse(json.dumps(status),mimetype='application/json')
try:
version = int(request.POST.get('version', plan.version))
result = plan.fix_unassigned(version)
status['success'] = result[0]
status['message'] = result[1]
status['version'] = plan.version
except Exception, ex:
status['message'] = _('Could not fix unassigned')
status['exception'] = traceback.format_exc()
logger.warn('Could not fix unassigned')
logger.debug('Reason: %s', ex)
return HttpResponse(json.dumps(status),mimetype='application/json')
@unique_session_or_json_redirect
def get_splits(request, planid, otherid, othertype):
"""
Find all splits between this plan and another plan
Parameters:
request -- An HttpRequest optionally containing version and/or otherversion
planid -- The plan ID
otherid -- The plan ID or geolevel ID to find splits with
othertype -- One of: 'plan' or 'geolevel'. For specifying otherid
Returns:
A JSON HttpResponse that contains an array of splits, given as arrays,
where the first item is the district_id of the district in this plan
which causes the split, and the second item is the district_id of the
district in the other plan or geolevel. When a geolevel is specified,
the portable_id will be used, rather than the district_id.
"""
otherid = int(otherid)
status = { 'success': False }
try:
plan = Plan.objects.get(pk=planid)
except:
status['message'] = _('No plan with the given id')
return HttpResponse(json.dumps(status),mimetype='application/json')
if not can_view(request.user, plan):
status['message'] = _("User can't view the given plan")
return HttpResponse(json.dumps(status),mimetype='application/json')
version = int(request.REQUEST['version'] if 'version' in request.REQUEST else plan.version)
try:
if othertype == 'plan':
try:
otherplan = Plan.objects.get(pk=otherid)
except:
status['message'] = _('No other plan with the given id')
return HttpResponse(json.dumps(status),mimetype='application/json')
if not can_view(request.user, otherplan):
status['message'] = _("User can't view the given plan")
return HttpResponse(json.dumps(status),mimetype='application/json')
otherversion = int(request.REQUEST['otherversion'] if 'otherversion' in request.REQUEST else otherplan.version)
splits = plan.find_plan_splits(otherplan, version, otherversion)
elif othertype == 'geolevel':
splits = plan.find_geolevel_splits(otherid, version)
else:
status['message'] = _('othertype not supported: %(other)s') % { 'other': othertype }
return HttpResponse(json.dumps(status),mimetype='application/json')
split_word = _('split') if len(splits) == 1 else inflect.engine().plural(_('split'))
status['success'] = True
status['message'] = _('Found %(num_splits)d %(split_word)s') % \
{'num_splits': len(splits), 'split_word': split_word}
status['splits'] = splits
status['above_ids'] = list(set([i[0] for i in splits]))
status['below_ids'] = list(set([i[1] for i in splits]))
except Exception, ex:
status['message'] = _('Could not query for splits')
status['exception'] = traceback.format_exc()
logger.warn('Could not query for splits')
logger.debug('Reason: %s', ex)
return HttpResponse(json.dumps(status),mimetype='application/json')
def get_processing_status(request):
"""
Get the processing status for a list of plan ids
"""
status = { 'success': False }
plan_ids = request.REQUEST.getlist('planIds[]')
if len(plan_ids) == 0:
status['message'] = _('No planIds provided')
else:
statuses = {}
for p in Plan.objects.filter(id__in=plan_ids):
statuses[str(p.id)] = p.get_processing_state_display()
status['success'] = True
status['message'] = statuses
return HttpResponse(json.dumps(status),mimetype='application/json')
def get_splits_report(request, planid):
"""
Get the rendered splits report
"""
note_session_activity(request)
try:
plan = Plan.objects.get(pk=planid)
except:
return HttpResponse(_('Plan does not exist.'), mimetype='text/plain')
if not using_unique_session(request.user) or not can_view(request.user, plan):
return HttpResponseForbidden()
version = int(request.REQUEST['version'] if 'version' in request.REQUEST else plan.version)
inverse = request.REQUEST['inverse'] == 'true' if 'inverse' in request.REQUEST else False
extended = request.REQUEST['extended'] == 'true' if 'extended' in request.REQUEST else False
layers = request.REQUEST.getlist('layers[]')
if len(layers) == 0:
return HttpResponse(_('No layers were provided.'), mimetype='text/plain')
try :
report = loader.get_template('split_report.html')
html = ''
for layer in layers:
my_context = {'extended': extended}
my_context.update(plan.compute_splits(layer, version = version, inverse = inverse, extended = extended))
last_item = layer is layers[-1]
community_info = plan.get_community_type_info(layer, version = version, inverse = inverse, include_counts=last_item)
if community_info is not None:
my_context.update(community_info)
calc_context = DjangoContext(my_context)
html += report.render(calc_context)
if not last_item:
html += '<hr />'
return HttpResponse(html, mimetype='text/html')
except Exception, ex:
logger.warn('Could not produce split report')
logger.debug('Reason: %s', ex)
return HttpResponse(str(ex), mimetype='text/plain')
@login_required
@unique_session_or_json_redirect
def addtodistrict(request, planid, districtid):
"""
Add geounits to a district.
This method requires both "geolevel" and "geounits" URL parameters.
The geolevel must be a valid geolevel name and the geounits parameters
should be a pipe-separated list of geounit ids.
Parameters:
request -- An HttpRequest, with the current user, the geolevel, and
the pipe-separated geounit list.
planid -- The plan ID that contains the district.
districtid -- The district ID to which the geounits will be added.
Returns:
A JSON HttpResponse that contains the number of districts modified,
or an error message if adding fails.
"""
note_session_activity(request)
status = { 'success': False }
if len(request.REQUEST.items()) >= 2:
try:
geolevel = request.REQUEST["geolevel"]
geounit_ids = string.split(request.REQUEST["geounits"], "|")
plan = Plan.objects.get(pk=planid,owner=request.user)
        except:
            status['exception'] = traceback.format_exc()
            status['message'] = _('Could not add units to district.')
            # Bail out: 'plan' is unbound past this point if the lookup failed.
            return HttpResponse(json.dumps(status),mimetype='application/json')
if 'version' in request.REQUEST:
version = request.REQUEST['version']
else:
version = plan.version
try:
fixed = plan.add_geounits(districtid, geounit_ids, geolevel, version)
            status['success'] = True
status['message'] = _('Updated %(num_fixed_districts)d districts') \
% {'num_fixed_districts': fixed}
status['updated'] = fixed
plan = Plan.objects.get(pk=planid,owner=request.user)
status['edited'] = getutc(plan.edited).isoformat()
status['version'] = plan.version
except Exception, ex:
status['exception'] = traceback.format_exc()
status['message'] = _('Could not add units to district.')
logger.warn('Could not add units to district')
logger.debug('Reason: %s', ex)
else:
status['message'] = _("Geounits weren't found in a district.")
return HttpResponse(json.dumps(status),mimetype='application/json')
@unique_session_or_json_redirect
@login_required
def setdistrictlock(request, planid, district_id):
"""
Set whether this district is locked for editing.
Parameters:
request -- An HttpRequest, with a boolean that indicates whether the district
should be locked or unlocked
planid -- The plan ID that contains the district.
district_id -- The district_id to lock or unlock
Returns:
A JSON HttpResponse that contains a boolean of whether the district is locked.
"""
note_session_activity(request)
status = {'success':False}
if request.method != 'POST':
return HttpResponseForbidden()
    lock = request.POST.get('lock')
    version = request.POST.get('version')
    if lock is None:
        status['message'] = _('Must include lock parameter.')
        return HttpResponse(json.dumps(status), mimetype='application/json')
    elif version is None:
        status['message'] = _('Must include version parameter.')
        return HttpResponse(json.dumps(status), mimetype='application/json')
    lock = lock.lower() == 'true'
try:
plan = Plan.objects.get(pk=planid)
district = plan.district_set.filter(district_id=district_id,version__lte=version).order_by('version').reverse()[0]
except ObjectDoesNotExist:
status['message'] = _('Plan or district does not exist.')
return HttpResponse(json.dumps(status), mimetype='application/json')
if plan.owner != request.user:
return HttpResponseForbidden()
district.is_locked = lock
district.save()
status['success'] = True
status['message'] = _('District successfully %(locked_state)s') % \
{'locked_state': _('locked') if lock else _('unlocked')}
return HttpResponse(json.dumps(status), mimetype='application/json')
@unique_session_or_json_redirect
def getdistricts(request, planid):
"""
Get the districts in a plan at a specific version.
Parameters:
request - An HttpRequest, with the current user.
planid - The plan id to query for the districts.
    Returns:
        A JSON HttpResponse listing the districts at the requested version,
        the number of available districts remaining, and whether the latest
        edits can be undone.
    """
note_session_activity(request)
status = {'success':False}
plan = Plan.objects.filter(id=planid)
if plan.count() == 1:
plan = plan[0]
if 'version' in request.REQUEST:
version = int(request.REQUEST['version'])
else:
version = plan.version
districts = plan.get_districts_at_version(version,include_geom=False)
status['districts'] = []
status['available'] = plan.legislative_body.max_districts - len(districts) + 1
max_version = max([d.version for d in districts])
can_undo = max_version > plan.min_version
for district in districts:
status['districts'].append({
'id':district.district_id,
'short_label':' '.join(map(_, district.short_label.split(' '))),
'long_label':' '.join(map(_, district.long_label.split(' '))),
'version':district.version
})
status['canUndo'] = can_undo
status['success'] = True
else:
status['message'] = _('No plan exists with that ID.')
return HttpResponse(json.dumps(status), mimetype='application/json')
def simple_district_versioned(request, planid, district_ids=None):
"""
Emulate a WFS service for versioned districts.
This function retrieves one version of the districts in a plan, with
the value of the subject attached to the feature. This function is
necessary because a traditional view could not be used to get the
districts in a versioned fashion.
This method accepts 'version__eq' and 'subjects__eq' URL parameters.
This method accepts an optional 'district_ids__eq' parameter, which is
a comma-separated list of district_ids to filter by
Parameters:
request -- An HttpRequest, with the current user.
planid -- The plan ID from which to get the districts.
Returns:
A GeoJSON HttpResponse, describing the districts in the plan.
"""
note_session_activity(request)
status = {'type':'FeatureCollection'}
plan = Plan.objects.filter(id=planid)
if plan.count() == 1:
plan = plan[0]
if 'version__eq' in request.REQUEST:
version = request.REQUEST['version__eq']
else:
version = plan.version
subject_id = None
if 'subject__eq' in request.REQUEST:
subject_id = request.REQUEST['subject__eq']
elif plan.legislative_body.get_default_subject():
subject_id = plan.legislative_body.get_default_subject().id
geolevel = plan.legislative_body.get_geolevels()[0].id
if 'level__eq' in request.REQUEST:
geolevel = int(request.REQUEST['level__eq'])
if 'district_ids__eq' in request.REQUEST:
district_ids = request.REQUEST['district_ids__eq']
if len(district_ids) > 0:
district_ids = district_ids.split(',')
else:
district_ids = []
if subject_id:
bbox = None
if 'bbox' in request.REQUEST:
bbox = request.REQUEST['bbox']
bbox = tuple( map( lambda x: float(x), bbox.split(',')))
else:
bbox = plan.district_set.all().extent(field_name='simple')
status['features'] = plan.get_wfs_districts(version, subject_id, bbox, geolevel, district_ids)
else:
status['features'] = []
status['message'] = _('Subject for districts is required.')
else:
status['features'] = []
status['message'] = _('Query failed.')
return HttpResponse(json.dumps(status),mimetype='application/json')
def get_unlocked_simple_geometries(request,planid):
"""
Emulate a WFS service for selecting unlocked geometries.
This function retrieves all unlocked geometries within a geolevel
for a given plan. This function is necessary because a traditional
view could not be used to obtain the geometries in a versioned fashion.
This method accepts 'version__eq', 'level__eq', and 'geom__eq' URL parameters.
Parameters:
request -- An HttpRequest, with the current user.
planid -- The plan ID from which to get the districts.
Returns:
A GeoJSON HttpResponse, describing the unlocked simplified geometries
"""
note_session_activity(request)
status = {'type':'FeatureCollection'}
plan = Plan.objects.filter(id=planid)
if plan.count() == 1:
plan = plan[0]
version = request.POST.get('version__eq', plan.version)
geolevel = request.POST.get('level__eq', plan.legislative_body.get_geolevels()[0].id)
geom = request.POST.get('geom__eq', None)
if geom is not None:
try:
wkt = request.POST.get('geom__eq', None)
geom = GEOSGeometry(wkt)
except GEOSException:
wkt = request.REQUEST['geom__eq'].replace('POLYGON', 'LINESTRING')
wkt = wkt.replace('((', '(').replace('))', ')')
try:
geom = GEOSGeometry(wkt)
except GEOSException:
                    geom = None
            # If both parse attempts failed, bail out rather than querying
            # with a null geometry.
            if geom is None:
                status['features'] = []
                status['message'] = _('Geometry could not be parsed.')
                return HttpResponse(json.dumps(status),mimetype='application/json')
            selection = Q(geom__intersects=geom)
districts = [d.id for d in plan.get_districts_at_version(version, include_geom=True) if d.is_locked]
locked = District.objects.filter(id__in=districts).collect()
locked_buffered = locked.simplify(100, True).buffer(100) if locked else None
filtered = Geolevel.objects.get(id=geolevel).geounit_set.filter(selection)
features = []
for feature in filtered:
geom = feature.simple
if locked and geom.intersects(locked_buffered):
if feature.geom.within(locked):
continue
if feature.geom.overlaps(locked):
geom = geom.difference(locked_buffered)
features.append({
'id': '_%d' % feature.id,
'geometry': json.loads(geom.json),
'properties': {
'name': feature.name,
'geolevel_id': geolevel,
'id': feature.id
}
})
status['features'] = features
return HttpResponse(json.dumps(status),mimetype='application/json')
else:
status['features'] = []
status['message'] = _('Geometry is required.')
else:
status['features'] = []
status['message'] = _('Invalid plan.')
return HttpResponse(json.dumps(status),mimetype='application/json')
@unique_session_or_json_redirect
def get_statistics(request, planid):
note_session_activity(request)
status = { 'success': False }
try:
plan = Plan.objects.get(pk=planid)
except:
status['message'] = _(
"Couldn't get geography info from the server. No plan with the given id.")
return HttpResponse( json.dumps(status), mimetype='application/json', status=500)
if 'version' in request.REQUEST:
try:
version = int(request.REQUEST['version'])
except:
version = plan.version
else:
version = plan.version
try:
display = ScoreDisplay.objects.get(legislative_body=plan.legislative_body, name="%s_sidebar_demo" % plan.legislative_body.name)
except:
status['message'] = _('Unable to get Demographics ScoreDisplay')
status['exception'] = traceback.format_exc()
if 'displayId' in request.REQUEST:
try:
display = ScoreDisplay.objects.get(pk=request.POST['displayId'])
except:
status['message'] = _('Unable to get Personalized ScoreDisplay')
status['exception'] = traceback.format_exc()
else:
logger.warn('No displayId in request.')
logger.warn(str(request.POST))
try :
html = display.render(plan, request, version=version)
return HttpResponse(html, mimetype='text/html')
except Exception, ex:
status['message'] = _("Couldn't render display tab.")
status['exception'] = traceback.format_exc()
logger.warn("Couldn't render display tab")
logger.debug('Reason: %s', ex)
return HttpResponse( json.dumps(status), mimetype='application/json', status=500)
def getutc(t):
"""
Given a datetime object, translate to a datetime object for UTC time.
"""
t_tuple = t.timetuple()
t_seconds = time.mktime(t_tuple)
return t.utcfromtimestamp(t_seconds)
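# Sketch of what getutc() does (assuming the server runs in a non-UTC zone):
# time.mktime() interprets the naive timetuple as local time and yields a
# Unix timestamp; utcfromtimestamp() then re-reads that timestamp as UTC,
# shifting the wall-clock value by the local UTC offset. For example, on a
# server at UTC-5, getutc(datetime(2012, 1, 1, 12, 0)) is
# datetime(2012, 1, 1, 17, 0).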
@unique_session_or_json_redirect
def getdistrictfilestatus(request, planid):
"""
Given a plan id, return the status of the district index file
"""
note_session_activity(request)
status = { 'success':False }
plan = Plan.objects.get(pk=planid)
if not can_copy(request.user, plan):
return HttpResponseForbidden()
try:
is_shape = 'type' in request.REQUEST and request.REQUEST['type'] == 'shape'
file_status = DistrictFile.get_file_status(plan, shape=is_shape)
status['success'] = True
status['status'] = file_status
except Exception as ex:
status['message'] = _('Failed to get file status')
status['exception'] = ex
return HttpResponse(json.dumps(status),mimetype='application/json')
@unique_session_or_json_redirect
def getdistrictfile(request, planid):
"""
Given a plan id, email the user a zipped copy of
the district index file
"""
note_session_activity(request)
plan = Plan.objects.get(pk=planid)
if not can_copy(request.user, plan):
return HttpResponseForbidden()
is_shape = 'type' in request.REQUEST and request.REQUEST['type'] == 'shape'
file_status = DistrictFile.get_file_status(plan, shape=is_shape)
if file_status == 'done':
if is_shape:
archive = DistrictShapeFile.plan2shape(plan)
else:
archive = DistrictIndexFile.plan2index(plan)
        response = HttpResponse(open(archive.name, 'rb').read(), content_type='application/zip')
response['Content-Disposition'] = 'attachment; filename="%s.zip"' % plan.get_friendly_name()
else:
if is_shape:
DistrictShapeFile.plan2shape.delay(plan)
else:
DistrictIndexFile.plan2index.delay(plan)
response = HttpResponse(_('File is not yet ready. Please try again in '
'a few minutes'))
return response
@unique_session_or_json_redirect
def emaildistrictindexfile(request, planid):
"""
Given a plan id, email a zipped copy of the district
index file to a specified address
"""
note_session_activity(request)
if request.method != 'POST':
return HttpResponseForbidden()
plan = Plan.objects.get(pk=planid)
if not can_copy(request.user, plan):
return HttpResponseForbidden()
DistrictIndexFile.emailfile.delay(plan, request.user, request.POST, translation.get_language())
return HttpResponse(json.dumps({
'success': True,
'message': _('Task submitted') }),
mimetype='application/json')
def getvalidplans(leg_body, owner=None):
"""
Returns the valid plans for a given legislative body and owner (optional)
"""
pfilter = Q(legislative_body=leg_body) & Q(is_valid=True)
if owner is not None:
pfilter = pfilter & Q(owner=owner)
return list(Plan.objects.filter(pfilter))
def getleaderboarddisplay(leg_body, owner_filter):
"""
Returns the leaderboard ScoreDisplay given a legislative body and owner
"""
try:
return ScoreDisplay.objects.get(name="%s_leader_%s" % (leg_body.name, owner_filter))
except:
return None
def getleaderboard(request):
"""
Get the rendered leaderboard
"""
note_session_activity(request)
if not using_unique_session(request.user):
return HttpResponseForbidden()
owner_filter = request.REQUEST['owner_filter']
    body_pk = int(request.REQUEST['legislative_body'])
leg_body = LegislativeBody.objects.get(pk=body_pk)
display = getleaderboarddisplay(leg_body, owner_filter)
if display is None:
return HttpResponse(_('No display configured'), mimetype='text/plain')
plans = getvalidplans(leg_body, request.user if owner_filter == 'mine' else None)
try :
html = display.render(plans, request)
return HttpResponse(html, mimetype='text/html; charset=utf-8')
except Exception, ex:
logger.warn('Leaderboard could not be fetched.')
logger.debug('Reason: %s', ex)
return HttpResponse(str(ex), mimetype='text/plain')
def getleaderboardcsv(request):
"""
Get the leaderboard scores in csv form
"""
note_session_activity(request)
if not using_unique_session(request.user):
return HttpResponseForbidden()
    owner_filter = request.REQUEST['owner_filter']
    body_pk = int(request.REQUEST['legislative_body'])
    leg_body = LegislativeBody.objects.get(pk=body_pk)
    display = getleaderboarddisplay(leg_body, owner_filter)
    if display is None:
        return HttpResponse(_('No display configured'), mimetype='text/plain')
    plans = getvalidplans(leg_body, request.user if owner_filter == 'mine' else None)
    panels = display.scorepanel_set.all().order_by('position')
try :
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=leaderboard_scores.csv'
writer = csv.writer(response)
writer.writerow(['Plan ID', 'Plan Name', 'User Name'] + [p.__unicode__() for p in panels])
for plan in plans:
row = [plan.id, plan.name, plan.owner.username]
for panel in panels:
function = panel.score_functions.all()[0]
score = ComputedPlanScore.compute(function, plan)
row.append(score['value'])
writer.writerow(row)
return response
except Exception, ex:
logger.warn("Couldn't generate CSV of leaderboard.")
logger.debug('Reason: %s', ex)
return HttpResponse(str(ex), mimetype='text/plain')
def getplans(request):
"""
Get the plans for the given user and return the data in a format readable
by the jqgrid
"""
note_session_activity(request)
if not using_unique_session(request.user):
return HttpResponseForbidden()
if request.method == 'POST':
page = int(request.POST.get('page', 1))
rows = int(request.POST.get('rows', 10))
sidx = request.POST.get('sidx', 'id')
sord = request.POST.get('sord', 'asc')
        owner_filter = request.POST.get('owner_filter')
        body_pk = request.POST.get('legislative_body')
        body_pk = int(body_pk) if body_pk else body_pk
        search = request.POST.get('_search', False)
        search_string = request.POST.get('searchString', '')
        is_community = request.POST.get('is_community', False) == 'true'
else:
return HttpResponseForbidden()
end = page * rows
start = end - rows
if owner_filter == 'template':
available = Q(is_template=True)
elif owner_filter == 'shared':
available = Q(is_shared=True)
elif owner_filter == 'mine':
if request.user.is_anonymous():
return HttpResponseForbidden()
else:
available = Q(owner__exact=request.user)
elif owner_filter == 'all_available':
available = Q(is_template=True) | Q(is_shared=True)
if not request.user.is_anonymous():
available = available | Q(owner__exact=request.user)
else:
return HttpResponseBadRequest(_("Unknown filter method."))
not_creating = ~Q(processing_state=ProcessingState.CREATING) & ~Q(processing_state=ProcessingState.UNKNOWN)
if sidx.startswith('fields.'):
sidx = sidx[len('fields.'):]
if sidx == 'owner':
sidx = 'owner__username'
if sidx == 'plan_type':
sidx = 'legislative_body__name'
if sord == 'desc':
sidx = '-' + sidx
if search:
search_filter = Q(name__icontains = search_string) | Q(description__icontains = search_string) | Q(owner__username__icontains = search_string)
    else:
        # An empty Q() is a no-op filter, so it can be passed straight to filter().
        search_filter = Q()
if body_pk:
body_filter = Q(legislative_body=body_pk)
all_plans = Plan.objects.filter(available, not_creating, body_filter, search_filter).order_by(sidx)
else:
community_filter = Q(legislative_body__is_community=is_community)
all_plans = Plan.objects.filter(available, not_creating, search_filter, community_filter).order_by(sidx)
if all_plans.count() > 0:
total_pages = math.ceil(all_plans.count() / float(rows))
else:
total_pages = 1
plans = all_plans[start:end]
plans_list = list()
for plan in plans:
plans_list.append({
'pk': plan.id,
'fields': {
'name': plan.name,
'description': plan.description,
'edited': time.mktime(plan.edited.timetuple()),
'is_template': plan.is_template,
'is_shared': plan.is_shared,
'owner': plan.owner.username,
'districtCount': '--',
'can_edit': can_edit(request.user, plan),
'plan_type': plan.legislative_body.get_long_description(),
'processing_state': plan.get_processing_state_display()
}
})
json_response = "{ \"total\":\"%d\", \"page\":\"%d\", \"records\":\"%d\", \"rows\":%s }" % (total_pages, page, len(all_plans), json.dumps(plans_list))
return HttpResponse(json_response,mimetype='application/json')
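# The hand-built envelope above is the shape jqgrid expects, e.g. (illustrative values):
# { "total":"3", "page":"1", "records":"25", "rows":[{"pk": 1, "fields": {...}}, ...] }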
def get_shared_districts(request, planid):
"""
Get the shared districts in a given plan and return the
data in a format readable by the jqgrid
"""
note_session_activity(request)
if not using_unique_session(request.user):
return HttpResponseForbidden()
if request.method == 'POST':
page = int(request.POST.get('page', 1))
rows = int(request.POST.get('rows', 10))
else:
return HttpResponseForbidden()
end = page * rows
start = end - rows
try:
plan = Plan.objects.get(pk=planid)
if not can_copy(request.user, plan):
return HttpResponseForbidden()
all_districts = plan.get_districts_at_version(plan.version, include_geom=False)
except:
plan = None
all_districts = ()
if len(all_districts) > 0:
total_pages = math.ceil(len(all_districts) / float(rows))
else:
total_pages = 1
districts = all_districts[start:end]
districts_list = list()
for district in districts:
if not district.is_unassigned:
districts_list.append({
'pk': district.id,
'fields': {
'short_label': district.short_label,
'long_label': district.long_label,
'district_id': district.district_id,
}
})
json_response = "{ \"total\":\"%d\", \"page\":\"%d\", \"records\":\"%d\", \"rows\":%s }" % (total_pages, page, len(all_districts), json.dumps(districts_list))
return HttpResponse(json_response,mimetype='application/json')
@login_required
@unique_session_or_json_redirect
def editplanattributes(request, planid):
"""
Edit the attributes of a plan. Attributes of a plan are the name and/or
description.
"""
note_session_activity(request)
status = { 'success': False }
if request.method != 'POST':
return HttpResponseNotAllowed(['POST'])
new_name = request.POST.get('name', None)
new_description = request.POST.get('description', '')
if not planid or not (new_name or new_description):
return HttpResponseBadRequest(
            _('Must declare planId and a name or description'))
plan = Plan.objects.filter(pk=planid,owner=request.user)
if plan.count() == 1:
plan = plan[0]
        if new_name is not None:
plan.name = new_name
plan.description = new_description
try:
plan.save()
status['success'] = True
status['message'] = _('Updated plan attributes')
except Exception, ex:
status['message'] = _('Failed to save the changes to your plan')
status['exception'] = ex
logger.warn('Could not save changes to plan.')
logger.debug('Reason: %s', ex)
else:
status['message'] = _("Cannot edit a plan you don't own.")
return HttpResponse(json.dumps(status), mimetype='application/json')
@login_required
@unique_session_or_json_redirect
def deleteplan(request, planid):
"""
Delete a plan
"""
note_session_activity(request)
status = { 'success': False }
if request.method != 'POST':
return HttpResponseNotAllowed(['POST'])
if not planid:
return HttpResponseBadRequest(_('Must declare planId'))
plan = Plan.objects.filter(pk=planid,owner=request.user)
if plan.count() == 1:
plan = plan[0]
try:
plan.delete()
status['success'] = True
status['message'] = _('Deleted plan')
except Exception, ex:
status['message'] = _('Failed to delete plan')
status['exception'] = ex
logger.warn('Could not delete plan.')
logger.debug('Reason: %s', ex)
else:
status['message'] = _("Cannot delete a plan you don't own.")
return HttpResponse(json.dumps(status), mimetype='application/json')
@login_required
@unique_session_or_json_redirect
def reaggregateplan(request, planid):
"""
Reaggregate a plan
"""
note_session_activity(request)
status = { 'success': False }
if request.method != 'POST':
return HttpResponseNotAllowed(['POST'])
if not planid:
return HttpResponseBadRequest(_('Must declare planId'))
plan = Plan.objects.filter(pk=planid,owner=request.user)
if plan.count() == 1:
plan = plan[0]
try:
reaggregate_plan.delay(plan.id)
plan.processing_state = ProcessingState.REAGGREGATING
plan.save()
status['success'] = True
status['message'] = _('Reaggregating plan')
except Exception, ex:
status['message'] = _('Failed to reaggregate plan')
status['exception'] = ex
logger.warn('Could not reaggregate plan.')
logger.debug('Reason: %s', ex)
else:
status['message'] = _("Cannot reaggregate a plan you don't own.")
return HttpResponse(json.dumps(status), mimetype='application/json')
def get_health(request):
def num_users(minutes):
users = 0
for session in Session.objects.all():
try:
decoded = session.get_decoded()
except:
session.delete()
continue
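            # activity_time appears to be stored as (last activity + SESSION_TIMEOUT);
            # subtracting the timeout recovers the actual last-activity timestamp.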
if 'activity_time' in decoded:
activity_delta = decoded['activity_time'] - timedelta(0,0,0,0,settings.SESSION_TIMEOUT)
if activity_delta > (datetime.now() - timedelta(0,0,0,0,minutes)):
users += 1
return users
try:
result = _('Health retrieved at %(time)s\n') % {'time': datetime.now()}
result += _('%(plan_count)d plans in database\n') % \
{'plan_count': Plan.objects.all().count()}
result += _('%(session_count)d sessions in use out of %(session_limit)s\n') % \
{'session_count': Session.objects.all().count(),
'session_limit': settings.CONCURRENT_SESSIONS}
result += _('%(num_users)d active users over the last 10 minutes\n') % \
{'num_users': num_users(10)}
space = os.statvfs('/projects/PublicMapping')
result += _('%(mb_free)s MB of disk space free\n') % \
{'mb_free': ((space.f_bsize * space.f_bavail) / (1024*1024))}
result += _('Memory Usage:\n%(mem_free)s\n') % \
{'mem_free': commands.getoutput('free -m')}
return HttpResponse(result, mimetype='text/plain')
except:
return HttpResponse(_("ERROR! Couldn't get health:\n%s") % traceback.format_exc())
def statistics_sets(request, planid):
result = { 'success': False }
plan = Plan.objects.filter(id=planid)
if plan.count() == 0:
result['message'] = _('No plan with that ID exists.')
return HttpResponse(json.dumps(result),mimetype='application/json')
else:
plan = plan[0]
if request.method == 'GET':
sets = []
scorefunctions = []
user_functions = ScoreFunction.objects.filter(selectable_bodies=plan.legislative_body).order_by('name')
for f in user_functions:
if 'report' not in f.name.lower() and 'comments' not in f.name.lower():
scorefunctions.append({ 'id': f.id, 'name': force_escape(f.get_label()) })
result['functions'] = scorefunctions
admin_display_names = [
"%s_sidebar_demo" % plan.legislative_body.name,
]
if plan.legislative_body.is_community:
admin_display_names.append("%s_sidebar_comments" %
plan.legislative_body.name)
else:
admin_display_names.append("%s_sidebar_basic" %
plan.legislative_body.name)
admin_displays = ScoreDisplay.objects.filter(
owner__is_superuser=True,
legislative_body=plan.legislative_body,
name__in=admin_display_names
)
for admin_display in admin_displays:
sets.append({
'id': admin_display.id,
'name': force_escape(admin_display.get_label()),
'functions': [],
'mine':False
})
try:
user_displays = ScoreDisplay.objects.filter(
owner=request.user,
legislative_body=plan.legislative_body,
is_page=False).order_by('title')
result['displays_count'] = len(user_displays)
for display in user_displays:
functions = []
for panel in display.scorepanel_set.all():
if panel.type == 'district':
functions = map(lambda x: x.id, panel.score_functions.all())
if len(functions) == 0:
result['message'] = _("No functions for %(panel)s") % \
{'panel_name': panel}
sets.append({ 'id': display.id, 'name': force_escape(display.__unicode__()), 'functions': functions, 'mine': display.owner==request.user })
except Exception, ex:
result['message'] = _('No user displays for %(user)s') % \
{'user': request.user}
logger.warn('Error fetching ScoreDisplays for user')
logger.debug('Reason: %s', ex)
result['sets'] = sets
result['success'] = True
elif request.method == 'POST' and 'delete' in request.POST:
try:
display = ScoreDisplay.objects.get(pk=request.REQUEST.get('id', -1))
result['set'] = {'name':force_escape(display.__unicode__()), 'id':display.id}
qset = display.scorepanel_set.all()
for panel in qset:
if panel.displays.count() == 1:
panel.delete()
display.delete()
result['success'] = True
except Exception, ex:
result['message'] = _("Couldn't delete personalized scoredisplay")
result['exception'] = traceback.format_exc()
logger.warn("Couldn't delete personalized ScoreDisplay")
logger.debug('Reason: %s', ex)
elif request.method == 'POST':
def validate_num(user, limit=3):
return ScoreDisplay.objects.filter(owner=user, legislative_body=plan.legislative_body, is_page=False).count() < limit
if 'functions[]' in request.POST:
functions = request.POST.getlist('functions[]')
functions = map(lambda x: int(x), functions)
try:
display = ScoreDisplay.objects.get(title=request.POST.get('name'), owner=request.user)
display = display.copy_from(display=display, functions=functions)
except:
limit = 3
if validate_num(request.user, limit):
demo = ScoreDisplay.objects.filter(
owner__is_superuser=True,
legislative_body=plan.legislative_body,
is_page=False,
title="Demographics"
)
for disp in demo:
has_comments = False
for pnl in disp.scorepanel_set.all():
for fn in pnl.score_functions.all():
has_comments = has_comments or fn.calculator.endswith('.Comments')
if not has_comments:
demo = disp
break
display = ScoreDisplay()
display = display.copy_from(display=demo, title=request.POST.get('name'), owner=request.user, functions=functions)
result['newRecord'] = True
else:
result['message'] = _('Each user is limited to %(limit)d '
'statistics sets. Please delete one or edit '
'an existing set.') % { 'limit': limit }
result['error'] = 'limit'
return HttpResponse(json.dumps(result),mimetype='application/json')
result['set'] = {'name':force_escape(display.__unicode__()), 'id':display.id, 'functions':functions, 'mine': display.owner==request.user}
result['success'] = True
else:
result['message'] = _("Didn't get functions in POST parameter")
return HttpResponse(json.dumps(result),mimetype='application/json')
def purge_plan_clear_cache(district, version):
"""
This is a helper method that purges a plan after a version, and clears
any pre-computed scores at the specified version.
"""
district.plan.purge(after=version)
district.plan.version = version
district.plan.save()
cache = district.computeddistrictscore_set.filter(function__calculator__endswith='.Comments')
cache.delete()
@unique_session_or_json_redirect
def district_info(request, planid, district_id):
"""
Get the comments that are attached to a district.
Parameters:
request -- An HttpRequest
planid -- The plan ID
district_id -- The district ID, this is the district number in a plan, and NOT the id of a district.
"""
status = { 'success': False }
plan = Plan.objects.filter(id=planid)
if plan.count() == 0:
status['message'] = _('No plan with that ID was found.')
else:
plan = plan[0]
version = plan.version
if 'version' in request.REQUEST:
try:
version = int(request.REQUEST['version'])
version = min(plan.version, int(version))
except:
pass
district_id = int(district_id)
district = plan.get_districts_at_version(version, include_geom=False)
district = filter(lambda d:d.district_id==district_id, district)
if request.method == 'POST':
district = plan.district_set.get(id=request.POST['object_pk'])
district.short_label = request.POST['district_short'][0:10]
district.long_label = request.POST['district_long'][0:256]
if district.version < version:
district_copy = copy.copy(district)
district_copy.id = None
district_copy.version = version
district_copy.save()
district_copy.clone_relations_from(district)
district = district_copy
else:
district.save()
has_comment = 'comment' in request.POST and request.POST['comment'] != ''
if has_comment:
ct = ContentType.objects.get(app_label='redistricting',model='district')
Comment.objects.filter(object_pk = district.id, content_type=ct).delete()
comment = Comment(
object_pk=district.id,
content_type=ct,
site_id=Site.objects.get_current().id,
user_name=request.user.username,
user_email=request.user.email,
comment=request.POST['comment'])
comment.save()
else:
district.save()
tset = Tag.objects.get_for_object(district).filter(name__startswith='type')
TaggedItem.objects.filter(tag__in=tset, object_id=district.id).delete()
purge_plan_clear_cache(district, version)
if len(request.REQUEST.getlist('type[]')) > 0:
strtags = request.REQUEST.getlist('type[]')
for strtag in strtags:
if strtag == '':
continue
if strtag.count(' ') > 0:
strtag = '"type=%s"' % strtag
else:
strtag = 'type=%s' % strtag
Tag.objects.add_tag(district, strtag)
status['version'] = version
status['success'] = True
return HttpResponse(json.dumps(status), mimetype='application/json')
def plan_feed(request):
feed = loader.get_template('plan_feed.xml')
plans = Plan.objects.all().order_by('-edited')[0:10]
geolevel = plans[0].legislative_body.get_geolevels()[0]
extent = geolevel.geounit_set.collect().extent
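    # Fit the preview map into a 500px box while preserving the aspect ratio of
    # the extent, which is an (xmin, ymin, xmax, ymax) tuple.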
if extent[2] - extent[0] > extent[3] - extent[1]:
width = 500
height = int(500 * (extent[3]-extent[1]) / (extent[2]-extent[0]))
else:
width = int(500 * (extent[2]-extent[0]) / (extent[3]-extent[1]))
height = 500
mapserver = settings.MAP_SERVER if settings.MAP_SERVER != '' else request.META['SERVER_NAME']
context = {
'plans': plans,
'mapserver': mapserver,
'mapserver_ns': settings.MAP_SERVER_NS,
'extent': extent,
'width': width,
'height': height
}
xml = feed.render(DjangoContext(context))
return HttpResponse(xml, mimetype='application/atom+xml')
def share_feed(request):
feed = loader.get_template('shared_feed.xml')
plans = Plan.objects.filter(is_shared=True).order_by('-edited')[0:10]
    if plans.count() > 0:
geolevel = plans[0].legislative_body.get_geolevels()[0]
extent = geolevel.geounit_set.collect().extent
if extent[2] - extent[0] > extent[3] - extent[1]:
width = 500
height = int(500 * (extent[3]-extent[1]) / (extent[2]-extent[0]))
else:
width = int(500 * (extent[2]-extent[0]) / (extent[3]-extent[1]))
height = 500
else:
extent = (0,0,0,0,)
width = 1
height = 1
mapserver = settings.MAP_SERVER if settings.MAP_SERVER != '' else request.META['SERVER_NAME']
context = {
'plans': plans,
'mapserver': mapserver,
'mapserver_ns': settings.MAP_SERVER_NS,
'extent': extent,
'width': width,
'height': height
}
xml = feed.render(DjangoContext(context))
return HttpResponse(xml, mimetype='application/atom+xml')
|
data/Starou/SimpleIDML/setup.py
|
import os
from distutils.core import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name="SimpleIDML",
version="0.92.4",
license='BSD Licence',
author='Stanislas Guerra',
author_email='stanislas.guerra@gmail.com',
description='A library to manipulate Adobe(r) IDML(r) files.',
long_description=README,
package_dir={'': 'src'},
packages=[
'simple_idml',
'simple_idml.indesign',
],
package_data={
'simple_idml.indesign': [
'scripts/*.jsx',
]
},
data_files=[],
scripts=[
'src/scripts/simpleidml_create_package_from_dir.py',
'src/scripts/simpleidml_indesign_save_as.py',
'src/scripts/simpleidml_indesign_close_all_documents.py',
],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Multimedia :: Graphics',
'Topic :: Printing',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
data/RoseOu/flasky/venv/lib/python2.7/site-packages/forgery_py/forgery/name.py
|
"""Generate random names and name-related strings."""
import random
from ..dictionaries_loader import get_dictionary
__all__ = [
'first_name', 'last_name', 'full_name', 'male_first_name',
'female_first_name', 'company_name', 'job_title', 'job_title_suffix',
'title', 'suffix', 'location', 'industry'
]
def first_name():
"""Random male of female first name."""
_dict = get_dictionary('male_first_names')
_dict += get_dictionary('female_first_names')
return random.choice(_dict).strip()
def last_name():
"""Random last name."""
return random.choice(get_dictionary('last_names')).strip()
def full_name():
"""
Random full name. Equivalent of ``first_name() + ' ' + last_name()``.
"""
return first_name() + ' ' + last_name()
def male_first_name():
"""Random male first name."""
return random.choice(get_dictionary('male_first_names')).strip()
def female_first_name():
"""Random female first name."""
return random.choice(get_dictionary('female_first_names')).strip()
def company_name():
"""Random company name."""
return random.choice(get_dictionary('company_names')).strip()
def job_title():
"""Random job title."""
result = random.choice(get_dictionary('job_titles')).strip()
    # Substitute the '#{N}' placeholder in the title with a random suffix.
    result = result.replace('#{N}', job_title_suffix())
    return result
def job_title_suffix():
"""Random job title suffix."""
return random.choice(get_dictionary('job_title_suffixes')).strip()
def title():
"""Random name title, e.g. ``Mr``."""
return random.choice(get_dictionary('name_titles')).strip()
def suffix():
"""Random name suffix, e.g. ``Jr``."""
return random.choice(get_dictionary('name_suffixes')).strip()
def location():
"""Random location name, e.g. ``MI6 Headquarters``."""
return random.choice(get_dictionary('locations')).strip()
def industry():
"""Random industry name."""
return random.choice(get_dictionary('industries')).strip()
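# A minimal usage sketch (outputs depend on the bundled dictionaries):
#
#     >>> full_name()   # e.g. 'John Smith'
#     >>> job_title()   # e.g. 'Senior Engineer'
#     >>> location()    # e.g. 'MI6 Headquarters'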
|
data/Jakemichaeldrew/Ctrl-S/build/bdist.win32/winexe/temp/bz2.py
|
def __load():
import imp, os, sys
try:
dirname = os.path.dirname(__loader__.archive)
except NameError:
dirname = sys.prefix
path = os.path.join(dirname, 'bz2.pyd')
mod = imp.load_dynamic(__name__, path)
__load()
del __load
|
data/SimonSapin/cairocffi/cairocffi/xcb.py
|
"""
cairocffi.xcb
~~~~~~~~~~~~~
Bindings for XCB surface objects using xcffib.
:copyright: Copyright 2014 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
from xcffib import visualtype_to_c_struct
from . import cairo, constants
from .surfaces import Surface, SURFACE_TYPE_TO_CLASS
class XCBSurface(Surface):
"""The XCB surface is used to render cairo graphics to X Window System
windows and pixmaps using the XCB library.
Creates a cairo surface that targets the given drawable (pixmap or window).
.. note::
This class works using objects and libraries in :mod:`xcffib`
:param conn: The :class:`xcffib.Connection` for an open XCB connection
:param drawable:
An XID corresponding to an XCB drawable (a pixmap or a window)
:param visual: An :class:`xcffib.xproto.VISUALTYPE` object.
:param width: integer
:param height: integer
"""
def __init__(self, conn, drawable, visual, width, height):
c_visual = visualtype_to_c_struct(visual)
p = cairo.cairo_xcb_surface_create(
conn._conn, drawable, c_visual, width, height)
Surface.__init__(self, p)
def set_size(self, width, height):
"""
Informs cairo of the new size of the X Drawable underlying the surface.
For a surface created for a Window (rather than a Pixmap), this
function must be called each time the size of the window changes (for
a subwindow, you are normally resizing the window yourself, but for a
toplevel window, it is necessary to listen for
:class:`xcffib.xproto.ConfigureNotifyEvent`'s).
A Pixmap can never change size, so it is never necessary to call this
function on a surface created for a Pixmap.
:param width: integer
:param height: integer
"""
cairo.cairo_xcb_surface_set_size(self._pointer, width, height)
self._check_status()
SURFACE_TYPE_TO_CLASS[constants.SURFACE_TYPE_XCB] = XCBSurface
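# A minimal usage sketch (illustrative only: the connection, window id and
# matching VISUALTYPE must come from a live xcffib session):
#
#     import xcffib
#     conn = xcffib.connect()
#     # ... create a window and look up its xcffib.xproto.VISUALTYPE ...
#     # surface = XCBSurface(conn, window_id, visual, 640, 480)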
|
data/Pylons/pylons/tests/test_units/test_decorator_jsonify.py
|
import warnings
from paste.fixture import TestApp
from paste.registry import RegistryManager
from __init__ import TestWSGIController
def make_cache_controller_app():
from pylons.testutil import ControllerWrap, SetupCacheGlobal
from pylons.decorators import jsonify
from pylons.controllers import WSGIController
class CacheController(WSGIController):
@jsonify
def test_bad_json(self):
return ["this is neat"]
@jsonify
def test_bad_json2(self):
return ("this is neat",)
@jsonify
def test_good_json(self):
return dict(fred=42)
environ = {}
app = ControllerWrap(CacheController)
app = sap = SetupCacheGlobal(app, environ)
app = RegistryManager(app)
app = TestApp(app)
return app, environ
class TestJsonifyDecorator(TestWSGIController):
def setUp(self):
self.app, environ = make_cache_controller_app()
TestWSGIController.setUp(self)
environ.update(self.environ)
warnings.simplefilter('error', Warning)
def tearDown(self):
warnings.simplefilter('always', Warning)
def test_bad_json(self):
for action in 'test_bad_json', 'test_bad_json2':
try:
response = self.get_response(action=action)
except Warning, msg:
assert 'JSON responses with Array envelopes are' in msg[0]
def test_good_json(self):
response = self.get_response(action='test_good_json')
assert '{"fred": 42}' in response
assert response.header('Content-Type') == 'application/json; charset=utf-8'
|
data/NervanaSystems/neon/examples/fast-rcnn/voc_eval.py
|
"""
The mAP evaluation script and various util functions are from:
https://github.com/rbgirshick/py-faster-rcnn/commit/45e0da9a246fab5fd86e8c96dc351be7f145499f
"""
import xml.etree.ElementTree as ET
import os
import cPickle
import numpy as np
def parse_rec(filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
objects.append(obj_struct)
return objects
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
i = np.where(mrec[1:] != mrec[:-1])[0]
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
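# Worked example for voc_ap (interpolated metric, use_07_metric=False): with
# rec=[0.5, 1.0] and prec=[1.0, 0.5], mpre becomes [1.0, 1.0, 0.5, 0.0] after the
# backward maximum, so ap = (0.5-0.0)*1.0 + (1.0-0.5)*0.5 = 0.75:
#
#     voc_ap(np.array([0.5, 1.0]), np.array([1.0, 0.5]))  # -> 0.75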
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
cachedir,
ovthresh=0.5,
use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
cachefile = os.path.join(cachedir, 'annots.pkl')
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
if not os.path.isfile(cachefile):
recs = {}
for i, imagename in enumerate(imagenames):
recs[imagename] = parse_rec(annopath.format(imagename))
if i % 100 == 0:
print 'Reading annotation for {:d}/{:d}'.format(
i + 1, len(imagenames))
print 'Saving cached annotations to {:s}'.format(cachefile)
with open(cachefile, 'w') as f:
cPickle.dump(recs, f)
else:
with open(cachefile, 'r') as f:
recs = cPickle.load(f)
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
difficult = np.array([x['difficult'] for x in R]).astype(np.bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
sorted_ind = np.argsort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
(BBGT[:, 2] - BBGT[:, 0] + 1.) *
(BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
prec = tp / (tp + fp + 1e-10)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
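# A hypothetical invocation (all paths below are illustrative placeholders):
#
#     rec, prec, ap = voc_eval('results/det_test_{:s}.txt', 'annots/{:s}.xml',
#                              'test.txt', 'car', 'cache', ovthresh=0.5)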
|
data/HenryHu/pybbs/Config.py
|
from ConfigParser import *
from StringIO import *
from Log import Log
import datetime
class Config:
@staticmethod
def LoadConfig():
Config.parser = ConfigParser()
try:
sconff = open(CONFIG_FILE, "r")
except:
Log.warn("cannot open config file")
return
sconf = StringIO()
sconf.write("[sysconf]\n")
sconf.write(sconff.read())
sconf.seek(0)
Config.parser.readfp(sconf)
sconff.close()
sconf.close()
return
@staticmethod
def GetBoardsFile():
return BOARDS_FILE
@staticmethod
def GetInt(name, defval):
if (Config.parser.has_option('sysconf', name)):
return Config.parser.getint('sysconf', name)
else:
return defval
@staticmethod
def GetString(name, defval):
if (Config.parser.has_option('sysconf', name)):
val = Config.parser.get('sysconf', name)
if (val[0] == '"' and val.endswith('"')):
val = val[1:-1]
return val.decode('gbk')
else:
return defval
BBS_ROOT = '/home/bbs/'
BBS_XMPP_CERT_FILE = BBS_ROOT + "xmpp.crt"
BBS_XMPP_KEY_FILE = BBS_ROOT + "xmpp.key"
BOARDS_FILE = BBS_ROOT + '.BOARDS'
STRLEN = 80
ARTICLE_TITLE_LEN = 60
BM_LEN = 60
MAXBOARD = 400
CONFIG_FILE = BBS_ROOT + 'etc/sysconf.ini'
FILENAME_LEN = 20
OWNER_LEN = 30
SESSIONID_LEN = 32
REFRESH_TOKEN_LEN = 128
NAMELEN = 40
IDLEN = 12
MD5PASSLEN = 16
OLDPASSLEN = 14
MOBILE_NUMBER_LEN = 17
MAXCLUB = 128
MAXUSERS = 20000
MAX_MSG_SIZE = 1024
MAXFRIENDS = 400
MAXMESSAGE = 5
MAXSIGLINES = 6
IPLEN = 16
DEFAULTBOARD = "sysop"
BLESS_BOARD = "happy_birthday"
QUOTED_LINES = 10
MAXACTIVE = 8000
USHM_SIZE = MAXACTIVE + 10
UTMP_HASHSIZE = USHM_SIZE * 4
UCACHE_SEMLOCK = 0
LEN_FRIEND_EXP = 15
REFRESH_TIME = 30
USER_TITLE_LEN = 18
SESSION_TIMEOUT = datetime.timedelta(30)
SESSION_TIMEOUT_SECONDS = 86400*30
XMPP_IDLE_TIME = 300
XMPP_LONG_IDLE_TIME = 1800
XMPP_UPDATE_TIME_INTERVAL = 10
XMPP_PING_TIME_INTERVAL = 60
PUBLIC_SHMKEY = 3700
MAX_ATTACHSIZE = 20 * 1024 * 1024
BMDEL_DECREASE = True
SYSMAIL_BOARD = "sysmail"
ADD_EDITMARK = True
SEARCH_COUNT_LIMIT = 20
MAIL_SIZE_LIMIT = -1
SEC_DELETED_OLDHOME = 3600 * 24 * 3
SELF_INTRO_MAX_LEN = 800
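# A minimal usage sketch (the option names queried here are illustrative):
#
#     Config.LoadConfig()
#     max_users = Config.GetInt('maxusers', MAXUSERS)
#     default_board = Config.GetString('defaultboard', DEFAULTBOARD)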
|
data/SmartTeleMax/iktomi/iktomi/forms/shortcuts.py
|
from . import convs, widgets, fields
from iktomi.utils.i18n import N_
class PasswordConv(convs.Char):
error_mismatch = N_('password and confirm mismatch')
error_required = N_('password required')
def from_python(self, value):
return dict([(field.name, None) for field in self.field.fields])
def get_initial(self):
return ''
def to_python(self, value):
etalon = value[list(value)[0]]
for field in self.field.fields:
self.assert_(value[field.name] == etalon,
self.error_mismatch)
if self.required:
self.assert_(etalon not in (None, ''), self.error_required)
elif etalon in (None, ''):
return None
return etalon
def PasswordSet(name='password',
min_length=3, max_length=200, required=False,
password_label=None, confirm_label='confirm', filters=(),
**kwargs):
char = convs.Char(convs.length(min_length, max_length), *filters,
**dict(required=required))
items = (('pass', password_label), ('conf', confirm_label))
kwargs['fields'] = [fields.Field(subfieldname,
conv=char,
label=label,
widget=widgets.PasswordInput)
for subfieldname, label in items]
kwargs.setdefault('conv', PasswordConv(required=required))
kwargs.setdefault('widget', widgets.FieldSetWidget(
template='widgets/fieldset-line'))
return fields.FieldSet(name, get_initial=lambda: '', **kwargs)
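# A minimal usage sketch (assumes an iktomi Form subclass; names are illustrative):
#
#     class RegistrationForm(Form):
#         fields = [
#             fields.Field('login', conv=convs.Char(required=True)),
#             PasswordSet('password', required=True),
#         ]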
|
data/VisualComputingInstitute/Beacon8/beacon8/init/Normal.py
|
import numpy as _np
def normal(std):
def init(shape, fan):
return std*_np.random.randn(*shape)
return init
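# A minimal usage sketch: `normal` returns an initializer closure that layers can
# call with a parameter shape (the `fan` argument is accepted but unused here):
#
#     init = normal(0.01)
#     W = init((128, 64), fan=None)   # 128x64 array drawn from N(0, 0.01**2)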
|
data/LxMLS/lxmls-toolkit/lxmls/sequences/id_feature.py
|
from lxmls.sequences.label_dictionary import *
import pdb
class IDFeatures:
'''
Base class to extract features from a particular dataset.
    feature_dict --> Dictionary of all existing features maps feature_name (string) --> feature_id (int)
    feature_names --> List of feature names. Each position is the feature_id and contains the feature name
nr_feats --> Total number of features
feature_list --> For each sentence in the corpus contains a pair of node feature and edge features
dataset --> The original dataset for which the features were extracted
Caches (for speedup):
initial_state_feature_cache -->
node_feature_cache -->
edge_feature_cache -->
final_state_feature_cache -->
'''
def __init__(self, dataset):
'''dataset is a sequence list.'''
self.feature_dict = LabelDictionary()
self.feature_list = []
self.add_features = False
self.dataset = dataset
self.node_feature_cache = {}
self.initial_state_feature_cache = {}
self.final_state_feature_cache = {}
self.edge_feature_cache = {}
def get_num_features(self):
return len(self.feature_dict)
def build_features(self):
'''
Generic function to build features for a given dataset.
Iterates through all sentences in the dataset and extracts its features,
saving the node/edge features in feature list.
'''
self.add_features = True
for sequence in self.dataset.seq_list:
initial_features, transition_features, final_features, emission_features = \
self.get_sequence_features(sequence)
self.feature_list.append([initial_features, transition_features, final_features, emission_features])
self.add_features = False
def get_sequence_features(self, sequence):
'''
Returns the features for a given sequence.
For a sequence of size N returns:
Node_feature a list of size N. Each entry contains the node potentials for that position.
Edge_features a list of size N+1.
- Entry 0 contains the initial features
- Entry N contains the final features
- Entry i contains entries mapping the transition from i-1 to i.
'''
emission_features = []
initial_features = []
transition_features = []
final_features = []
features = []
features = self.add_initial_features(sequence, sequence.y[0], features)
initial_features.append(features)
for pos, tag in enumerate(sequence.y):
features = []
features = self.add_emission_features(sequence, pos, sequence.y[pos], features)
emission_features.append(features)
if pos > 0:
prev_tag = sequence.y[pos-1]
features = []
features = self.add_transition_features(sequence, pos-1, tag, prev_tag, features)
transition_features.append(features)
features = []
features = self.add_final_features(sequence, sequence.y[-1], features)
final_features.append(features)
return initial_features, transition_features, final_features, emission_features
def get_emission_features(self, sequence, pos, y):
all_feat = []
x = sequence.x[pos]
if(x not in self.node_feature_cache):
self.node_feature_cache[x] = {}
if(y not in self.node_feature_cache[x]):
node_idx = []
node_idx = self.add_emission_features(sequence, pos, y, node_idx)
self.node_feature_cache[x][y] = node_idx
idx = self.node_feature_cache[x][y]
all_feat = idx[:]
return all_feat
def get_transition_features(self, sequence, pos, y, y_prev):
assert(pos >= 0 and pos < len(sequence.x)), pdb.set_trace()
if(y not in self.edge_feature_cache):
self.edge_feature_cache[y] = {}
if(y_prev not in self.edge_feature_cache[y]):
edge_idx = []
edge_idx = self.add_transition_features(sequence, pos, y, y_prev, edge_idx)
self.edge_feature_cache[y][y_prev] = edge_idx
return self.edge_feature_cache[y][y_prev]
def get_initial_features(self, sequence, y):
if(y not in self.initial_state_feature_cache):
edge_idx = []
edge_idx = self.add_initial_features(sequence, y, edge_idx)
self.initial_state_feature_cache[y] = edge_idx
return self.initial_state_feature_cache[y]
def get_final_features(self, sequence, y_prev):
if(y_prev not in self.final_state_feature_cache):
edge_idx = []
edge_idx = self.add_final_features(sequence, y_prev, edge_idx)
self.final_state_feature_cache[y_prev] = edge_idx
return self.final_state_feature_cache[y_prev]
def add_initial_features(self, sequence, y, features):
y_name = self.dataset.y_dict.get_label_name(y)
feat_name = "init_tag:%s"%(y_name)
feat_id = self.add_feature(feat_name)
if(feat_id != -1):
features.append(feat_id)
return features
def add_final_features(self, sequence, y_prev, features):
y_name = self.dataset.y_dict.get_label_name(y_prev)
feat_name = "final_prev_tag:%s"%(y_name)
feat_id = self.add_feature(feat_name)
if(feat_id != -1):
features.append(feat_id)
return features
def add_emission_features(self, sequence, pos, y, features):
'''Add word-tag pair feature.'''
x = sequence.x[pos]
y_name = self.dataset.y_dict.get_label_name(y)
x_name = self.dataset.x_dict.get_label_name(x)
feat_name = "id:%s::%s"%(x_name,y_name)
feat_id = self.add_feature(feat_name)
if feat_id != -1:
features.append(feat_id)
return features
def add_transition_features(self, sequence, pos, y, y_prev, features):
""" Adds a feature to the edge feature list.
        Creates a unique id if it's the first time the feature is visited,
        or returns the existing id otherwise.
"""
assert pos < len(sequence.x)-1, pdb.set_trace()
y_name = self.dataset.y_dict.get_label_name(y)
y_prev_name = self.dataset.y_dict.get_label_name(y_prev)
feat_name = "prev_tag:%s::%s"%(y_prev_name,y_name)
feat_id = self.add_feature(feat_name)
if(feat_id != -1):
features.append(feat_id)
return features
def add_feature(self, feat_name):
"""
Builds a dictionary of feature name to feature id
If we are at test time and we don't have the feature
we return -1.
"""
if(feat_name in self.feature_dict):
return self.feature_dict[feat_name]
if not self.add_features:
return -1
return self.feature_dict.add(feat_name)
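# A minimal usage sketch (assumes `dataset` is a sequence list as noted above):
#
#     feature_mapper = IDFeatures(dataset)
#     feature_mapper.build_features()
#     print feature_mapper.get_num_features()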
|
data/adewes/blitzdb/blitzdb/tests/sql/test_many_to_many.py
|
import pytest
import pprint
from ..helpers.movie_data import Movie,Actor,Director
from .fixtures import backend
from blitzdb.backends.sql.relations import ManyToManyProxy
def test_basics(backend):
backend.init_schema()
backend.create_schema()
francis_coppola = Director({'name' : 'Francis Coppola'})
stanley_kubrick = Director({'name' : 'Stanley Kubrick'})
robert_de_niro = Actor({'name' : 'Robert de Niro','movies' : []})
harrison_ford = Actor({'name' : 'Harrison Ford'})
brian_de_palma = Director({'name' : 'Brian de Palma'})
al_pacino = Actor({'name' : 'Al Pacino','movies' : []})
scarface = Movie({'title' : 'Scarface','director' : brian_de_palma})
the_godfather = Movie({'title' : 'The Godfather',
'director' : francis_coppola})
space_odyssey = Movie({'title' : '2001 - A space odyssey',
'director' : stanley_kubrick})
clockwork_orange = Movie({'title' : 'A Clockwork Orange',
'director' : stanley_kubrick})
robert_de_niro.movies.append(the_godfather)
al_pacino.movies.append(the_godfather)
al_pacino.movies.append(scarface)
apocalypse_now = Movie({'title' : 'Apocalypse Now'})
star_wars_v = Movie({'title' : 'Star Wars V: The Empire Strikes Back'})
harrison_ford.movies = [star_wars_v]
backend.save(robert_de_niro)
backend.save(al_pacino)
backend.save(francis_coppola)
backend.save(stanley_kubrick)
backend.save(brian_de_palma)
backend.save(harrison_ford)
backend.update(stanley_kubrick,{'favorite_actor' : al_pacino})
backend.update(francis_coppola,{'favorite_actor' : robert_de_niro})
backend.save(the_godfather)
backend.save(clockwork_orange)
backend.save(space_odyssey)
backend.save(scarface)
backend.commit()
actor = backend.get(Actor,{'name' : 'Al Pacino'})
assert isinstance(actor.movies,ManyToManyProxy)
assert the_godfather in actor.movies
assert scarface in actor.movies
assert len(actor.movies) == 2
with backend.transaction():
actor.movies.remove(scarface)
assert scarface not in actor.movies
assert len(actor.movies) == 1
actor.movies.append(scarface)
assert len(actor.movies) == 2
assert scarface in actor.movies
actor.movies.append(scarface)
assert len(actor.movies) == 2
assert scarface in actor.movies
actor.movies.extend([scarface,the_godfather])
assert len(actor.movies) == 2
assert scarface in actor.movies
actor.movies.extend([scarface,the_godfather,star_wars_v])
assert len(actor.movies) == 3
assert star_wars_v in actor.movies
assert len(actor.movies[1:]) == 2
assert len(actor.movies[1:2]) == 1
assert len(actor.movies[:-1]) == 2
assert len(actor.movies[1:-1]) == 1
|
data/HewlettPackard/python-hpOneView/examples/scripts/get-managed-sans.py
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
import re
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
if PYTHON_VERSION < (2, 7, 9):
raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
raise Exception('Must use Python 3.4 or later')
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
con.get_eula_status()
try:
if con.get_eula_status() is True:
print('EULA display needed')
con.set_eula('no')
except Exception as e:
print('EXCEPTION:')
print(e)
def login(con, credential):
try:
con.login(credential)
except:
print('Login failed')
def get_managed_sans(fcs):
sans = fcs.get_managed_sans()
pprint(sans)
def main():
parser = argparse.ArgumentParser(add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
description='''
Display or list the available Managed SAN resources in the appliance
Usage: ''')
parser.add_argument('-a', dest='host', required=True,
help='''
HP OneView Appliance hostname or IP address''')
parser.add_argument('-u', dest='user', required=False,
default='Administrator',
help='''
HP OneView Username''')
parser.add_argument('-p', dest='passwd', required=True,
help='''
HP OneView Password''')
parser.add_argument('-c', dest='cert', required=False,
help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
parser.add_argument('-y', dest='proxy', required=False,
help='''
    Proxy (host:port format)''')
parser.add_argument('-j', dest='domain', required=False,
default='Local',
help='''
HP OneView Authorized Login Domain''')
args = parser.parse_args()
credential = {'authLoginDomain': args.domain.upper(), 'userName': args.user, 'password': args.passwd}
con = hpov.connection(args.host)
fcs = hpov.fcsans(con)
if args.proxy:
con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
if args.cert:
con.set_trusted_ssl_bundle(args.cert)
login(con, credential)
acceptEULA(con)
get_managed_sans(fcs)
if __name__ == '__main__':
import sys
import argparse
sys.exit(main())
|