repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
will-Do/avocado-vt | virttest/staging/backports/simplejson/ordered_dict.py | 32 | 3407 | """Drop-in replacement for collections.OrderedDict by Raymond Hettinger
http://code.activestate.com/recipes/576693/
"""
from UserDict import DictMixin
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
try:
    # Python >= 2.5 provides the builtin; this lookup raises NameError before.
    all
except NameError:
    def all(iterable):
        """Fallback for Python < 2.5: True iff every element of *iterable* is truthy."""
        for item in iterable:
            if not item:
                return False
        return True
class OrderedDict(dict, DictMixin):
    """Dictionary that remembers the order in which keys were first inserted.

    Ordering is tracked with a circular doubly linked list of
    ``[key, prev, next]`` cells anchored on a sentinel node, plus a
    ``key -> cell`` map, so insertion and deletion stay O(1).
    Python 2 only (relies on ``UserDict.DictMixin`` and ``.next()``).
    """

    def __init__(self, *args, **kwds):
        # Mirror dict(): at most one positional mapping/iterable argument.
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # If the sentinel already exists this is a re-init: keep the list.
            self.__end
        except AttributeError:
            # First initialization: build the sentinel node and the key map.
            self.clear()
        self.update(*args, **kwds)

    def clear(self):
        """Remove all items and reset the linked-list bookkeeping."""
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)

    def __setitem__(self, key, value):
        if key not in self:
            # New key: splice a fresh cell in just before the sentinel (tail).
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the cell from the circular list in O(1).
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev

    def __iter__(self):
        # Walk forward from the sentinel, yielding keys in insertion order.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        # Walk backward from the sentinel.
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def popitem(self, last=True):
        """Remove and return a ``(key, value)`` pair; last-inserted when *last* is true."""
        if not self:
            raise KeyError('dictionary is empty')
        # Modified from original to support Python 2.4, see
        # http://code.google.com/p/simplejson/issues/detail?id=53
        if last:
            # pylint: disable=E0111
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value

    def __reduce__(self):
        # Pickle support: the linked list is self-referential and unpicklable,
        # so drop it from the instance dict snapshot and restore it afterwards.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def keys(self):
        return list(self)

    # DictMixin derives these from the primitives overridden above.
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())

    def copy(self):
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        """Alternate constructor: keys from *iterable*, all mapped to *value*."""
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        # Order-sensitive when comparing two OrderedDicts; order-insensitive
        # (plain dict equality) otherwise.
        if isinstance(other, OrderedDict):
            return len(self) == len(other) and \
                all(p == q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other
| gpl-2.0 |
doomsterinc/odoo | addons/product_extended/wizard/wizard_price.py | 270 | 3043 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2011 OpenERP S.A. (<http://www.openerp.com>).
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.exceptions import except_orm
from openerp.osv import fields, osv
from openerp.tools.translate import _
class wizard_price(osv.osv):
    """Wizard that (re)computes product template prices from their BoM.

    ``default_get`` fills the info field with a preview (test mode) of the
    computed prices; ``compute_from_bom`` applies them for real.
    """
    _name = "wizard.price"
    _description = "Compute price wizard"
    _columns = {
        'info_field': fields.text('Info', readonly=True),
        'real_time_accounting': fields.boolean("Generate accounting entries when real-time"),
        'recursive': fields.boolean("Change prices of child BoMs too"),
    }

    def default_get(self, cr, uid, fields, context=None):
        # BUG FIX: normalize the context *before* any use. The original code
        # called context.get('active_id') first and crashed with
        # AttributeError when the wizard was opened with context=None.
        if context is None:
            context = {}
        res = super(wizard_price, self).default_get(cr, uid, fields, context=context)
        rec_id = context.get('active_id', False)
        assert rec_id, _('Active ID is not set in Context.')
        product_pool = self.pool.get('product.template')
        product_obj = product_pool.browse(cr, uid, rec_id)
        # test=True: preview only, nothing is written to the templates.
        res['info_field'] = str(product_pool.compute_price(cr, uid, [], template_ids=[product_obj.id], test=True, context=context))
        return res

    def compute_from_bom(self, cr, uid, ids, context=None):
        """Apply the BoM-computed price to the active product template."""
        assert len(ids) == 1
        if context is None:
            context = {}
        model = context.get('active_model')
        if model != 'product.template':
            raise except_orm(_('Wrong model!'), _('This wizard is build for product templates, while you are currently running it from a product variant.'))
        rec_id = context and context.get('active_id', False)
        assert rec_id, _('Active ID is not set in Context.')
        prod_obj = self.pool.get('product.template')
        res = self.browse(cr, uid, ids, context=context)
        prod = prod_obj.browse(cr, uid, rec_id, context=context)
        # test=False: really write the computed prices; optionally recurse
        # into child BoMs and generate real-time accounting entries.
        prod_obj.compute_price(cr, uid, [], template_ids=[prod.id], real_time_accounting=res[0].real_time_accounting, recursive=res[0].recursive, test=False, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
davelab6/pyfontaine | fontaine/charsets/noto_glyphs/notosansarmenian_bold.py | 2 | 4119 | # -*- coding: utf-8 -*-
class Charset(object):
    """Glyph coverage table for the NotoSansArmenian-Bold font."""
    common_name = 'NotoSansArmenian-Bold'
    native_name = ''

    # Glyph indices in the exact order the font tooling emitted them.
    # Each value is the font's internal glyph id (the generated source
    # annotates them with uniXXXX glyph names).
    _GLYPH_IDS = (
        0x005B, 0x0060, 0x0058, 0x0050, 0x0051, 0x0052, 0x0053, 0x0054,
        0x0055, 0x0056, 0x0057, 0x0011, 0x0010, 0x0012, 0x000D, 0x000F,
        0x000E, 0x000C, 0x000B, 0x0008, 0x0007, 0x000A, 0x0009, 0x0004,
        0x0006, 0x0005, 0x0059, 0x005F, 0x0003, 0x0030, 0x002F, 0x002E,
        0x002D, 0x002C, 0x002B, 0x002A, 0x0029, 0x0028, 0x0027, 0x0026,
        0x0025, 0x0024, 0x0023, 0x005C, 0x0000, 0x005A, 0x0061, 0x005D,
        0x005E, 0x001B, 0x001C, 0x0017, 0x0018, 0x0019, 0x001A, 0x0013,
        0x0014, 0x0015, 0x0016, 0x004A, 0x004C, 0x004B, 0x004E, 0x004D,
        0x004F, 0x0049, 0x0048, 0x0041, 0x0040, 0x0043, 0x0042, 0x0045,
        0x0044, 0x0047, 0x0046, 0x0020, 0x0021, 0x0022, 0x001D, 0x001E,
        0x001F, 0x0036, 0x0037, 0x0034, 0x0035, 0x0032, 0x0033, 0x0031,
        0x0038, 0x0039, 0x0001, 0x0002, 0x003F, 0x003D, 0x003E, 0x003B,
        0x003C, 0x003A,
    )

    def glyphs(self):
        """Return the glyph index list (a fresh list on every call)."""
        return list(self._GLYPH_IDS)
| gpl-3.0 |
vincent-noel/libSigNetSim | libsignetsim/model/ListOfVariables.py | 1 | 2936 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel (vincent.noel@butantan.gov.br)
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
"""
This file ...
"""
from libsignetsim.model.ListOfMathVariables import ListOfMathVariables
from libsignetsim.model.ListOfSbmlVariables import ListOfSbmlVariables
from libsignetsim.model.math.sympy_shortcuts import SympySymbol
class ListOfVariables(ListOfMathVariables, ListOfSbmlVariables, list):
    """Container holding every variable of an SBML model.

    Combines the math-oriented and SBML-oriented list behaviours with a
    plain Python list of variable objects.
    """

    def __init__(self, model):
        self.__model = model
        ListOfMathVariables.__init__(self, model)
        ListOfSbmlVariables.__init__(self, model)
        list.__init__(self)

    def addVariable(self, variable, string=None):
        """Append *variable*, registering a fresh SBML id (optionally derived from *string*)."""
        sbml_id = ListOfSbmlVariables.newSbmlId(self, variable, string)
        list.append(self, variable)
        return sbml_id

    def removeVariable(self, variable):
        """Remove *variable* from the container."""
        list.remove(self, variable)

    # Symbols
    def symbols(self):
        """Return the internal sympy formulas of all contained variables."""
        return [variable.symbol.getInternalMathFormula() for variable in self]

    def containsSymbol(self, symbol):
        """Return True when some contained variable carries *symbol*."""
        return any(variable.symbol.getSymbol() == symbol for variable in self)

    def getBySymbol(self, symbol):
        """Return the variable carrying *symbol*, or None when absent."""
        for variable in self:
            if variable.symbol.getSymbol() == symbol:
                return variable
        return None

    def getBySymbolStr(self, symbol_str):
        """Return the variable whose symbol string equals *symbol_str*, or None."""
        for variable in self:
            if variable.getSymbolStr() == symbol_str:
                return variable
        return None

    # Renaming variable
    def renameSbmlId(self, old_sbml_id, new_sbml_id):
        """Rename a variable's SBML id and propagate the change into all values."""
        old_symbol = SympySymbol(old_sbml_id)
        if old_symbol in self.symbols():
            self.getBySymbol(old_symbol).renameSymbol(old_sbml_id, new_sbml_id)
        for variable in self:
            variable.renameSbmlIdInValue(old_sbml_id, new_sbml_id)

    def clear(self):
        """Empty the container (the model reference is kept)."""
        list.__init__(self)

    def getFastVariables(self):
        """Species that appear only in fast reactions."""
        return [s for s in self.__model.listOfSpecies if s.isOnlyInFastReactions()]

    def getMixedVariables(self):
        """Species that appear in fast reactions (possibly in slow ones too)."""
        return [s for s in self.__model.listOfSpecies if s.isInFastReactions()]

    def getSlowVariables(self):
        """Species that never appear in fast reactions."""
        return [s for s in self.__model.listOfSpecies if not s.isInFastReactions()]
| gpl-3.0 |
sunqb/oa_qian | flask/Lib/site-packages/gevent/local.py | 9 | 8772 | """
Greenlet-local objects.
This module is based on `_threading_local.py`__ from the standard
library of Python 3.4.
__ https://github.com/python/cpython/blob/3.4/Lib/_threading_local.py
Greenlet-local objects support the management of greenlet-local data.
If you have data that you want to be local to a greenlet, simply create
a greenlet-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about greenlet-local objects is that their data are
local to a greenlet. If we access the data in a different greenlet:
>>> log = []
>>> def f():
... items = list(mydata.__dict__.items())
... items.sort()
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> greenlet = gevent.spawn(f)
>>> greenlet.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other greenlet
don't affect data seen in this greenlet:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever greenlet was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across greenlets, as they apply only to the greenlet they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate greenlet. This
is necessary to initialize each greenlet's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate greenlet:
>>> log = []
>>> greenlet = gevent.spawn(f)
>>> greenlet.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this greenlet's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not greenlet
local. They are shared across greenlets::
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate greenlet:
>>> greenlet = gevent.spawn(f)
>>> greenlet.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
.. versionchanged:: 1.1a2
Update the implementation to match Python 3.4 instead of Python 2.5.
This results in locals being eligible for garbage collection as soon
as their greenlet exits.
"""
from copy import copy
from weakref import ref
from contextlib import contextmanager
from gevent.hub import getcurrent, PYPY
from gevent.lock import RLock
__all__ = ["local"]
class _wrefdict(dict):
    """A dict that can be weak referenced"""
    # Plain dict instances cannot be targets of weakref.ref(); this trivial
    # subclass gains a __weakref__ slot so _localimpl.create_dict can hold a
    # weak reference to the dicts mapping without keeping it alive.
class _localimpl(object):
    """A class managing thread-local dicts"""

    # __weakref__ is needed so local_deleted (below) can weakly reference
    # this impl object itself.
    __slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__'

    def __init__(self):
        # The key used in the Thread objects' attribute dicts.
        # We keep it a string for speed but make it unlikely to clash with
        # a "real" attribute.
        self.key = '_threading_local._localimpl.' + str(id(self))
        # { id(Thread) -> (ref(Thread), thread-local dict) }
        self.dicts = _wrefdict()

    def get_dict(self):
        """Return the dict for the current thread. Raises KeyError if none
        defined."""
        thread = getcurrent()
        return self.dicts[id(thread)][1]

    def create_dict(self):
        """Create a new dict for the current thread, and return it.

        Also registers cleanup so the dict is dropped when either the
        greenlet/thread or the local object itself goes away.
        """
        localdict = {}
        key = self.key
        thread = getcurrent()
        idt = id(thread)
        # If we are working with a gevent.greenlet.Greenlet, we can
        # pro-actively clear out with a link. Use rawlink to avoid
        # spawning any more greenlets
        try:
            rawlink = thread.rawlink
        except AttributeError:
            # Otherwise we need to do it with weak refs
            def local_deleted(_, key=key):
                # When the localimpl is deleted, remove the thread attribute.
                thread = wrthread()
                if thread is not None:
                    del thread.__dict__[key]

            def thread_deleted(_, idt=idt):
                # When the thread is deleted, remove the local dict.
                # Note that this is suboptimal if the thread object gets
                # caught in a reference loop. We would like to be called
                # as soon as the OS-level thread ends instead.
                _local = wrlocal()
                if _local is not None:
                    _local.dicts.pop(idt, None)
            # Both references are weak so neither the impl nor the thread is
            # kept alive by this bookkeeping; the callbacks fire on collection.
            wrlocal = ref(self, local_deleted)
            wrthread = ref(thread, thread_deleted)
            thread.__dict__[key] = wrlocal
        else:
            # Greenlet path: rawlink fires when the greenlet finishes, at
            # which point its local dict can be discarded immediately.
            wrdicts = ref(self.dicts)

            def clear(_):
                dicts = wrdicts()
                if dicts:
                    dicts.pop(idt, None)
            rawlink(clear)
            wrthread = None

        self.dicts[idt] = wrthread, localdict
        return localdict
@contextmanager
def _patch(self):
    # Temporarily install the current greenlet's attribute dict as the
    # local's __dict__, creating (and replaying subclass __init__ on) it
    # the first time this greenlet touches the local object.
    impl = object.__getattribute__(self, '_local__impl')
    orig_dct = object.__getattribute__(self, '__dict__')
    try:
        dct = impl.get_dict()
    except KeyError:
        # it's OK to acquire the lock here and not earlier, because the above code won't switch out
        # however, subclassed __init__ might switch, so we do need to acquire the lock here
        dct = impl.create_dict()
        args, kw = impl.localargs
        with impl.locallock:
            self.__init__(*args, **kw)
    with impl.locallock:
        # Swap in the greenlet's dict for the duration of the attribute
        # access, then restore whatever was there before.
        object.__setattr__(self, '__dict__', dct)
        yield
        object.__setattr__(self, '__dict__', orig_dct)
class local(object):
    """
    An object whose attributes are greenlet-local.
    """
    # __dict__ is swapped per-greenlet by _patch; _local__impl holds the
    # bookkeeping (per-greenlet dicts, constructor args, lock).
    __slots__ = '_local__impl', '__dict__'

    def __new__(cls, *args, **kw):
        if args or kw:
            # Constructor arguments only make sense if a subclass overrides
            # __init__ (they are replayed in each new greenlet by _patch).
            # PYPY compares bound methods by equality, CPython by identity.
            if (PYPY and cls.__init__ == object.__init__) or (not PYPY and cls.__init__ is object.__init__):
                raise TypeError("Initialization arguments are not supported")
        self = object.__new__(cls)
        impl = _localimpl()
        impl.localargs = (args, kw)
        impl.locallock = RLock()
        object.__setattr__(self, '_local__impl', impl)
        # We need to create the thread dict in anticipation of
        # __init__ being called, to make sure we don't call it
        # again ourselves.
        impl.create_dict()
        return self

    def __getattribute__(self, name):
        with _patch(self):
            return object.__getattribute__(self, name)

    def __setattr__(self, name, value):
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        with _patch(self):
            return object.__setattr__(self, name, value)

    def __delattr__(self, name):
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        with _patch(self):
            return object.__delattr__(self, name)

    def __copy__(self):
        # Copy the *current greenlet's* view of the local: duplicate its
        # dict and install the duplicate in a fresh instance under the same
        # greenlet id.
        impl = object.__getattribute__(self, '_local__impl')
        current = getcurrent()
        currentId = id(current)
        d = impl.get_dict()
        duplicate = copy(d)

        cls = type(self)
        if (PYPY and cls.__init__ != object.__init__) or (not PYPY and cls.__init__ is not object.__init__):
            args, kw = impl.localargs
            instance = cls(*args, **kw)
        else:
            instance = cls()

        new_impl = object.__getattribute__(instance, '_local__impl')
        tpl = new_impl.dicts[currentId]
        new_impl.dicts[currentId] = (tpl[0], duplicate)
        return instance
| apache-2.0 |
sestrella/ansible | lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint.py | 19 | 24815 | #!/usr/bin/python
#
# Copyright (c) 2018 Hai Cao, <t-haicao@microsoft.com>, Yunge Zhu <yungez@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_cdnendpoint
version_added: "2.8"
short_description: Manage a Azure CDN endpoint
description:
- Create, update, start, stop and delete a Azure CDN endpoint.
options:
resource_group:
description:
- Name of a resource group where the Azure CDN endpoint exists or will be created.
required: true
name:
description:
- Name of the Azure CDN endpoint.
required: true
location:
description:
- Valid azure location. Defaults to location of the resource group.
started:
description:
- Use with I(state=present) to start the endpoint.
type: bool
purge:
description:
- Use with I(state=present) to purge the endpoint.
type: bool
default: false
purge_content_paths:
description:
- Use with I(state=present) and I(purge=true) to specify content paths to be purged.
type: list
default: ['/']
profile_name:
description:
- Name of the CDN profile where the endpoint attached to.
required: true
origins:
description:
- Set of source of the content being delivered via CDN.
suboptions:
name:
description:
- Origin name.
required: true
host_name:
description:
- The address of the origin.
- It can be a domain name, IPv4 address, or IPv6 address.
required: true
http_port:
description:
- The value of the HTTP port. Must be between C(1) and C(65535).
type: int
https_port:
description:
- The value of the HTTPS port. Must be between C(1) and C(65535).
type: int
required: true
origin_host_header:
description:
- The host header value sent to the origin with each request.
type: str
origin_path:
description:
- A directory path on the origin that CDN can use to retrieve content from.
- E.g. contoso.cloudapp.net/originpath.
type: str
content_types_to_compress:
description:
- List of content types on which compression applies.
- This value should be a valid MIME type.
type: list
is_compression_enabled:
description:
- Indicates whether content compression is enabled on CDN.
type: bool
default: false
is_http_allowed:
description:
- Indicates whether HTTP traffic is allowed on the endpoint.
type: bool
default: true
is_https_allowed:
description:
- Indicates whether HTTPS traffic is allowed on the endpoint.
type: bool
default: true
query_string_caching_behavior:
description:
- Defines how CDN caches requests that include query strings.
type: str
choices:
- ignore_query_string
- bypass_caching
- use_query_string
- not_set
default: ignore_query_string
state:
description:
- Assert the state of the Azure CDN endpoint. Use C(present) to create or update a Azure CDN endpoint and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- Yunge Zhu (@yungezz)
'''
EXAMPLES = '''
- name: Create a Azure CDN endpoint
azure_rm_cdnendpoint:
resource_group: myResourceGroup
profile_name: myProfile
name: myEndpoint
origins:
- name: TestOrig
host_name: "www.example.com"
tags:
testing: testing
delete: on-exit
foo: bar
- name: Delete a Azure CDN endpoint
azure_rm_cdnendpoint:
resource_group: myResourceGroup
profile_name: myProfile
name: myEndpoint
state: absent
'''
RETURN = '''
state:
description: Current state of the Azure CDN endpoint.
returned: always
type: str
id:
description:
- Id of the CDN endpoint.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Cdn/profiles/myProfile/endpoints/
myEndpoint"
host_name:
description:
- Host name of the CDN endpoint.
returned: always
type: str
sample: "myendpoint.azureedge.net"
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _snake_to_camel
try:
from azure.mgmt.cdn.models import Endpoint, DeepCreatedOrigin, EndpointUpdateParameters, QueryStringCachingBehavior, ErrorResponseException
from azure.mgmt.cdn import CdnManagementClient
except ImportError:
# This is handled in azure_rm_common
pass
def cdnendpoint_to_dict(cdnendpoint):
    """Flatten an azure.mgmt.cdn Endpoint object into a plain dict.

    Nested origin and geo-filter objects are serialized through their own
    helpers; absent collections stay None.
    """
    geo_filters = cdnendpoint.geo_filters
    origins = cdnendpoint.origins
    return {
        'id': cdnendpoint.id,
        'name': cdnendpoint.name,
        'type': cdnendpoint.type,
        'location': cdnendpoint.location,
        'tags': cdnendpoint.tags,
        'origin_host_header': cdnendpoint.origin_host_header,
        'origin_path': cdnendpoint.origin_path,
        'content_types_to_compress': cdnendpoint.content_types_to_compress,
        'is_compression_enabled': cdnendpoint.is_compression_enabled,
        'is_http_allowed': cdnendpoint.is_http_allowed,
        'is_https_allowed': cdnendpoint.is_https_allowed,
        'query_string_caching_behavior': cdnendpoint.query_string_caching_behavior,
        'optimization_type': cdnendpoint.optimization_type,
        'probe_path': cdnendpoint.probe_path,
        'geo_filters': [geo_filter_to_dict(f) for f in geo_filters] if geo_filters else None,
        'host_name': cdnendpoint.host_name,
        'origins': [deep_created_origin_to_dict(o) for o in origins] if origins else None,
        'resource_state': cdnendpoint.resource_state,
        'provisioning_state': cdnendpoint.provisioning_state,
    }
def deep_created_origin_to_dict(origin):
    """Serialize a DeepCreatedOrigin SDK object into a plain dict."""
    return {
        'name': origin.name,
        'host_name': origin.host_name,
        'http_port': origin.http_port,
        'https_port': origin.https_port,
    }
def geo_filter_to_dict(geo_filter):
    """Serialize a GeoFilter SDK object into a plain dict."""
    return {
        'relative_path': geo_filter.relative_path,
        'action': geo_filter.action,
        'country_codes': geo_filter.country_codes,
    }
def default_content_types():
    """Return the MIME types compressed by default when the user supplies none."""
    return ("text/plain text/html text/css text/javascript "
            "application/x-javascript application/javascript "
            "application/json application/xml").split()
# Ansible sub-option spec for each entry of the 'origins' list parameter.
origin_spec = {
    'name': {'type': 'str', 'required': True},
    'host_name': {'type': 'str', 'required': True},
    'http_port': {'type': 'int'},
    'https_port': {'type': 'int'},
}
class AzureRMCdnendpoint(AzureRMModuleBase):
    def __init__(self):
        """Declare the module argument spec and initialize instance state."""
        # Argument spec consumed by AzureRMModuleBase for parameter parsing.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            location=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            ),
            started=dict(
                type='bool'
            ),
            purge=dict(
                type='bool'
            ),
            purge_content_paths=dict(
                type='list',
                elements='str',
                default=['/']
            ),
            profile_name=dict(
                type='str',
                required=True
            ),
            origins=dict(
                type='list',
                elements='dict',
                options=origin_spec
            ),
            origin_host_header=dict(
                type='str',
            ),
            origin_path=dict(
                type='str',
            ),
            content_types_to_compress=dict(
                type='list',
                elements='str',
            ),
            is_compression_enabled=dict(
                type='bool',
                default=False
            ),
            is_http_allowed=dict(
                type='bool',
                default=True
            ),
            is_https_allowed=dict(
                type='bool',
                default=True
            ),
            query_string_caching_behavior=dict(
                type='str',
                choices=[
                    'ignore_query_string',
                    'bypass_caching',
                    'use_query_string',
                    'not_set'
                ],
                default='ignore_query_string'
            ),
        )

        # Module parameters; populated from kwargs at the top of exec_module.
        self.resource_group = None
        self.name = None
        self.state = None
        self.started = None
        self.purge = None
        self.purge_content_paths = None
        self.location = None
        self.profile_name = None
        self.origins = None
        self.tags = None
        self.origin_host_header = None
        self.origin_path = None
        self.content_types_to_compress = None
        self.is_compression_enabled = None
        self.is_http_allowed = None
        self.is_https_allowed = None
        self.query_string_caching_behavior = None

        # Azure SDK client; created in exec_module via get_cdn_client().
        self.cdn_client = None

        self.results = dict(changed=False)

        super(AzureRMCdnendpoint, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                 supports_check_mode=True,
                                                 supports_tags=True)
    def exec_module(self, **kwargs):
        """Main module execution method"""
        # Copy every declared parameter (plus 'tags') onto the instance.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        self.cdn_client = self.get_cdn_client()

        to_be_updated = False

        # Default the endpoint location to the resource group's location.
        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            self.location = resource_group.location

        # The Azure API expects CamelCase caching-behavior values.
        if self.query_string_caching_behavior:
            self.query_string_caching_behavior = _snake_to_camel(self.query_string_caching_behavior)

        response = self.get_cdnendpoint()

        if self.state == 'present':

            if not response:

                if self.started is None:
                    # If endpoint doesn't exist and no start/stop operation specified, create endpoint.
                    if self.origins is None:
                        self.fail("Origins is not provided when trying to create endpoint")
                    self.log("Need to create the Azure CDN endpoint")
                    if not self.check_mode:
                        result = self.create_cdnendpoint()
                        self.results['id'] = result['id']
                        self.results['host_name'] = result['host_name']
                        self.log("Creation done")
                    self.results['changed'] = True
                    return self.results
                else:
                    # Fail the module when user try to start/stop a non-existed endpoint
                    self.log("Can't stop/stop a non-existed endpoint")
                    self.fail("This endpoint is not found, stop/start is forbidden")
            else:
                self.log('Results : {0}'.format(response))
                self.results['id'] = response['id']
                self.results['host_name'] = response['host_name']
                update_tags, response['tags'] = self.update_tags(response['tags'])
                if update_tags:
                    to_be_updated = True

                if response['provisioning_state'] == "Succeeded":
                    # Start/stop transitions are only attempted from the
                    # matching current resource state.
                    if self.started is False and response['resource_state'] == 'Running':
                        self.log("Need to stop the Azure CDN endpoint")
                        if not self.check_mode:
                            result = self.stop_cdnendpoint()
                            self.log("Endpoint stopped")
                        self.results['changed'] = True
                    elif self.started and response['resource_state'] == 'Stopped':
                        self.log("Need to start the Azure CDN endpoint")
                        if not self.check_mode:
                            result = self.start_cdnendpoint()
                            self.log("Endpoint started")
                        self.results['changed'] = True
                    elif self.started is not None:
                        self.module.warn("Start/Stop not performed due to current resource state {0}".format(response['resource_state']))
                        self.results['changed'] = False

                    if self.purge:
                        self.log("Need to purge endpoint")
                        if not self.check_mode:
                            result = self.purge_cdnendpoint()
                            self.log("Endpoint purged")
                        self.results['changed'] = True

                    # Update when tags changed or the settable properties differ.
                    to_be_updated = to_be_updated or self.check_update(response)
                    if to_be_updated:
                        self.log("Need to update the Azure CDN endpoint")
                        self.results['changed'] = True
                        if not self.check_mode:
                            result = self.update_cdnendpoint()
                            self.results['host_name'] = result['host_name']
                            self.log("Update done")
                elif self.started is not None:
                    self.module.warn("Start/Stop not performed due to current provisioning state {0}".format(response['provisioning_state']))
                    self.results['changed'] = False

        elif self.state == 'absent' and response:
            self.log("Need to delete the Azure CDN endpoint")
            self.results['changed'] = True
            if not self.check_mode:
                self.delete_cdnendpoint()
                self.log("Azure CDN endpoint deleted")

        return self.results
    def create_cdnendpoint(self):
        '''
        Creates a Azure CDN endpoint.

        :return: deserialized Azure CDN endpoint instance state dictionary
        '''
        self.log("Creating the Azure CDN endpoint instance {0}".format(self.name))

        # Convert the module's origin dicts into SDK DeepCreatedOrigin objects.
        origins = []
        for item in self.origins:
            origins.append(
                DeepCreatedOrigin(name=item['name'],
                                  host_name=item['host_name'],
                                  http_port=item['http_port'] if 'http_port' in item else None,
                                  https_port=item['https_port'] if 'https_port' in item else None)
            )

        # Fall back to API defaults where the user left options unset; when
        # compression is requested without explicit MIME types, use the
        # standard default list.
        parameters = Endpoint(
            origins=origins,
            location=self.location,
            tags=self.tags,
            origin_host_header=self.origin_host_header,
            origin_path=self.origin_path,
            content_types_to_compress=default_content_types() if self.is_compression_enabled and not self.content_types_to_compress
            else self.content_types_to_compress,
            is_compression_enabled=self.is_compression_enabled if self.is_compression_enabled is not None else False,
            is_http_allowed=self.is_http_allowed if self.is_http_allowed is not None else True,
            is_https_allowed=self.is_https_allowed if self.is_https_allowed is not None else True,
            query_string_caching_behavior=self.query_string_caching_behavior if self.query_string_caching_behavior
            else QueryStringCachingBehavior.ignore_query_string
        )

        try:
            # create() is a long-running operation; wait for the poller.
            poller = self.cdn_client.endpoints.create(self.resource_group, self.profile_name, self.name, parameters)
            response = self.get_poller_result(poller)
            return cdnendpoint_to_dict(response)
        except ErrorResponseException as exc:
            self.log('Error attempting to create Azure CDN endpoint instance.')
            self.fail("Error creating Azure CDN endpoint instance: {0}".format(exc.message))
def update_cdnendpoint(self):
'''
Updates a Azure CDN endpoint.
:return: deserialized Azure CDN endpoint instance state dictionary
'''
self.log("Updating the Azure CDN endpoint instance {0}".format(self.name))
endpoint_update_properties = EndpointUpdateParameters(
tags=self.tags,
origin_host_header=self.origin_host_header,
origin_path=self.origin_path,
content_types_to_compress=default_content_types() if self.is_compression_enabled and not self.content_types_to_compress
else self.content_types_to_compress,
is_compression_enabled=self.is_compression_enabled,
is_http_allowed=self.is_http_allowed,
is_https_allowed=self.is_https_allowed,
query_string_caching_behavior=self.query_string_caching_behavior,
)
try:
poller = self.cdn_client.endpoints.update(self.resource_group, self.profile_name, self.name, endpoint_update_properties)
response = self.get_poller_result(poller)
return cdnendpoint_to_dict(response)
except ErrorResponseException as exc:
self.log('Error attempting to update Azure CDN endpoint instance.')
self.fail("Error updating Azure CDN endpoint instance: {0}".format(exc.message))
def delete_cdnendpoint(self):
'''
Deletes the specified Azure CDN endpoint in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the Azure CDN endpoint {0}".format(self.name))
try:
poller = self.cdn_client.endpoints.delete(
self.resource_group, self.profile_name, self.name)
self.get_poller_result(poller)
return True
except ErrorResponseException as e:
self.log('Error attempting to delete the Azure CDN endpoint.')
self.fail("Error deleting the Azure CDN endpoint: {0}".format(e.message))
return False
def get_cdnendpoint(self):
'''
Gets the properties of the specified Azure CDN endpoint.
:return: deserialized Azure CDN endpoint state dictionary
'''
self.log(
"Checking if the Azure CDN endpoint {0} is present".format(self.name))
try:
response = self.cdn_client.endpoints.get(self.resource_group, self.profile_name, self.name)
self.log("Response : {0}".format(response))
self.log("Azure CDN endpoint : {0} found".format(response.name))
return cdnendpoint_to_dict(response)
except ErrorResponseException:
self.log('Did not find the Azure CDN endpoint.')
return False
def start_cdnendpoint(self):
'''
Starts an existing Azure CDN endpoint that is on a stopped state.
:return: deserialized Azure CDN endpoint state dictionary
'''
self.log(
"Starting the Azure CDN endpoint {0}".format(self.name))
try:
poller = self.cdn_client.endpoints.start(self.resource_group, self.profile_name, self.name)
response = self.get_poller_result(poller)
self.log("Response : {0}".format(response))
self.log("Azure CDN endpoint : {0} started".format(response.name))
return self.get_cdnendpoint()
except ErrorResponseException:
self.log('Fail to start the Azure CDN endpoint.')
return False
def purge_cdnendpoint(self):
'''
Purges an existing Azure CDN endpoint.
:return: deserialized Azure CDN endpoint state dictionary
'''
self.log(
"Purging the Azure CDN endpoint {0}".format(self.name))
try:
poller = self.cdn_client.endpoints.purge_content(self.resource_group,
self.profile_name,
self.name,
content_paths=self.purge_content_paths)
response = self.get_poller_result(poller)
self.log("Response : {0}".format(response))
return self.get_cdnendpoint()
except ErrorResponseException as e:
self.log('Fail to purge the Azure CDN endpoint.')
return False
def stop_cdnendpoint(self):
'''
Stops an existing Azure CDN endpoint that is on a running state.
:return: deserialized Azure CDN endpoint state dictionary
'''
self.log(
"Stopping the Azure CDN endpoint {0}".format(self.name))
try:
poller = self.cdn_client.endpoints.stop(self.resource_group, self.profile_name, self.name)
response = self.get_poller_result(poller)
self.log("Response : {0}".format(response))
self.log("Azure CDN endpoint : {0} stopped".format(response.name))
return self.get_cdnendpoint()
except ErrorResponseException:
self.log('Fail to stop the Azure CDN endpoint.')
return False
def check_update(self, response):
if self.origin_host_header and response['origin_host_header'] != self.origin_host_header:
self.log("Origin host header Diff - Origin {0} / Update {1}".format(response['origin_host_header'], self.origin_host_header))
return True
if self.origin_path and response['origin_path'] != self.origin_path:
self.log("Origin path Diff - Origin {0} / Update {1}".format(response['origin_path'], self.origin_path))
return True
if self.content_types_to_compress and response['content_types_to_compress'] != self.content_types_to_compress:
self.log("Content types to compress Diff - Origin {0} / Update {1}".format(response['content_types_to_compress'], self.content_types_to_compress))
return True
if self.is_compression_enabled is not None and response['is_compression_enabled'] != self.is_compression_enabled:
self.log("is_compression_enabled Diff - Origin {0} / Update {1}".format(response['is_compression_enabled'], self.is_compression_enabled))
return True
if self.is_http_allowed is not None and response['is_http_allowed'] != self.is_http_allowed:
self.log("is_http_allowed Diff - Origin {0} / Update {1}".format(response['is_http_allowed'], self.is_http_allowed))
return True
if self.is_https_allowed is not None and response['is_https_allowed'] != self.is_https_allowed:
self.log("is_https_allowed Diff - Origin {0} / Update {1}".format(response['is_https_allowed'], self.is_https_allowed))
return True
if self.query_string_caching_behavior and \
_snake_to_camel(response['query_string_caching_behavior']).lower() != _snake_to_camel(self.query_string_caching_behavior).lower():
self.log("query_string_caching_behavior Diff - Origin {0} / Update {1}".format(response['query_string_caching_behavior'],
self.query_string_caching_behavior))
return True
return False
def get_cdn_client(self):
if not self.cdn_client:
self.cdn_client = self.get_mgmt_svc_client(CdnManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-04-02')
return self.cdn_client
def main():
    """Main execution"""
    # Instantiating the module class runs the entire Ansible module
    # life-cycle (argument parsing, exec_module, exit/fail).
    AzureRMCdnendpoint()
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
Nikoala/CouchPotatoServer | libs/xmpp/transports.py | 89 | 15453 | ## transports.py
##
## Copyright (C) 2003-2004 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: transports.py,v 1.35 2009/04/07 08:34:09 snakeru Exp $
"""
This module contains the low-level implementations of xmpppy connect methods or
(in other words) transports for xmpp-stanzas.
Currently here is three transports:
direct TCP connect - TCPsocket class
proxied TCP connect - HTTPPROXYsocket class (CONNECT proxies)
TLS connection - TLS class. Can be used for SSL connections also.
Transports are stackable so you - f.e. TLS use HTPPROXYsocket or TCPsocket as more low-level transport.
Also exception 'error' is defined to allow capture of this module specific exceptions.
"""
import socket, select, base64, dispatcher, sys
from simplexml import ustr
from client import PlugIn
from protocol import *
# determine which DNS resolution library is available
# Prefer dnspython; fall back to pydns; with neither, SRV lookups are skipped.
HAVE_DNSPYTHON = False
HAVE_PYDNS = False
try:
    import dns.resolver # http://dnspython.org/
    HAVE_DNSPYTHON = True
except ImportError:
    try:
        import DNS # http://pydns.sf.net/
        HAVE_PYDNS = True
    except ImportError:
        pass
# Event names fired on the owner's Dispatcher for every raw read/write.
DATA_RECEIVED = 'DATA RECEIVED'
DATA_SENT = 'DATA SENT'
class error:
    """An exception to be raised in case of low-level errors in methods of 'transports' module."""
    def __init__(self, comment):
        """Cache the descriptive string"""
        # Stored verbatim; __str__ simply echoes it back.
        self._comment = comment
    def __str__(self):
        """Serialise exception into pre-cached descriptive string."""
        return self._comment
BUFLEN = 1024  # number of bytes requested per socket recv() call
class TCPsocket(PlugIn):
    """ This class defines direct TCP connection method. """
    def __init__(self, server = None, use_srv = True):
        """ Cache connection point 'server'. 'server' is the tuple of (host, port)
            absolutely the same as standard tcp socket uses. However library will lookup for
            ('_xmpp-client._tcp.' + host) SRV record in DNS and connect to the found (if it is)
            server instead.
        """
        PlugIn.__init__(self)
        self.DBG_LINE = 'socket'
        # Methods re-exported onto the owner object by the PlugIn machinery.
        self._exported_methods = [self.send, self.disconnect]
        self._server, self.use_srv = server, use_srv
    def srv_lookup(self, server):
        " SRV resolver. Takes server=(host, port) as argument. Returns new (host, port) pair "
        if HAVE_DNSPYTHON or HAVE_PYDNS:
            host, port = server
            possible_queries = ['_xmpp-client._tcp.' + host]
            for query in possible_queries:
                try:
                    if HAVE_DNSPYTHON:
                        answers = [x for x in dns.resolver.query(query, 'SRV')]
                        if answers:
                            # Only the first SRV answer is used; priority and
                            # weight are not taken into account.
                            host = str(answers[0].target)
                            port = int(answers[0].port)
                            break
                    elif HAVE_PYDNS:
                        # ensure we haven't cached an old configuration
                        DNS.DiscoverNameServers()
                        response = DNS.Request().req(query, qtype = 'SRV')
                        answers = response.answers
                        if len(answers) > 0:
                            # ignore the priority and weight for now
                            _, _, port, host = answers[0]['data']
                            del _
                            port = int(port)
                            break
                except:
                    # Any resolver failure falls back to the original address.
                    self.DEBUG('An error occurred while looking up %s' % query, 'warn')
            server = (host, port)
        else:
            self.DEBUG("Could not load one of the supported DNS libraries (dnspython or pydns). SRV records will not be queried and you may need to set custom hostname/port for some servers to be accessible.\n", 'warn')
        # end of SRV resolver
        return server
    def plugin(self, owner):
        """ Fire up connection. Return non-empty string on success.
            Also registers self.disconnected method in the owner's dispatcher.
            Called internally. """
        # Default XMPP client port is 5222 when no server tuple was supplied.
        if not self._server: self._server = (self._owner.Server, 5222)
        if self.use_srv: server = self.srv_lookup(self._server)
        else: server = self._server
        if not self.connect(server): return
        self._owner.Connection = self
        self._owner.RegisterDisconnectHandler(self.disconnected)
        return 'ok'
    def getHost(self):
        """ Return the 'host' value that is connection is [will be] made to."""
        return self._server[0]
    def getPort(self):
        """ Return the 'port' value that is connection is [will be] made to."""
        return self._server[1]
    def connect(self, server = None):
        """ Try to connect to the given host/port. Does not lookup for SRV record.
            Returns non-empty string on success. """
        try:
            if not server: server = self._server
            self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._sock.connect((server[0], int(server[1])))
            # _send/_recv are indirected so the TLS plugin can later swap in
            # the SSL object's read/write without changing callers.
            self._send = self._sock.sendall
            self._recv = self._sock.recv
            self.DEBUG("Successfully connected to remote host %s" % `server`, 'start')
            return 'ok'
        except socket.error, (errno, strerror):
            self.DEBUG("Failed to connect to remote host %s: %s (%s)" % (`server`, strerror, errno), 'error')
        except: pass
    def plugout(self):
        """ Disconnect from the remote server and unregister self.disconnected method from
            the owner's dispatcher. """
        self._sock.close()
        if self._owner.__dict__.has_key('Connection'):
            del self._owner.Connection
        self._owner.UnregisterDisconnectHandler(self.disconnected)
    def receive(self):
        """ Reads all pending incoming data.
            In case of disconnection calls owner's disconnected() method and then raises IOError exception."""
        try: received = self._recv(BUFLEN)
        except socket.sslerror, e:
            # Non-blocking SSL reads report WANT_READ/WANT_WRITE; both simply
            # mean "no data yet", so return an empty string without error.
            self._seen_data = 0
            if e[0] == socket.SSL_ERROR_WANT_READ: return ''
            if e[0] == socket.SSL_ERROR_WANT_WRITE: return ''
            self.DEBUG('Socket error while receiving data', 'error')
            sys.exc_clear()
            self._owner.disconnected()
            raise IOError("Disconnected from server")
        except: received = ''
        # Drain everything currently readable in BUFLEN-sized chunks.
        while self.pending_data(0):
            try: add = self._recv(BUFLEN)
            except: add = ''
            received += add
            if not add: break
        if len(received): # length of 0 means disconnect
            # _seen_data is the hint flag consulted by TLS.pending_data.
            self._seen_data = 1
            self.DEBUG(received, 'got')
            if hasattr(self._owner, 'Dispatcher'):
                self._owner.Dispatcher.Event('', DATA_RECEIVED, received)
        else:
            self.DEBUG('Socket error while receiving data', 'error')
            self._owner.disconnected()
            raise IOError("Disconnected from server")
        return received
    def send(self, raw_data):
        """ Writes raw outgoing data. Blocks until done.
            If supplied data is unicode string, encodes it to utf-8 before send."""
        if type(raw_data) == type(u''): raw_data = raw_data.encode('utf-8')
        elif type(raw_data) <> type(''): raw_data = ustr(raw_data).encode('utf-8')
        try:
            self._send(raw_data)
            # Avoid printing messages that are empty keepalive packets.
            if raw_data.strip():
                self.DEBUG(raw_data, 'sent')
                if hasattr(self._owner, 'Dispatcher'): # HTTPPROXYsocket will send data before we have a Dispatcher
                    self._owner.Dispatcher.Event('', DATA_SENT, raw_data)
        except:
            self.DEBUG("Socket error while sending data", 'error')
            self._owner.disconnected()
    def pending_data(self, timeout = 0):
        """ Returns true if there is a data ready to be read. """
        return select.select([self._sock], [], [], timeout)[0]
    def disconnect(self):
        """ Closes the socket. """
        self.DEBUG("Closing socket", 'stop')
        self._sock.close()
    def disconnected(self):
        """ Called when a Network Error or disconnection occurs.
            Designed to be overidden. """
        self.DEBUG("Socket operation failed", 'error')
DBG_CONNECT_PROXY = 'CONNECTproxy'  # debug-channel name used by HTTPPROXYsocket
class HTTPPROXYsocket(TCPsocket):
    """ HTTP (CONNECT) proxy connection class. Uses TCPsocket as the base class
        redefines only connect method. Allows to use HTTP proxies like squid with
        (optionally) simple authentication (using login and password). """
    def __init__(self, proxy, server, use_srv = True):
        """ Caches proxy and target addresses.
            'proxy' argument is a dictionary with mandatory keys 'host' and 'port' (proxy address)
            and optional keys 'user' and 'password' to use for authentication.
            'server' argument is a tuple of host and port - just like TCPsocket uses. """
        TCPsocket.__init__(self, server, use_srv)
        self.DBG_LINE = DBG_CONNECT_PROXY
        self._proxy = proxy
    def plugin(self, owner):
        """ Starts connection. Used interally. Returns non-empty string on success."""
        owner.debug_flags.append(DBG_CONNECT_PROXY)
        return TCPsocket.plugin(self, owner)
    def connect(self, dupe = None):
        """ Starts connection. Connects to proxy, supplies login and password to it
            (if were specified while creating instance). Instructs proxy to make
            connection to the target server. Returns non-empty sting on success. """
        # First open a plain TCP connection to the proxy itself.
        if not TCPsocket.connect(self, (self._proxy['host'], self._proxy['port'])): return
        self.DEBUG("Proxy server contacted, performing authentification", 'start')
        # Build the HTTP CONNECT request targeting the real XMPP server.
        connector = ['CONNECT %s:%s HTTP/1.0' % self._server,
            'Proxy-Connection: Keep-Alive',
            'Pragma: no-cache',
            'Host: %s:%s' % self._server,
            'User-Agent: HTTPPROXYsocket/v0.1']
        if self._proxy.has_key('user') and self._proxy.has_key('password'):
            credentials = '%s:%s' % (self._proxy['user'], self._proxy['password'])
            credentials = base64.encodestring(credentials).strip()
            connector.append('Proxy-Authorization: Basic ' + credentials)
        connector.append('\r\n')
        self.send('\r\n'.join(connector))
        try: reply = self.receive().replace('\r', '')
        except IOError:
            self.DEBUG('Proxy suddenly disconnected', 'error')
            self._owner.disconnected()
            return
        # The status line looks like "HTTP/1.0 200 Connection established".
        try: proto, code, desc = reply.split('\n')[0].split(' ', 2)
        except: raise error('Invalid proxy reply')
        if code <> '200':
            self.DEBUG('Invalid proxy reply: %s %s %s' % (proto, code, desc), 'error')
            self._owner.disconnected()
            return
        # Keep reading until the blank line terminating the proxy's headers.
        while reply.find('\n\n') == -1:
            try: reply += self.receive().replace('\r', '')
            except IOError:
                self.DEBUG('Proxy suddenly disconnected', 'error')
                self._owner.disconnected()
                return
        self.DEBUG("Authentification successfull. Jabber server contacted.", 'ok')
        return 'ok'
    def DEBUG(self, text, severity):
        """Overwrites DEBUG tag to allow debug output be presented as "CONNECTproxy"."""
        return self._owner.DEBUG(DBG_CONNECT_PROXY, text, severity)
class TLS(PlugIn):
    """ TLS connection used to encrypts already estabilished tcp connection."""
    def PlugIn(self, owner, now = 0):
        """ If the 'now' argument is true then starts using encryption immidiatedly.
            If 'now' in false then starts encryption as soon as TLS feature is
            declared by the server (if it were already declared - it is ok).
        """
        if owner.__dict__.has_key('TLS'): return # Already enabled.
        PlugIn.PlugIn(self, owner)
        # NOTE(review): this assigns a local name, not self.DBG_LINE — looks
        # like a latent bug in the original; left unchanged here.
        DBG_LINE = 'TLS'
        if now: return self._startSSL()
        # If the server's <features/> were already received, handle them right
        # away; otherwise wait for them via a one-shot handler.
        if self._owner.Dispatcher.Stream.features:
            try: self.FeaturesHandler(self._owner.Dispatcher, self._owner.Dispatcher.Stream.features)
            except NodeProcessed: pass
        else: self._owner.RegisterHandlerOnce('features', self.FeaturesHandler, xmlns = NS_STREAMS)
        self.starttls = None
    def plugout(self, now = 0):
        """ Unregisters TLS handler's from owner's dispatcher. Take note that encription
            can not be stopped once started. You can only break the connection and start over."""
        self._owner.UnregisterHandler('features', self.FeaturesHandler, xmlns = NS_STREAMS)
        self._owner.UnregisterHandler('proceed', self.StartTLSHandler, xmlns = NS_TLS)
        self._owner.UnregisterHandler('failure', self.StartTLSHandler, xmlns = NS_TLS)
    def FeaturesHandler(self, conn, feats):
        """ Used to analyse server <features/> tag for TLS support.
            If TLS is supported starts the encryption negotiation. Used internally"""
        if not feats.getTag('starttls', namespace = NS_TLS):
            self.DEBUG("TLS unsupported by remote server.", 'warn')
            return
        self.DEBUG("TLS supported by remote server. Requesting TLS start.", 'ok')
        self._owner.RegisterHandlerOnce('proceed', self.StartTLSHandler, xmlns = NS_TLS)
        self._owner.RegisterHandlerOnce('failure', self.StartTLSHandler, xmlns = NS_TLS)
        self._owner.Connection.send('<starttls xmlns="%s"/>' % NS_TLS)
        # Stop further processing of this stanza by other handlers.
        raise NodeProcessed
    def pending_data(self, timeout = 0):
        """ Returns true if there possible is a data ready to be read. """
        # SSL may have buffered data the raw socket doesn't show, hence the
        # _seen_data hint in addition to select().
        return self._tcpsock._seen_data or select.select([self._tcpsock._sock], [], [], timeout)[0]
    def _startSSL(self):
        """ Immidiatedly switch socket to TLS mode. Used internally."""
        """ Here we should switch pending_data to hint mode."""
        tcpsock = self._owner.Connection
        tcpsock._sslObj = socket.ssl(tcpsock._sock, None, None)
        tcpsock._sslIssuer = tcpsock._sslObj.issuer()
        tcpsock._sslServer = tcpsock._sslObj.server()
        # Redirect the transport's I/O through the SSL object.
        tcpsock._recv = tcpsock._sslObj.read
        tcpsock._send = tcpsock._sslObj.write
        tcpsock._seen_data = 1
        self._tcpsock = tcpsock
        tcpsock.pending_data = self.pending_data
        tcpsock._sock.setblocking(0)
        self.starttls = 'success'
    def StartTLSHandler(self, conn, starttls):
        """ Handle server reply if TLS is allowed to process. Behaves accordingly.
            Used internally."""
        if starttls.getNamespace() <> NS_TLS: return
        self.starttls = starttls.getName()
        if self.starttls == 'failure':
            self.DEBUG("Got starttls response: " + self.starttls, 'error')
            return
        self.DEBUG("Got starttls proceed response. Switching to TLS/SSL...", 'ok')
        self._startSSL()
        # Restart the XML stream on the now-encrypted connection.
        self._owner.Dispatcher.PlugOut()
        dispatcher.Dispatcher().PlugIn(self._owner)
| gpl-3.0 |
zaxliu/scipy | scipy/special/tests/test_ellip_harm.py | 99 | 9413 | #
# Tests for the Ellipsoidal Harmonic Function,
# Distributed under the same license as SciPy itself.
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose,
assert_, run_module_suite)
from scipy.special._testutils import assert_func_equal
from scipy.special import ellip_harm, ellip_harm_2, ellip_normal
from scipy.integrate import IntegrationWarning
from numpy import sqrt, pi
def test_ellip_potential():
    # Validate the ellipsoidal-harmonic expansion of the Coulomb kernel
    # 1/|r1 - r2| against the directly computed potential.
    def change_coefficient(lambda1, mu, nu, h2, k2):
        # Map ellipsoidal coordinates (lambda, mu, nu) to Cartesian (x, y, z).
        # h2, k2 are the squared focal parameters of the coordinate system.
        x = sqrt(lambda1**2*mu**2*nu**2/(h2*k2))
        y = sqrt((lambda1**2 - h2)*(mu**2 - h2)*(h2 - nu**2)/(h2*(k2 - h2)))
        z = sqrt((lambda1**2 - k2)*(k2 - mu**2)*(k2 - nu**2)/(k2*(k2 - h2)))
        return x, y, z
    def solid_int_ellip(lambda1, mu, nu, n, p, h2, k2):
        # Product of three first-kind ellipsoidal harmonics (interior solid harmonic).
        return (ellip_harm(h2, k2, n, p, lambda1)*ellip_harm(h2, k2, n, p, mu)
                * ellip_harm(h2, k2, n, p, nu))
    def solid_int_ellip2(lambda1, mu, nu, n, p, h2, k2):
        # Same, but with the second-kind harmonic in the radial-like coordinate
        # (exterior solid harmonic).
        return (ellip_harm_2(h2, k2, n, p, lambda1)
                * ellip_harm(h2, k2, n, p, mu)*ellip_harm(h2, k2, n, p, nu))
    def summation(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2):
        # Partial sum of the expansion up to degree 20, stopping early once a
        # term is negligible; returns (sum, last term) so convergence can be
        # checked by the caller.
        tol = 1e-8
        sum1 = 0
        for n in range(20):
            xsum = 0
            for p in range(1, 2*n+2):
                xsum += (4*pi*(solid_int_ellip(lambda2, mu2, nu2, n, p, h2, k2)
                         * solid_int_ellip2(lambda1, mu1, nu1, n, p, h2, k2)) /
                         (ellip_normal(h2, k2, n, p)*(2*n + 1)))
            if abs(xsum) < 0.1*tol*abs(sum1):
                break
            sum1 += xsum
        return sum1, xsum
    def potential(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2):
        # Reference value: plain 1/distance between the two points.
        x1, y1, z1 = change_coefficient(lambda1, mu1, nu1, h2, k2)
        x2, y2, z2 = change_coefficient(lambda2, mu2, nu2, h2, k2)
        res = sqrt((x2 - x1)**2 + (y2 - y1)**2 + (z2 - z1)**2)
        return 1/res
    # Each tuple is (lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2).
    pts = [
        (120, sqrt(19), 2, 41, sqrt(17), 2, 15, 25),
        (120, sqrt(16), 3.2, 21, sqrt(11), 2.9, 11, 20),
    ]
    with warnings.catch_warnings():
        # ellip_harm_2 integrates numerically and may warn; that is expected.
        warnings.simplefilter("ignore", category=IntegrationWarning)
        for p in pts:
            err_msg = repr(p)
            exact = potential(*p)
            result, last_term = summation(*p)
            assert_allclose(exact, result, atol=0, rtol=1e-8, err_msg=err_msg)
            # The truncation error should be bounded by the last added term.
            assert_(abs(result - exact) < 10*abs(last_term), err_msg)
def test_ellip_norm():
    # Compare ellip_normal against closed-form expressions for the
    # normalization constants gamma_p^n of ellipsoidal harmonics, degree n <= 3.
    # Each Gnp(h2, k2) below is the analytic constant for degree n, order p.
    def G01(h2, k2):
        return 4*pi
    def G11(h2, k2):
        return 4*pi*h2*k2/3
    def G12(h2, k2):
        return 4*pi*h2*(k2 - h2)/3
    def G13(h2, k2):
        return 4*pi*k2*(k2 - h2)/3
    def G22(h2, k2):
        res = (2*(h2**4 + k2**4) - 4*h2*k2*(h2**2 + k2**2) + 6*h2**2*k2**2 +
               sqrt(h2**2 + k2**2 - h2*k2)*(-2*(h2**3 + k2**3) + 3*h2*k2*(h2 + k2)))
        return 16*pi/405*res
    def G21(h2, k2):
        res = (2*(h2**4 + k2**4) - 4*h2*k2*(h2**2 + k2**2) + 6*h2**2*k2**2
               + sqrt(h2**2 + k2**2 - h2*k2)*(2*(h2**3 + k2**3) - 3*h2*k2*(h2 + k2)))
        return 16*pi/405*res
    def G23(h2, k2):
        return 4*pi*h2**2*k2*(k2 - h2)/15
    def G24(h2, k2):
        return 4*pi*h2*k2**2*(k2 - h2)/15
    def G25(h2, k2):
        return 4*pi*h2*k2*(k2 - h2)**2/15
    def G32(h2, k2):
        res = (16*(h2**4 + k2**4) - 36*h2*k2*(h2**2 + k2**2) + 46*h2**2*k2**2
               + sqrt(4*(h2**2 + k2**2) - 7*h2*k2)*(-8*(h2**3 + k2**3) +
               11*h2*k2*(h2 + k2)))
        return 16*pi/13125*k2*h2*res
    def G31(h2, k2):
        res = (16*(h2**4 + k2**4) - 36*h2*k2*(h2**2 + k2**2) + 46*h2**2*k2**2
               + sqrt(4*(h2**2 + k2**2) - 7*h2*k2)*(8*(h2**3 + k2**3) -
               11*h2*k2*(h2 + k2)))
        return 16*pi/13125*h2*k2*res
    def G34(h2, k2):
        res = (6*h2**4 + 16*k2**4 - 12*h2**3*k2 - 28*h2*k2**3 + 34*h2**2*k2**2
               + sqrt(h2**2 + 4*k2**2 - h2*k2)*(-6*h2**3 - 8*k2**3 + 9*h2**2*k2 +
               13*h2*k2**2))
        return 16*pi/13125*h2*(k2 - h2)*res
    def G33(h2, k2):
        res = (6*h2**4 + 16*k2**4 - 12*h2**3*k2 - 28*h2*k2**3 + 34*h2**2*k2**2
               + sqrt(h2**2 + 4*k2**2 - h2*k2)*(6*h2**3 + 8*k2**3 - 9*h2**2*k2 -
               13*h2*k2**2))
        return 16*pi/13125*h2*(k2 - h2)*res
    def G36(h2, k2):
        res = (16*h2**4 + 6*k2**4 - 28*h2**3*k2 - 12*h2*k2**3 + 34*h2**2*k2**2
               + sqrt(4*h2**2 + k2**2 - h2*k2)*(-8*h2**3 - 6*k2**3 + 13*h2**2*k2 +
               9*h2*k2**2))
        return 16*pi/13125*k2*(k2 - h2)*res
    def G35(h2, k2):
        res = (16*h2**4 + 6*k2**4 - 28*h2**3*k2 - 12*h2*k2**3 + 34*h2**2*k2**2
               + sqrt(4*h2**2 + k2**2 - h2*k2)*(8*h2**3 + 6*k2**3 - 13*h2**2*k2 -
               9*h2*k2**2))
        return 16*pi/13125*k2*(k2 - h2)*res
    def G37(h2, k2):
        return 4*pi*h2**2*k2**2*(k2 - h2)**2/105
    # Lookup table keyed by (degree n, order p).
    known_funcs = {(0, 1): G01, (1, 1): G11, (1, 2): G12, (1, 3): G13,
                   (2, 1): G21, (2, 2): G22, (2, 3): G23, (2, 4): G24,
                   (2, 5): G25, (3, 1): G31, (3, 2): G32, (3, 3): G33,
                   (3, 4): G34, (3, 5): G35, (3, 6): G36, (3, 7): G37}
    def _ellip_norm(n, p, h2, k2):
        func = known_funcs[n, p]
        return func(h2, k2)
    _ellip_norm = np.vectorize(_ellip_norm)
    def ellip_normal_known(h2, k2, n, p):
        # Adapter matching ellip_normal's argument order.
        return _ellip_norm(n, p, h2, k2)
    # generate both large and small h2 < k2 pairs
    np.random.seed(1234)
    h2 = np.random.pareto(0.5, size=1)
    k2 = h2 * (1 + np.random.pareto(0.5, size=h2.size))
    points = []
    for n in range(4):
        for p in range(1, 2*n+2):
            points.append((h2, k2, n*np.ones(h2.size), p*np.ones(h2.size)))
    points = np.array(points)
    with warnings.catch_warnings(record=True):  # occurrence of roundoff ...
        assert_func_equal(ellip_normal, ellip_normal_known, points, rtol=1e-12)
def test_ellip_harm_2():
    def I1(h2, k2, s):
        # Sum of F/E ratios over the three degree-1 harmonics; analytically
        # equal to 1/(s*sqrt((s^2-h2)*(s^2-k2))) scaled as asserted below.
        res = (ellip_harm_2(h2, k2, 1, 1, s)/(3 * ellip_harm(h2, k2, 1, 1, s))
               + ellip_harm_2(h2, k2, 1, 2, s)/(3 * ellip_harm(h2, k2, 1, 2, s)) +
               ellip_harm_2(h2, k2, 1, 3, s)/(3 * ellip_harm(h2, k2, 1, 3, s)))
        return res
    with warnings.catch_warnings(record=True):  # occurrence of roundoff ...
        assert_almost_equal(I1(5, 8, 10), 1/(10*sqrt((100-5)*(100-8))))
        # Values produced by code from arXiv:1204.0267
        assert_almost_equal(ellip_harm_2(5, 8, 2, 1, 10), 0.00108056853382)
        assert_almost_equal(ellip_harm_2(5, 8, 2, 2, 10), 0.00105820513809)
        assert_almost_equal(ellip_harm_2(5, 8, 2, 3, 10), 0.00106058384743)
        assert_almost_equal(ellip_harm_2(5, 8, 2, 4, 10), 0.00106774492306)
        assert_almost_equal(ellip_harm_2(5, 8, 2, 5, 10), 0.00107976356454)
def test_ellip_harm():
    # Compare ellip_harm against explicit Lamé functions E_n^p(s) for n <= 3.
    # Each Enp(h2, k2, s) below is the closed-form first-kind harmonic.
    def E01(h2, k2, s):
        return 1
    def E11(h2, k2, s):
        return s
    def E12(h2, k2, s):
        return sqrt(abs(s*s - h2))
    def E13(h2, k2, s):
        return sqrt(abs(s*s - k2))
    def E21(h2, k2, s):
        return s*s - 1/3*((h2 + k2) + sqrt(abs((h2 + k2)*(h2 + k2)-3*h2*k2)))
    def E22(h2, k2, s):
        return s*s - 1/3*((h2 + k2) - sqrt(abs((h2 + k2)*(h2 + k2)-3*h2*k2)))
    def E23(h2, k2, s):
        return s * sqrt(abs(s*s - h2))
    def E24(h2, k2, s):
        return s * sqrt(abs(s*s - k2))
    def E25(h2, k2, s):
        return sqrt(abs((s*s - h2)*(s*s - k2)))
    def E31(h2, k2, s):
        return s*s*s - (s/5)*(2*(h2 + k2) + sqrt(4*(h2 + k2)*(h2 + k2) -
                                                 15*h2*k2))
    def E32(h2, k2, s):
        return s*s*s - (s/5)*(2*(h2 + k2) - sqrt(4*(h2 + k2)*(h2 + k2) -
                                                 15*h2*k2))
    def E33(h2, k2, s):
        return sqrt(abs(s*s - h2))*(s*s - 1/5*((h2 + 2*k2) + sqrt(abs((h2 +
                                    2*k2)*(h2 + 2*k2) - 5*h2*k2))))
    def E34(h2, k2, s):
        return sqrt(abs(s*s - h2))*(s*s - 1/5*((h2 + 2*k2) - sqrt(abs((h2 +
                                    2*k2)*(h2 + 2*k2) - 5*h2*k2))))
    def E35(h2, k2, s):
        return sqrt(abs(s*s - k2))*(s*s - 1/5*((2*h2 + k2) + sqrt(abs((2*h2
                                    + k2)*(2*h2 + k2) - 5*h2*k2))))
    def E36(h2, k2, s):
        return sqrt(abs(s*s - k2))*(s*s - 1/5*((2*h2 + k2) - sqrt(abs((2*h2
                                    + k2)*(2*h2 + k2) - 5*h2*k2))))
    def E37(h2, k2, s):
        return s * sqrt(abs((s*s - h2)*(s*s - k2)))
    # The optional signm/signn arguments (both 1) must not change the result.
    assert_equal(ellip_harm(5, 8, 1, 2, 2.5, 1, 1),
                 ellip_harm(5, 8, 1, 2, 2.5))
    # Lookup table keyed by (degree n, order p).
    known_funcs = {(0, 1): E01, (1, 1): E11, (1, 2): E12, (1, 3): E13,
                   (2, 1): E21, (2, 2): E22, (2, 3): E23, (2, 4): E24,
                   (2, 5): E25, (3, 1): E31, (3, 2): E32, (3, 3): E33,
                   (3, 4): E34, (3, 5): E35, (3, 6): E36, (3, 7): E37}
    point_ref = []
    def ellip_harm_known(h2, k2, n, p, s):
        # Evaluate the matching closed form for every sample point.
        for i in range(h2.size):
            func = known_funcs[(int(n[i]), int(p[i]))]
            point_ref.append(func(h2[i], k2[i], s[i]))
        return point_ref
    np.random.seed(1234)
    h2 = np.random.pareto(0.5, size=30)
    k2 = h2*(1 + np.random.pareto(0.5, size=h2.size))
    s = np.random.pareto(0.5, size=h2.size)
    points = []
    for i in range(h2.size):
        for n in range(4):
            for p in range(1, 2*n+2):
                points.append((h2[i], k2[i], n, p, s[i]))
    points = np.array(points)
    assert_func_equal(ellip_harm, ellip_harm_known, points, rtol=1e-12)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    run_module_suite()
| bsd-3-clause |
kwagyeman/openmv | scripts/examples/OpenMV/27-Lepton/lepton_hotspot_rgb565_color_tracking.py | 3 | 1754 | # Single Color RGB565 Blob Tracking Example
#
# This example shows off single color RGB565 tracking using the OpenMV Cam using the FLIR LEPTON.

# FLIR Lepton Shutter Note: FLIR Leptons with radiometry and a shutter will pause the video often
# as they heatup to re-calibrate. This will happen less and less often as the sensor temperature
# stablizes. You can force the re-calibration to not happen if you need to via the lepton API.
# However, it is not recommended because the image will degrade overtime.

import sensor, image, time, math

# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
# LAB threshold tuned to pick out hot (bright) regions of the IRONBOW palette.
threshold_list = [( 70, 100, -30, 40, 20, 100)]

print("Resetting Lepton...")
# These settings are applied on reset
sensor.reset()
print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
                              sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT)))
print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No"))
# Make the color palette cool
sensor.set_color_palette(sensor.PALETTE_IRONBOW)
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)
# Give the sensor time to settle after reset before grabbing frames.
sensor.skip_frames(time=5000)
clock = time.clock()

# Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" merges all overlapping blobs in the image.

# Main capture loop: grab a frame, mark every hot blob, report FPS.
while(True):
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs(threshold_list, pixels_threshold=200, area_threshold=200, merge=True):
        img.draw_rectangle(blob.rect())
        img.draw_cross(blob.cx(), blob.cy())
    print(clock.fps())
| mit |
KaranToor/MA450 | google-cloud-sdk/lib/surface/emulators/pubsub/start.py | 4 | 1561 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gcloud pubsub emulator start command."""
from googlecloudsdk.api_lib.emulators import pubsub_util
from googlecloudsdk.api_lib.emulators import util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
class Start(base.Command):
  """Start a local pubsub emulator.

  This command starts a local pubsub emulator.
  """

  detailed_help = {
      'DESCRIPTION': '{description}',
      'EXAMPLES': """\
          To start a local pubsub emulator, run:

            $ {command} --data-dir DATA-DIR
          """,
  }

  @staticmethod
  def Args(parser):
    """Register command-line flags for this command."""
    parser.add_argument(
        '--host-port',
        required=False,
        type=arg_parsers.HostPort.Parse,
        help='The host:port to which the emulator should be bound.')

  # Override
  def Run(self, args):
    """Start the emulator, picking a default host:port when none was given."""
    if not args.host_port:
      args.host_port = arg_parsers.HostPort.Parse(util.GetHostPort(
          pubsub_util.PUBSUB))
    pubsub_util.Start(args)
| apache-2.0 |
mdaal/KAM | Quantum_Model_2.py | 1 | 15171 | import numpy as np
from matplotlib import pylab, mlab, pyplot
plt = pyplot
MHz = np.power(10,6)
# w_0 = 2*np.pi*f_0
# psi_1 = 0
# phi_r = np.pi / 2
# K = -1.0 * np.power(10,-4.0)*w_0
# gamma_nl = 0.01*K/np.sqrt(3)
# gamma_r = 0.01 * w_0
# gamma_l = 1.1 * gamma_r
# sigmap_12 = 0.5
# sigmap_13 = np.power(0.5,0.5)
# b1c_in = np.sqrt((4/(3*np.sqrt(3))) * np.power(sigmap_13*gamma_r,-1) * np.power((gamma_r+gamma_l)/(np.abs(K)-np.sqrt(3)*gamma_nl), 3) * (K*K + gamma_nl*gamma_nl)) #Threshold of bi-stability
# b1_in = .8* b1c_in
#computed values:
# psi_2, V3, phi_B, b2_out
Manual = False
Use_Run_45a = 1
if Use_Run_45a:
#fit, fig, ax = Run45aP.nonlinear_fit(Save_Fig = True, Indexing = (None,-1,None))
bestfit = 'Powell'
eta = fit[bestfit].x[4] #
delta = fit[bestfit].x[5]#
f_0 = fit[bestfit].x[0] #
Qtl = fit[bestfit].x[1]#
Qc = fit[bestfit].x[2]#
phi31 = fit[bestfit].x[3]
Z1 = Run45aP.metadata.Feedline_Impedance
Z3 = Run45aP.metadata.Resonator_Impedance
V30 = np.sqrt(fit['V30V30'])
Pprobe_dBm = -54
#dBm
phiV1 = 0
if Manual:
f_0 = 700*MHz
delta =0.009 #freq nonlinearity
eta = 10#5.9 #Q nonlinearity
V30 = 0.1
Qtl = 100e3
Qc = 30e3
Z3 = 50.0
Z1 = 50.0
Pprobe_dBm = -65
phi31 = np.pi/2.05 #np.pi/2.015
phiV1 = 1*np.pi/10
Z2 = Z1
Pprobe = 0.001* np.power(10.0,Pprobe_dBm/10.0)
V1V1 = Pprobe *2*Z1
V1 = np.sqrt(V1V1) * np.exp(np.complex(0,1)*phiV1)
V30V30 = np.square(np.abs(V30))
Q = 1.0/ ((1.0/Qtl) + (1.0/Qc))
#tau = 50e-9 #cable delay - only affect V3, but not V2
################################# Create f array making sure it contains f_0
numBW = 20                 # sweep span in units of resonator bandwidths
BW = numBW*f_0/Q # 2*(f_0 * 0.25)
num = 3000                 # points per half-span
# Exactly one of the three spacing schemes below is active ("if 1").
if 1: #Triangular numbers
    # Triangular-number spacing: points cluster densely near f_0.
    T = np.linspace(1, num, num=num, endpoint=True, retstep=False, dtype=None)
    T = T*(T+1.0)/2.0
    f_plus = (T*(BW/2)/T[-1]) + f_0
    f_minus = (-T[::-1]/T[-1])*(BW/2) + f_0
    f = np.hstack((f_minus,f_0,f_plus))
if 0: #linear
    f_plus = np.linspace(f_0, f_0 + BW/2, num=num, endpoint=True, retstep=False, dtype=None)
    f_minus = np.linspace(f_0 - BW/2,f_0, num=num-1, endpoint=False, retstep=False, dtype=None)
    f = np.hstack((f_minus,f_plus))
if 0: #logerithmic
    f_plus = np.logspace(np.log10(f_0), np.log10(f_0 + BW/2), num=num, endpoint=True, dtype=None)
    f_minus = -f_plus[:0:-1] + 2*f_0
    f = np.hstack((f_minus,f_plus))
#################################
# Output arrays: one row per frequency, one column per cubic root.
# Masked arrays are used so unphysical (complex) roots can be hidden.
Number_of_Roots = 3
V3V3 = np.ma.empty((f.shape[0],Number_of_Roots), dtype = np.complex128)
V3 = np.ma.empty_like(V3V3)
exp_phi_V3 = np.ma.empty_like(V3V3)
V2_out = np.ma.empty_like(V3V3)
# Single-valued branches selected for an up sweep / down sweep.
V3V3_up = np.empty_like(f)
V3_up = np.empty_like(f)
V2_out_up = np.empty(f.shape, dtype = np.complex128)
V3V3_down = np.empty(f.shape)
V3_down = np.empty_like(f)
V2_out_down = np.empty_like(f,dtype = np.complex128)
################ 3rd Version
# At each frequency, solve the steady-state cubic in |V3|^2, then form the
# cavity phase factor and the transmitted wave for every root, plus the
# single branch an up/down sweep would actually observe.
# NOTE(review): xrange is Python 2 only; on Python 3 this needs range.
for n in xrange(f.shape[0]):
    # Cubic coefficients in V3V3 = |V3|^2, highest order first.
    coefs = np.array([np.square(delta * f[n]/V30V30 )+ np.square(eta*f_0/(2*Qtl*V30V30)), 2*(delta*(f[n]-f_0)*f[n]/V30V30 + eta*f_0*f_0/(4*Qtl*V30V30*Q) ),np.square(f[n]-f_0) + np.square(f_0/(2*Q)), -1.0*f_0*f_0*Z3*V1V1/(4*np.pi*Qc*Z1)])
    # Mask complex roots — only real |V3|^2 values are physical.
    V3V3[n] =np.ma.array(np.roots(coefs),mask= np.iscomplex(np.roots(coefs)),fill_value = 1)
    V3[n] = np.ma.sqrt(V3V3[n])
    # exp_phi_V3[n] is e^{i phi_V3} - no minus sign
    exp_phi_V3[n] = f_0*np.exp(np.complex(0,1.0)*phi31)*V1*np.sqrt(Z3)/(2*np.sqrt(np.pi*Qc*Z1)) *np.power( ( ((f_0/(2*Q)) + np.complex(0,1)*(f[n]-f_0)) * V3[n]) + ((eta*f_0/(2*Qtl*V30V30)) + np.complex(0,1)*(delta * f[n]/V30V30))* V3V3[n]*V3[n],-1.0 )
    # V2_out[n] is V_2^out * e^(-i phi_2)
    V2_out[n] = V1*((1-np.exp(np.complex(0,2.0)*phi31))/2 +( (1/Qc) / ((1/Qc) + (1/Qtl)*(1+eta*V3V3[n]/V30V30) + np.complex(0,2)* (((f[n]-f_0)/f_0) + delta*(V3V3[n]/V30V30)*(f[n]/f_0))))*np.exp(np.complex(0,2.0)*phi31))
    # calculate observed for upsweep and down sweep
    # min |--> up sweep (like at UCB),
    # max |--> down sweep
    V3V3_down[n] = np.extract(~V3V3[n].mask,V3V3[n]).max().real
    V3_down[n] = np.sqrt(V3V3_down[n])
    V2_out_down[n] = V1*((1-np.exp(np.complex(0,2.0)*phi31))/2 +( (1/Qc) / ((1/Qc) + (1/Qtl)*(1+eta*V3V3_down[n]/V30V30) + np.complex(0,2)* (((f[n]-f_0)/f_0) + delta*(V3V3_down[n]/V30V30)*(f[n]/f_0))))*np.exp(np.complex(0,2.0)*phi31))
    V3V3_up[n] = np.extract(~V3V3[n].mask,V3V3[n]).min().real
    V3_up[n] = np.sqrt(V3V3_up[n])
    V2_out_up[n] = V1*((1-np.exp(np.complex(0,2.0)*phi31))/2 +( (1/Qc) / ((1/Qc) + (1/Qtl)*(1+eta*V3V3_up[n]/V30V30) + np.complex(0,2)* (((f[n]-f_0)/f_0) + delta*(V3V3_up[n]/V30V30)*(f[n]/f_0))))*np.exp(np.complex(0,2.0)*phi31))
####################
# (Commented-out consistency checks: synthesize |V3|^2 back from S21 and
# verify it satisfies the same cubic as the polynomial roots.)
# sV3V3_up = ((2.0*V1/Qc) * np.power(2*V2_out_up*np.exp(np.complex(0,-2.0)*phi31)+V1*(1-np.exp(np.complex(0,-2.0)*phi31)),-1) - (1.0/Qc) - (1.0/Qtl) - (np.complex(0,2.0) * (f-f_0)/f_0)) * V30V30 *np.power((eta/Qtl) + np.complex(0,2.0)*(delta*f/f_0),-1)
# sV3V3_up = sV3V3_up.real
# sV3_up = np.sqrt(sV3V3_up)
# residual1 = np.empty(sV3V3_up.shape,dtype = np.complex128)
# residual2 = np.empty(sV3V3_up.shape,dtype = np.complex128)
# residual3 = np.empty(sV3V3_up.shape,dtype = np.complex128)
# residual4 = np.empty(sV3V3_up.shape,dtype = np.complex128)
# for n in xrange(f.shape[0]):
# coefs = np.array([np.square(delta * f[n]/V30V30 )+ np.square(eta*f_0/(2*Qtl*V30V30)), 2*(delta*(f[n]-f_0)*f[n]/V30V30 + eta*f_0*f_0/(4*Qtl*V30V30*Q) ),np.square(f[n]-f_0) + np.square(f_0/(2*Q)), -1.0*f_0*f_0*Z3*V1V1/(4*np.pi*Qc*Z1)])
# residual1[n] = f_0*np.exp(np.complex(0,1)*phi31)*V1*np.sqrt(Z3)/(2*np.sqrt(np.pi*Qc*Z1)) - (( ((f_0/(2.0*Q)) + np.complex(0,1.0)*(f[n]-f_0)) * sV3_up[n]) + ((eta*f_0/(2.0*Qtl*V30V30)) + np.complex(0,1)*(delta * f[n]/V30V30))* sV3V3_up[n]*sV3_up[n])
# residual2[n] = f_0*np.exp(np.complex(0,1)*phi31)*V1*np.sqrt(Z3)/(2*np.sqrt(np.pi*Qc*Z1)) - (( ((f_0/(2.0*Q)) + np.complex(0,1.0)*(f[n]-f_0)) * V3_up[n]) + ((eta*f_0/(2.0*Qtl*V30V30)) + np.complex(0,1)*(delta * f[n]/V30V30))* V3V3_up[n]* V3_up[n])
# residual3[n] = np.polyval(coefs,sV3V3_up[n] ) # Exaluate the V3V3 qubic using the sV3V3_up synthesized from S21
# residual4[n] = np.polyval(coefs,V3V3_up[n] ) # Exaluate the V3V3 qubic using the V3V3_up computed from polynomial roots
# #if residual2 - residual3 = 0 then V3V3_up = sV3V3_up to high enough accuracy
# sumsq = np.square(residual2).sum()
#We use the solution to the cubic for for one scan direction to construct the other two solutions
V2cubic = V2_out_down
S21 = V2cubic/V1
# NOTE(review): this allocation is dead — V3__ is immediately rebound on the
# next line; the np.empty_like call can be deleted.
V3__ = np.empty_like(f)
# Cavity voltage reconstructed from the chosen S21 branch.
V3__ = (S21 + (np.exp(np.complex(0,2.0)*phi31)-1)/2.)*V1*np.sqrt(Z3*Qc/(Z1*np.pi))* np.exp(np.complex(0,-1.0)*phi31)
# Given one root v1 = |V3__|^2 of the cubic, the remaining two roots v2, v3
# follow from the quadratic factor below (Vieta-style construction).
z1 = eta/(Qtl*V30V30)+ np.complex(0,1.0)*(2*delta*f)/(V30V30*f_0)
z2 = (1.0/Qc) + (1/Qtl) + np.complex(0,2.0) *(f-f_0)/f_0
z1z2c = z1*z2.conjugate()
z1z1 = z1*z1.conjugate()
z2z2 = z2*z2.conjugate()
v1 = V3__*V3__.conjugate()
term1 = -(z1z2c.real/z1z1) - v1/2.0
term2 = np.complex(0,1)*np.sqrt(4*z1z2c.imag*z1z2c.imag + 3*v1*v1*z1z1*z1z1 + 4*z1z1*z1z2c.real*v1)/(2*z1z1)
v2 = term1 + term2
v3 = term1 - term2
V3p__ = np.sqrt(v2)
V3m__ = np.sqrt(v3)
# Transmission evaluated on the two reconstructed branches.
S21p= ((1-np.exp(np.complex(0,2.0)*phi31))/2 +( (1/Qc) / ((1/Qc) + (1/Qtl)*(1+eta*v2/V30V30) + np.complex(0,2)* (((f-f_0)/f_0) + delta*(v2/V30V30)*(f/f_0))))*np.exp(np.complex(0,2.0)*phi31))
S21m = ((1-np.exp(np.complex(0,2.0)*phi31))/2 +( (1/Qc) / ((1/Qc) + (1/Qtl)*(1+eta*v3/V30V30) + np.complex(0,2)* (((f-f_0)/f_0) + delta*(v3/V30V30)*(f/f_0))))*np.exp(np.complex(0,2.0)*phi31))
#V3c__ = V3__.conjugate()
#f_0*np.exp(np.complex(0,1)*phi31)*V1*np.sqrt(Z3)/(2*np.sqrt(np.pi*Qc*Z1)) - (( ((f_0/(2.0*Q)) + np.complex(0,1.0)*(f[n]-f_0)) * sV3_up[n]) + ((eta*f_0/(2.0*Qtl*V30V30)) + np.complex(0,1)*(delta * f[n]/V30V30))* sV3V3_up[n]*sV3_up[n])
############### 2nd Version
# def roots(freq):
# coefs = [np.square(delta * freq/V30V30 )+ np.square(eta*f_0/(4*Qtl*V30V30)), 2*(delta*(freq-f_0)*freq/V30V30 + eta*f_0*f_0/(4*Qtl*V30V30*Q) ),np.square(freq-f_0) + np.square(f_0/(2*Q)), -1.0*f_0*f_0*Z3*V1V1/(4*np.pi*Qc*Z1)]
# return np.roots(coefs)
# for n in xrange(f.shape[0]):
# V3V3[n] = roots(f[n])
# V3[n] = np.sqrt(V3V3[n])
# test[n] = V3[n]
# # exp_phi_V3[n] is e^{i phi_V3} - no minus sign
# exp_phi_V3[n] = f_0*np.exp(np.complex(0,1)*phi31)*V1*np.sqrt(Z3)/(2*np.sqrt(np.pi*Qc*Z1)) / ( ((f_0/(2*Q)) + np.complex(0,1)*(f[n]-f_0) *V3[n]) + ((eta*f_0/(2*Qtl*V30V30)) + np.complex(0,1)*(delta * f[n]/V30V30))* V3V3[n]*V3[n] )
# # V2_out[n] is V_2^out * e^(-i phi_2)
# V2_out[n] = V1*(1 -( (1/Qc) / ((1/Qc) + (1/Qtl)*(1+eta*V3V3[n]/V30V30) + 2*np.complex(0,1)* (((f[n]-f_0)/f_0) + delta*(V3V3[n]/V30V30)*(f[n]/f_0)))))
# V3V3 = np.ma.masked_where(np.iscomplex(V3V3), V3V3, copy=True)
# V3V3.fill_value = 1
# V3.mask = V3V3.mask
# exp_phi_V3.mask = V3V3.mask
# V2_out.mask = V3V3.mask
# for n in xrange(f.shape[0]):
# #calculate observed upsweep values
# V3V3_up[n] = np.max(np.abs(V3V3[n].compressed()))
# V3_up[n] = np.sqrt(V3V3_up[n])
# V2_out_up[n] = V1*(1 -( (1/Qc) / ((1/Qc) + (1/Qtl)*(1+eta*V3V3_up[n]/V30V30) + 2*np.complex(0,1)* (((f[n]-f_0)/f_0) + delta*(V3V3_up[n]/V30V30)*(f[n]/f_0)))))
#################
# ################## 1st Version
# for n in xrange(f.shape[0]):
# V3V3[n] = roots(f[n])
# #Where are there 3 real solutions?
# # if np.isreal(V3V3[n,0]) & np.isreal(V3V3[n,1]) & np.isreal(V3V3[n,2]):
# # print(n)
# V3V3 = np.ma.masked_where(np.iscomplex(V3V3), V3V3, copy=True)
# V3 = np.sqrt(V3V3)
# exp_phi_V3 = np.ma.empty_like(V3)
# V2_out = np.ma.empty_like(V3)
# for n in xrange(f.shape[0]):
# # exp_phi_V3[n] is e^{i phi_V3} - no minus sign
# exp_phi_V3[n] = f_0*np.exp(np.complex(0,1)*phi31)*V1*np.sqrt(Z3)/(2*np.sqrt(np.pi*Qc*Z1)) / ( ((f_0/(2*Q)) + np.complex(0,1)*(f[n]-f_0) *V3[n]) + ((eta*f_0/(2*Qtl*V30V30)) + np.complex(0,1)*(delta * f[n]/V30V30))* V3V3[n]*V3[n] )
# # V2_out_phasor[n] is V_2^out * e^(-i phi_2)
# V2_out[n] = V1*(1 -( (1/Qc) / ((1/Qc) + (1/Qtl)*(1+eta*V3V3[n]/V30V30) + 2*np.complex(0,1)* (((f[n]-f_0)/f_0) + delta*(V3V3[n]/V30V30)*(f[n]/f_0)))))
# ##################
# --- Figure: 2x2 panels — |S21|, cavity amplitude, IQ loop, phase ---
fig = plt.figure( figsize=(6, 6), dpi=150)
ax = {}
# Panel 1: transmission magnitude in dB vs. fractional detuning.
ax[1] = fig.add_subplot(2,2,1)
dff = (f - f_0)/f_0
trans = (V2_out/V1)
# dfff = np.array([dff,dff,dff]).transpose()
# dff = ma.array(dfff, mask = trans.mask)
# trans2 = trans.compressed()
# dff2 = dff.compressed()
trans_up = V2_out_up/V1
trans_down = (V2_out_down/V1)
# Reconstructed branches, keeping only points where the root is real.
transp=S21p[~np.iscomplex(V3p__)]
transm=S21m[~np.iscomplex(V3m__)]
curve = ax[1].plot(dff,20*np.log10(np.abs(trans)),color = 'g', linestyle = '-',linewidth = 2)
curve_up = ax[1].plot(dff,20*np.log10(np.abs(trans_up)), color = 'k', linestyle = ':', alpha = .35,linewidth = 1, label = 'Up Sweep')
curve_down = ax[1].plot(dff,20*np.log10(np.abs(trans_down)), color = 'k', linestyle = '--', alpha = .35, linewidth = 1,label = 'Down Sweep')
ax[1].set_title('Mag Transmission')
ax[1].set_xlabel(r'$\delta f_0 / f_0$', color='k')
ax[1].set_ylabel(r'$20 \cdot \log_{10}|S_{21}|$ [dB]', color='k')
ax[1].yaxis.labelpad = 0
ax[1].ticklabel_format(axis='x', style='sci',scilimits = (0,0), useOffset=True)
#ax[1].xaxis.set_ticks(np.hstack((np.arange(-numBW/2.0,0,f_0/Q),np.arange(0,numBW/2.0,f_0/Q))) )
# Annotate panel 1 with the model parameters used for this run.
parameter_dict = {'f_0':f_0, 'Qtl':Qtl, 'Qc':Qc, 'phi31':phi31, 'eta':eta, 'delta':delta, 'Zfl':Z1, 'Zres':Z3, 'phiV1':phiV1, 'V30V30':V30*V30}
note = '$P_{probe}$' + ' {:3.0f} dBm, '.format(Pprobe_dBm)+'\n' +(r'$f_0$ = {f_0:3.2e} Hz,' + '\n' + '$Q_{sub1}$ = {Qtl:3.2e},' +'\n' +' $Q_c$ = {Qc:3.2e},' +
    '\n' + r'$\phi_{sub2}$ = {ang:3.2f}$^\circ$,'+ '\n' + '${l1}$ = {et:3.2e},' + '\n' +'${l2}$ = {de:3.2e}').format(
    nl = '\n', et = parameter_dict['eta']/parameter_dict['V30V30'],
    de = parameter_dict['delta']/parameter_dict['V30V30'],
    l1 = r'{\eta}/{V_{3,0}^2}',
    l2 = r'{\delta}/{V_{3,0}^2}',
    ang = parameter_dict['phi31']*180/np.pi,
    sub1 = '{i}', sub2 = '{31}',**parameter_dict)
ax[1].text(0.99, 0.01, note,
    verticalalignment='bottom', horizontalalignment='right',
    transform=ax[1].transAxes,
    color='black', fontsize=4)
# Panel 2: cavity voltage amplitude |V3| for all roots plus the two sweeps.
ax[2] = fig.add_subplot(2,2,2)
curve = ax[2].plot(dff,np.abs(V3),color = 'g', linestyle = '-',linewidth = 2)# <- V3 has complex values when it shouldn't !! should this be real part or abs?
upcurve = ax[2].plot(dff,np.abs(V3_up),color = 'k', linestyle = ':', alpha = .35,linewidth = 1, label = 'Up Sweep')
upcurve = ax[2].plot(dff,np.abs(V3_down),color = 'k', linestyle = '--', alpha = .35, linewidth = 1,label = 'Down Sweep')
#upcurve__ = ax[2].plot(dff,np.abs(V3__),linestyle = '--')
#curve__ = ax[2].plot(dff[~np.iscomplex(V3p__)].real,V3p__[~np.iscomplex(V3p__)].real,linestyle = '--', marker = '1')
#curve__ = ax[2].plot(dff[~np.iscomplex(V3m__)].real,V3m__[~np.iscomplex(V3m__)].real,linestyle = '--', marker = '2')
ax[2].set_title('Cavity Amplitude')
ax[2].set_xlabel(r'$\delta f_0 / f_0$', color='k')
ax[2].set_ylabel(r'Volts', color='k')
ax[2].ticklabel_format(axis='x', style='sci',scilimits = (0,0),useOffset=False)
# Panel 3: resonance loop in the complex S21 plane.
ax[3] = fig.add_subplot(2,2,3,aspect='equal')
loop = ax[3].plot(trans.real, trans.imag,color = 'g', linestyle = '-',linewidth = 2)#, label = 'Full Solution')
loop[0].set_label('Full Solution')
loop_down = ax[3].plot(trans_down.real, trans_down.imag,color = 'k', linestyle = '--', alpha = .35, linewidth = 1,label = 'Down Sweep')
#loop = ax[3].plot(transp.real,transp.imag,linestyle = '--', marker = '1')
#oop = ax[3].plot(transm.real,transm.imag,linestyle = '--', marker = '2')
#firstpt = ax[3].plot(trans.real[0:10], trans.imag[0:10], 'ok')
loop_up = ax[3].plot(trans_up.real, trans_up.imag,color = 'k', linestyle = ':', alpha = .35,linewidth = 1, label = 'Up Sweep')
ax[3].set_title('Resonance Loop')
ax[3].set_xlabel(r'$\Re$[$S_{21}$]', color='k')
ax[3].set_ylabel(r'$\Im$[$S_{21}$]', color='k')
ax[3].yaxis.labelpad = 0
ax[3].ticklabel_format(axis='x', style='sci',scilimits = (0,0),useOffset=False)
ax[3].legend(loc = 'upper center', fontsize=7, bbox_to_anchor=(1.5, -.15), ncol=3,scatterpoints =1, numpoints = 1, labelspacing = .02)
# Panel 4: transmitted phase angle.
ax[4] = fig.add_subplot(2,2,4)
trans_phase = np.ma.array(np.angle(trans),mask = trans.mask)
trans_up_phase = np.angle(trans_up)
trans_down_phase = np.angle(trans_down)
phase_ang_curve = ax[4].plot(dff,trans_phase,color = 'g', linestyle = '-',linewidth = 2)
phase_up_ang_curve = ax[4].plot(dff,trans_up_phase,color = 'k', linestyle = ':', alpha = .35,linewidth = 1, label = 'Up Sweep')
phase_down_ang_curve = ax[4].plot(dff,trans_down_phase,color = 'k', linestyle = '--', alpha = .35,linewidth = 1, label = 'Down Sweep')
ax[4].set_title('Transmitted Phase Angle')
ax[4].set_xlabel(r'$\delta f_0 / f_0$', color='k')
ax[4].set_ylabel(r'Ang[$S_{21}$]', color='k')
ax[4].yaxis.labelpad = 0
ax[4].ticklabel_format(axis='x', style='sci',scilimits = (0,0),useOffset=False)
# Shrink tick labels on every panel and tighten the layout.
for k in ax.keys():
    ax[k].tick_params(axis='y', labelsize=5)
    ax[k].tick_params(axis='x', labelsize=5)
plt.subplots_adjust(left=.1, bottom=.1, right=None ,wspace=.35, hspace=.3)
#plt.subplots_adjust(left=.1, bottom=.1, right=None, top=.95 ,wspace=.4, hspace=.4)
#plt.suptitle('Nonlinear Resonator Plots')
plt.show()
if Use_Run_45a:
    Title = '45a_Nonlinear_Solition_Pprobe_-54dBm'
    #swp._save_fig_dec(fig, Title.replace('\n','_').replace(' ','_'), Use_Date = Use_Date )
#fig.savefig('Nonlinear_Res',dpi=300, transparency = True)
| mit |
ankur-gupta91/horizon-net-ip | openstack_dashboard/test/integration_tests/pages/project/compute/access_and_security/floatingipspage.py | 6 | 4216 | # Copyrigh:t 2015 Hewlett-Packard Development Company, L.P
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from selenium.webdriver.common import by
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.regions import forms
from openstack_dashboard.test.integration_tests.regions import tables
class FloatingIPTable(tables.TableRegion):
    """Region wrapper for the Floating IPs table in the dashboard UI.

    Each bound action clicks the corresponding button and returns the form
    region that the dashboard opens in response.
    """
    name = 'floating_ips'
    # Field mapping handed to FormRegion for the associate dialog.
    # NOTE(review): the outer parentheses do not nest — this is the single
    # tuple ("ip_id", "instance_id"); confirm that is the intended shape.
    FLOATING_IP_ASSOCIATIONS = (
        ("ip_id", "instance_id"))

    @tables.bind_table_action('allocate')
    def allocate_ip(self, allocate_button):
        # Table-level action: opens the allocation confirmation form.
        allocate_button.click()
        return forms.BaseFormRegion(self.driver, self.conf)

    @tables.bind_table_action('release')
    def release_ip(self, release_button):
        # Table-level action: opens the release confirmation form.
        release_button.click()
        return forms.BaseFormRegion(self.driver, self.conf)

    @tables.bind_row_action('associate', primary=True)
    def associate_ip(self, associate_button, row):
        # Row-level action: opens the associate form with instance fields.
        associate_button.click()
        return forms.FormRegion(self.driver, self.conf,
                                field_mappings=self.FLOATING_IP_ASSOCIATIONS)

    @tables.bind_row_action('disassociate', primary=True)
    def disassociate_ip(self, disassociate_button, row):
        # Row-level action: opens the disassociate confirmation form.
        disassociate_button.click()
        return forms.BaseFormRegion(self.driver, self.conf)
class FloatingipsPage(basepage.BaseNavigationPage):
    """Page object for the Access & Security > Floating IPs panel."""

    FLOATING_IPS_TABLE_IP_COLUMN = 'ip'
    FLOATING_IPS_TABLE_FIXED_IP_COLUMN = 'fixed_ip'

    # Success popup shown after allocation; the new IP is scraped from its text.
    _floatingips_fadein_popup_locator = (
        by.By.CSS_SELECTOR, '.alert.alert-success.alert-dismissable.fade.in>p')

    def __init__(self, driver, conf):
        super(FloatingipsPage, self).__init__(driver, conf)
        self._page_title = "Access & Security"

    def _get_row_with_floatingip(self, floatingip):
        # Look up the table row whose 'ip' column equals *floatingip*.
        return self.floatingips_table.get_row(
            self.FLOATING_IPS_TABLE_IP_COLUMN, floatingip)

    @property
    def floatingips_table(self):
        return FloatingIPTable(self.driver, self.conf)

    def allocate_floatingip(self):
        """Allocate a floating IP and return the new address as a string."""
        floatingip_form = self.floatingips_table.allocate_ip()
        floatingip_form.submit()
        # Dotted-quad matcher restricted to octets 0-255.
        ip = re.compile('(([2][5][0-5]\.)|([2][0-4][0-9]\.)'
                        + '|([0-1]?[0-9]?[0-9]\.)){3}(([2][5][0-5])|'
                        '([2][0-4][0-9])|([0-1]?[0-9]?[0-9]))')
        # Extract the address from the success popup's text.
        # NOTE(review): match is None if the popup text has no IP — this would
        # raise AttributeError rather than fail with a clear message.
        match = ip.search((self._get_element(
            *self._floatingips_fadein_popup_locator)).text)
        floatingip = str(match.group())
        return floatingip

    def release_floatingip(self, floatingip):
        """Select the row holding *floatingip* and confirm its release."""
        row = self._get_row_with_floatingip(floatingip)
        row.mark()
        modal_confirmation_form = self.floatingips_table.release_ip()
        modal_confirmation_form.submit()

    def is_floatingip_present(self, floatingip):
        # True when a row with this address exists in the table.
        return bool(self._get_row_with_floatingip(floatingip))

    def associate_floatingip(self, floatingip, instance_name=None,
                             instance_ip=None):
        """Associate *floatingip* with the instance named in the dialog."""
        row = self._get_row_with_floatingip(floatingip)
        floatingip_form = self.floatingips_table.associate_ip(row)
        # The dropdown entry is rendered as "<instance name>: <fixed ip>".
        floatingip_form.instance_id.text = "{}: {}".format(instance_name,
                                                           instance_ip)
        floatingip_form.submit()

    def disassociate_floatingip(self, floatingip):
        """Disassociate *floatingip* from its instance."""
        row = self._get_row_with_floatingip(floatingip)
        floatingip_form = self.floatingips_table.disassociate_ip(row)
        floatingip_form.submit()

    def get_fixed_ip(self, floatingip):
        """Return the fixed IP shown in the row for *floatingip*."""
        row = self._get_row_with_floatingip(floatingip)
        return row.cells[self.FLOATING_IPS_TABLE_FIXED_IP_COLUMN].text
| apache-2.0 |
hbrunn/OpenUpgrade | addons/account/res_currency.py | 42 | 2334 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010 OpenERP s.a. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
"""Inherit res.currency to handle accounting date values when converting currencies"""
class res_currency_account(osv.osv):
    """Extend res.currency so conversions can use an 'average' rate
    computed from the account's move lines instead of the rate at date."""
    _inherit = "res.currency"

    def _get_conversion_rate(self, cr, uid, from_currency, to_currency, context=None):
        """Return the conversion rate, overriding the standard rate with a
        debit/credit-weighted average when the context carries an account
        whose currency_mode is 'average'."""
        if context is None:
            context = {}
        rate = super(res_currency_account, self)._get_conversion_rate(cr, uid, from_currency, to_currency, context=context)
        #process the case where the account doesn't work with an outgoing currency rate method 'at date' but 'average'
        account = context.get('res.currency.compute.account')
        account_invert = context.get('res.currency.compute.account_invert')
        if account and account.currency_mode == 'average' and account.currency_id:
            # _query_get supplies the standard move-line filter clause;
            # only the two ids are user data and they are parameterized.
            query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)
            cr.execute('select sum(debit-credit),sum(amount_currency) from account_move_line l ' \
                'where l.currency_id=%s and l.account_id=%s and '+query, (account.currency_id.id,account.id,))
            tot1,tot2 = cr.fetchone()
            # tot1 = company-currency balance, tot2 = foreign-currency balance.
            # NOTE(review): if the guard passes but the other total is None,
            # float(None) raises TypeError — confirm the sums are always
            # non-NULL together in practice.
            if tot2 and not account_invert:
                rate = float(tot1)/float(tot2)
            elif tot1 and account_invert:
                rate = float(tot2)/float(tot1)
        return rate
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lenghonglin/LU6200_Android_JB_LU620186_00_Kernel | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 |
mishless/text-summarizer | text-summarizer-python/batch_process.py | 1 | 9770 | import sys
import os
import collections
import nltk.data
import string
import math
import features
import traceback
import time
import argparse
import nltk.corpus
import nltk.stem.porter
import shutil
import textClasses as tc
import cluster
import fuzzy
import rules
from os import listdir
CUE_PHRASE_FILE = 'bonus_words'
STIGMA_WORDS_FILE = 'stigma_words'
def pre_process_text(text):
    """Split raw text into a title, tokenized sentences and a word table.

    The first non-empty line is the title; the remainder is the body.
    Returns [title (tc.Title), sentences (list of tc.Sentence, 1-based
    positions), words (dict token -> tc.Word with stem, POS and synonyms)].
    """
    # Drop leading blank lines so the title is the first real line.
    while text[0] == "\n":
        text = text[1:]
    text = text.split('\n', 1)
    title = tc.Title(text[0], [])
    # Normalize curly quotes to their ASCII equivalents.
    text = text[1].replace(u"\u2018", '\'').replace(u"\u2019", '\'').replace(u"\u201c",'"').replace(u"\u201d", '"')
    words = dict()
    sentences = []
    sentence_detector = nltk.data.load('tokenizers/punkt/english.pickle')
    detected_sentences = sentence_detector.tokenize(text.strip())
    stopwords_list = nltk.corpus.stopwords.words('english')
    stemmer = nltk.stem.porter.PorterStemmer()
    #Pre-process title
    tokens = nltk.word_tokenize(title.original)
    tokens = [token for token in tokens if token not in stopwords_list]
    part_of_speech = nltk.pos_tag(tokens)
    for (token, word_pos) in zip(tokens, part_of_speech):
        token = token.lower()
        # Record each new non-punctuation, non-stopword title token, with
        # its Porter stem and WordNet lemma synonyms.
        if (token not in words) and (token not in list(string.punctuation) and (token not in stopwords_list)):
            words[token] = tc.Word(stemmer.stem(token), word_pos, [(lemma, stemmer.stem(lemma)) for synset in nltk.corpus.wordnet.synsets(token) for lemma in synset.lemma_names()])
            title.bag_of_words.append(token)
    #Pre-process text
    for detected_sentence in detected_sentences:
        tokens = nltk.word_tokenize(detected_sentence)
        tokens = [token for token in tokens if token not in stopwords_list]
        if tokens:
            part_of_speech = nltk.pos_tag(tokens)
            bag_of_words = []
            stemmed_bag_of_words = []
            for (token, word_pos) in zip(tokens, part_of_speech):
                token = token.lower()
                if (token not in list(string.punctuation) and (token not in stopwords_list)):
                    if (token not in words):
                        words[token] = tc.Word(stemmer.stem(token), word_pos, [(lemma, stemmer.stem(lemma)) for synset in nltk.corpus.wordnet.synsets(token) for lemma in synset.lemma_names()])
                    # NOTE(review): this elif is always true when reached —
                    # a plain `else` would be equivalent.
                    elif token in words:
                        words[token].increment_abs_frequency()
                    bag_of_words.append(token)
                    stemmed_bag_of_words.append(stemmer.stem(token))
            # Keep the sentence only if it contributed at least one token.
            if (len(bag_of_words) != 0 or len(stemmed_bag_of_words) != 0):
                sentences.append(tc.Sentence(detected_sentence, len(sentences) + 1, [], [], None))
                sentences[-1].bag_of_words = list(bag_of_words)
                sentences[-1].stemmed_bag_of_words = list(stemmed_bag_of_words)
    return [title, sentences, words]
def process_input(argv=None):
    """Parse command-line arguments and load the input text.

    Args:
        argv: optional list of argument strings. Defaults to the process
            arguments (sys.argv[1:]).

    Returns:
        dict with keys "text" (contents of the input file), "percentage"
        (compression rate as a string) and "threads" (thread count string).

    Fix: the original appended *argv* onto sys.argv and then parsed
    sys.argv, so an explicitly passed argv list was never parsed on its
    own (and polluted the process arguments). We now hand argv straight
    to parse_args.
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--text-file", dest="text_file",
                        help="the file containing the text to be summarized",
                        required=True)
    parser.add_argument("-p", "--percent", dest="percentage",
                        help="the compression rate as percentage",
                        required=True)
    parser.add_argument("-t", "--threads", dest="threads",
                        help="the number of threads", required=True)
    args = parser.parse_args(argv)
    # Context manager guarantees the file is closed even on read errors.
    with open(args.text_file, 'r') as f:
        text = f.read()
    return {"text": text, "percentage": args.percentage, "threads": args.threads}
def resource_loader(path='./resources'):
    """Load every word-list file under *path*.

    Args:
        path: directory of resource files (default keeps the original
            hard-coded './resources' behaviour).

    Returns:
        dict mapping each file's base name (extension stripped) to the set
        of lines it contains.
    """
    resources = {}
    for file_entry in os.listdir(path):
        # Keep only the first whitespace-separated token of the listing
        # entry, matching the original behaviour for odd file names.
        resource_file_name = file_entry.split()[0]
        with open(os.path.join(path, resource_file_name), 'r') as f:
            text = f.read()
        resources[resource_file_name.split('.')[0]] = set(text.split('\n'))
    return resources
def print_stuff(sentences, sentences_features):
    """Debug helper: dump each sentence with its feature info and rules.

    NOTE(review): print_sentence_info is not defined anywhere in this
    module — calling this function raises NameError unless it is injected
    elsewhere; confirm before use.
    """
    data = sentences_features
    for i in range(0, len(data)):
        print("******************************")
        print("Sentence: ", end="")
        print(sentences[i].original)
        print_sentence_info(data[i])
        print("Rules: ")
        rules.print_rules_results(data[i])
def filter_using_clusters(sentences, percentage, clusters):
    """Pick the summary sentences, balancing the picks across clusters.

    sentences: objects with .rank and .position; percentage: fraction of
    sentences to keep (0..1); clusters: lists of sentence positions.
    Repeatedly takes the highest-ranked unused sentence from the
    least-used cluster until the quota is met, then restores document
    order. NOTE: *clusters* is consumed (positions are removed in place).
    """
    number_sentences = math.floor(percentage * len(sentences))
    sentences = sorted(sentences, key=lambda x: x.rank, reverse=True)
    # Per-cluster pick counts; an exhausted cluster is pushed to
    # sys.maxsize so min() never selects it again.
    clusters_counter = [0] * len(clusters)
    sentence_counter = 0;
    chosen_sentences = []
    # NOTE(review): if every cluster empties before the quota is reached,
    # no pick can ever succeed and this loop never terminates — confirm
    # callers guarantee enough positions across the clusters.
    while len(chosen_sentences) < number_sentences:
        sentence_counter = 0
        for i in range(0, len(clusters)):
            for j in range(0, len(sentences)):
                # Pick from cluster i only while it is tied for least-used,
                # choosing the best-ranked sentence it still contains.
                if (clusters_counter[i] == min(clusters_counter) and clusters[i].count(sentences[j].position) == 1):
                    chosen_sentences.append(sentences[j])
                    clusters[i].remove(sentences[j].position)
                    if (len(clusters[i]) == 0):
                        clusters_counter[i] = sys.maxsize
                    else:
                        clusters_counter[i] += 1
                    break;
            if (len(chosen_sentences) >= number_sentences):
                break;
    # Restore original document order for readable output.
    chosen_sentences = sorted(chosen_sentences, key=lambda x: x.position)
    return chosen_sentences
def print_based_on_fuzzy(angels_objects, p):
    """Print the fuzzy-rank-only summary: the top floor(p*N) sentences by
    rank, emitted in document order, between a header and a blank line."""
    print("***** RESULTS BASED ONLY ON FUZZY *****")
    keep = math.floor(p * len(angels_objects))
    top_ranked = sorted(angels_objects, key=lambda s: s.rank, reverse=True)[:keep]
    for sentence in sorted(top_ranked, key=lambda s: s.position):
        print(sentence.original)
    print("")
def summarize_text(text, percentage=22, threads=4):
    """Produce an extractive summary of *text*.

    Args:
        text: title on the first line, document body after it.
        percentage: compression rate in percent (generalized — previously
            hard-coded to 22; the default preserves old behaviour).
        threads: worker threads for the k-means step (previously hard-coded
            to 4).

    Returns:
        The selected sentences concatenated in document order.
    """
    resources = resource_loader()
    preprocessed_text = pre_process_text(text)
    # Per-sentence feature values; preprocessed_text is [title, sentences, words].
    keyword_feature_value = features.keyword_feature(preprocessed_text[1], preprocessed_text[2])
    title_word_feature_value = features.title_word_feature(preprocessed_text[0], preprocessed_text[1])
    sentence_location_feature_value = features.sentence_location_feature(preprocessed_text[1])
    sentence_length_feature_value = features.sentence_length_feature(preprocessed_text[1])
    proper_noun_feature_value = features.pos_tag_feature(preprocessed_text[1], preprocessed_text[2], 'NNP')
    cue_phrase_feature_value = features.phrase_feature(preprocessed_text[1], resources[CUE_PHRASE_FILE])
    stigma_phrase_feature_value = features.phrase_feature(preprocessed_text[1], resources[STIGMA_WORDS_FILE])
    numerical_data_feature_value = features.pos_tag_feature(preprocessed_text[1], preprocessed_text[2], 'CD')
    k_means_result = cluster.k_means(preprocessed_text[1], preprocessed_text[2], percentage, threads)
    # summary = cluster.cluster_based_summary(preprocessed_text[1], k_means_result[0], k_means_result[1])
    # Zip the parallel feature lists into one dict per sentence.
    sentences_feature_list = []
    for (keyword_value, title_word_value, sentence_location_value,
         sentence_lenght_value, proper_noun_value, cue_phase_value,
         stigma_word_value, numerical_data_value) in zip(
            keyword_feature_value, title_word_feature_value,
            sentence_location_feature_value, sentence_length_feature_value,
            proper_noun_feature_value, cue_phrase_feature_value,
            stigma_phrase_feature_value, numerical_data_feature_value):
        sentences_feature_list.append({
            'keyword': keyword_value,
            'title_word': title_word_value,
            'sentence_location': sentence_location_value,
            'sentence_length': sentence_lenght_value,
            'proper_noun': proper_noun_value,
            'cue_phrase': cue_phase_value,
            'nonessential': stigma_word_value,
            'numerical_data': numerical_data_value,
        })
    #fuzzy.print_everything(preprocessed_text[1], sentences_feature_list)
    # Assign each sentence a fuzzy rank, then pick sentences cluster-wise.
    fuzzy.set_fuzzy_ranks(preprocessed_text[1], sentences_feature_list)
    summary_sentences = filter_using_clusters(preprocessed_text[1], float(percentage) / 100, k_means_result[1])
    summary = ""
    for sentence in summary_sentences:
        summary += sentence.original
    return summary
#main
def summarize_file(file_name):
    """Read *file_name* and return its extractive summary.

    Fix: the original opened the file without a context manager, leaking
    the handle if read() raised; `with` guarantees it is closed.
    """
    with open(file_name) as file_obj:
        text = file_obj.read()
    return summarize_text(text)
def get_all_file_names(root_name):
    """Recursively collect every file path under *root_name*.

    Returns paths with forward slashes on all platforms (the "dir/file"
    form the rest of this script expects). Uses os.path.join instead of
    the original manual backslash concatenation; the trailing
    replace("\\", "/") preserves the original normalized output.
    """
    names = []
    for (dirpath, dirnames, filenames) in os.walk(root_name):
        for file_name in filenames:
            names.append(os.path.join(dirpath, file_name).replace("\\", "/"))
    return names
def main():
    """Batch-summarize every file under texts/ into summaries/.

    Files already present in summaries/ are skipped; files whose
    summarization fails are moved to problems/.

    Fixes: the bare `except:` (which also swallowed KeyboardInterrupt and
    hid the real error) is narrowed to Exception and now prints the
    traceback — the module already imports `traceback`; the output file is
    written via `with`; the skip message was missing a space before
    "already".
    """
    dir_name = "texts"
    output_dir = "summaries"
    problem_dir = "problems"
    names = get_all_file_names(dir_name)
    total = len(names)
    for i, name in enumerate(names, start=1):
        relative = name.replace(dir_name + "/", "")
        output_name = output_dir + "/" + relative
        problem_name = problem_dir + "/" + relative
        if relative in listdir(output_dir):
            print("File " + output_name + " already summarized!")
            continue
        print("Processing file " + name + " (%d/%d)" % (i, total))
        try:
            summary = summarize_file(name)
            with open(output_name, "w") as out_file:
                out_file.write(summary)
        except Exception:
            traceback.print_exc()
            shutil.move(name, problem_name)
            print("Error occured. File " + name + " moved to " + problem_dir + ".")

main()
dycodedev/taiga-back | tests/integration/resources_permissions/test_search_resources.py | 21 | 5880 | from django.core.urlresolvers import reverse
from taiga.permissions.permissions import MEMBERS_PERMISSIONS, ANON_PERMISSIONS, USER_PERMISSIONS
from tests import factories as f
from tests.utils import helper_test_http_method_and_keys, disconnect_signals, reconnect_signals
import pytest
pytestmark = pytest.mark.django_db
def setup_module(module):
    """pytest module-level setup: runs once before the tests in this file.

    Disconnects model signal handlers so the factory objects created by the
    fixtures below don't fire them.
    """
    disconnect_signals()
def teardown_module(module):
    """pytest module-level teardown: restore the signal handlers that
    setup_module disconnected."""
    reconnect_signals()
@pytest.fixture
def data():
    """Build the permission-matrix test data set.

    Creates five users, three projects (public; private with anon/public
    permissions; fully private), memberships with and without role
    permissions, and one issue per project. Returns them on an ad-hoc
    container object.
    """
    m = type("Models", (object,), {})
    # Users covering every access level exercised by the tests.
    m.registered_user = f.UserFactory.create()
    m.project_member_with_perms = f.UserFactory.create()
    m.project_member_without_perms = f.UserFactory.create()
    m.project_owner = f.UserFactory.create()
    m.other_user = f.UserFactory.create()
    # Public project: anonymous and registered users get full read perms.
    m.public_project = f.ProjectFactory(is_private=False,
                                        anon_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)),
                                        public_permissions=list(map(lambda x: x[0], USER_PERMISSIONS)),
                                        owner=m.project_owner)
    # Private project that still grants anon/public permissions.
    m.private_project1 = f.ProjectFactory(is_private=True,
                                          anon_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)),
                                          public_permissions=list(map(lambda x: x[0], USER_PERMISSIONS)),
                                          owner=m.project_owner)
    # Fully private project: no anon or public permissions at all.
    m.private_project2 = f.ProjectFactory(is_private=True,
                                          anon_permissions=[],
                                          public_permissions=[],
                                          owner=m.project_owner)
    # Memberships with full member permissions...
    m.public_membership = f.MembershipFactory(project=m.public_project,
                                              user=m.project_member_with_perms,
                                              role__project=m.public_project,
                                              role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
    m.private_membership1 = f.MembershipFactory(project=m.private_project1,
                                                user=m.project_member_with_perms,
                                                role__project=m.private_project1,
                                                role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
    # ...and memberships whose role carries no permissions.
    f.MembershipFactory(project=m.private_project1,
                        user=m.project_member_without_perms,
                        role__project=m.private_project1,
                        role__permissions=[])
    m.private_membership2 = f.MembershipFactory(project=m.private_project2,
                                                user=m.project_member_with_perms,
                                                role__project=m.private_project2,
                                                role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
    f.MembershipFactory(project=m.private_project2,
                        user=m.project_member_without_perms,
                        role__project=m.private_project2,
                        role__permissions=[])
    # Owner memberships for all three projects.
    f.MembershipFactory(project=m.public_project,
                        user=m.project_owner,
                        is_owner=True)
    f.MembershipFactory(project=m.private_project1,
                        user=m.project_owner,
                        is_owner=True)
    f.MembershipFactory(project=m.private_project2,
                        user=m.project_owner,
                        is_owner=True)
    # One issue per project so searches have something to find.
    m.public_issue = f.IssueFactory(project=m.public_project,
                                    status__project=m.public_project,
                                    severity__project=m.public_project,
                                    priority__project=m.public_project,
                                    type__project=m.public_project,
                                    milestone__project=m.public_project)
    m.private_issue1 = f.IssueFactory(project=m.private_project1,
                                      status__project=m.private_project1,
                                      severity__project=m.private_project1,
                                      priority__project=m.private_project1,
                                      type__project=m.private_project1,
                                      milestone__project=m.private_project1)
    m.private_issue2 = f.IssueFactory(project=m.private_project2,
                                      status__project=m.private_project2,
                                      severity__project=m.private_project2,
                                      priority__project=m.private_project2,
                                      type__project=m.private_project2,
                                      milestone__project=m.private_project2)
    return m
def test_search_list(client, data):
    """Search endpoint visibility per caller and per project privacy level."""
    url = reverse('search-list')
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    full_keys = set(['count', 'userstories', 'issues', 'tasks', 'wikipages'])
    count_only = set(['count'])

    # Public project: every caller gets the full result sections.
    results = helper_test_http_method_and_keys(
        client, 'get', url, {'project': data.public_project.pk}, users)
    assert results == [(200, full_keys)] * 5

    # private_project1: same expectation for every caller.
    results = helper_test_http_method_and_keys(
        client, 'get', url, {'project': data.private_project1.pk}, users)
    assert results == [(200, full_keys)] * 5

    # private_project2 (no anon/public permissions): only the member with
    # perms and the owner see content, everyone else gets a bare count.
    results = helper_test_http_method_and_keys(
        client, 'get', url, {'project': data.private_project2.pk}, users)
    assert results == [(200, count_only)] * 3 + [(200, full_keys)] * 2
| agpl-3.0 |
michaelBenin/django-jenkins | django_jenkins/tasks/__init__.py | 3 | 1658 | # -*- coding: utf-8 -*-
import os
from django.conf import settings
from django.utils.importlib import import_module
class BaseTask(object):
    """Base interface for CI tasks.

    Concrete tasks override any subset of the hook methods below; every
    default implementation is a no-op.
    """

    # Extra command-line options contributed by the task (optparse style).
    option_list = []

    def __init__(self, test_labels, options):
        # ``options`` is accepted for subclasses; the base class only
        # records which test labels were requested.
        self.test_labels = test_labels

    def setup_test_environment(self, **kwargs):
        """Hook run while the test environment is being set up."""

    def teardown_test_environment(self, **kwargs):
        """Hook run while the test environment is being torn down."""

    def before_suite_run(self, **kwargs):
        """Hook run immediately before the suite starts."""

    def after_suite_run(self, **kwargs):
        """Hook run immediately after the suite finishes."""

    def build_suite(self, suite, **kwargs):
        """Hook allowing the task to inspect or amend the test suite."""
def get_apps_under_test(test_labels, all_apps=False):
    """
    Convert test labels into a list of installed app names.

    test_labels - labels of the form "app" or "app.TestCase"; only the part
    before the first dot is matched against INSTALLED_APPS.
    all_apps - if test_labels is empty, ignore the PROJECT_APPS white list
    and return all installed apps.
    """
    if not test_labels:
        if hasattr(settings, 'PROJECT_APPS') and not all_apps:
            apps = settings.PROJECT_APPS
        else:
            apps = settings.INSTALLED_APPS
    else:
        # An app matches a label when it equals the label's first component,
        # or when its dotted path ends with that component.  NOTE(review):
        # an app matching several labels is returned once per match.
        apps = [app for app in settings.INSTALLED_APPS
                for label in test_labels
                if app == label.split('.')[0] or
                app.endswith('.%s' % label.split('.')[0])]
    return apps
def get_apps_locations(test_labels, all_apps=False):
    """
    Returns list of filesystem paths of the apps selected by test_labels.
    """
    locations = []
    for app_name in get_apps_under_test(test_labels, all_apps):
        module_file = import_module(app_name).__file__
        locations.append(os.path.dirname(os.path.normpath(module_file)))
    return locations
def get_app_location(app_module):
    """
    Returns the directory containing app_module's source file.
    """
    normalized = os.path.normpath(app_module.__file__)
    return os.path.dirname(normalized)
| lgpl-3.0 |
aduric/crossfit | nonrel/django/utils/itercompat.py | 294 | 1169 | """
Providing iterator functions that are not in all version of Python we support.
Where possible, we try to use the system-native version and only fall back to
these implementations if necessary.
"""
import itertools
# Fallback for Python 2.4, Python 2.5
def product(*args, **kwds):
    """
    Pure-Python fallback for itertools.product, taken from
    http://docs.python.org/library/itertools.html#itertools.product

    ``map`` is materialised into a list so the repetition below also works
    on Python 3, where ``map`` returns a lazy iterator that cannot be
    multiplied.
    """
    # product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
    # product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
    pools = list(map(tuple, args)) * kwds.get('repeat', 1)
    result = [[]]
    for pool in pools:
        # Extend every partial tuple with every element of the next pool.
        result = [x + [y] for x in result for y in pool]
    for prod in result:
        yield tuple(prod)
if hasattr(itertools, 'product'):
product = itertools.product
def is_iterable(x):
    """An implementation-independent way of checking for iterables."""
    try:
        iter(x)
    except TypeError:
        return False
    return True
def all(iterable):
    """Fallback for the builtin ``all``: True unless some element is falsy.

    Short-circuits at the first falsy element.
    """
    for element in iterable:
        if not element:
            return False
    return True
def any(iterable):
    """Fallback for the builtin ``any``: True if some element is truthy.

    Short-circuits at the first truthy element.
    """
    for element in iterable:
        if element:
            return True
    return False
| bsd-3-clause |
jcpowermac/ansible | lib/ansible/modules/monitoring/zabbix/zabbix_maintenance.py | 22 | 12920 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: zabbix_maintenance
short_description: Create Zabbix maintenance windows
description:
- This module will let you create Zabbix maintenance windows.
version_added: "1.8"
author: "Alexander Bulimov (@abulimov)"
requirements:
- "python >= 2.6"
- zabbix-api
options:
state:
description:
- Create or remove a maintenance window.
required: false
default: present
choices: [ "present", "absent" ]
host_names:
description:
- Hosts to manage maintenance window for.
Separate multiple hosts with commas.
C(host_name) is an alias for C(host_names).
B(Required) option when C(state) is I(present)
and no C(host_groups) specified.
required: false
default: null
aliases: [ "host_name" ]
host_groups:
description:
- Host groups to manage maintenance window for.
Separate multiple groups with commas.
C(host_group) is an alias for C(host_groups).
B(Required) option when C(state) is I(present)
and no C(host_names) specified.
required: false
default: null
aliases: [ "host_group" ]
minutes:
description:
- Length of maintenance window in minutes.
required: false
default: 10
name:
description:
- Unique name of maintenance window.
required: true
desc:
description:
- Short description of maintenance window.
required: true
default: Created by Ansible
collect_data:
description:
- Type of maintenance. With data collection, or without.
required: false
default: "true"
extends_documentation_fragment:
- zabbix
notes:
- Useful for setting hosts in maintenance mode before big update,
and removing maintenance window after update.
- Module creates maintenance window from now() to now() + minutes,
so if Zabbix server's time and host's time are not synchronized,
you will get strange results.
- Install required module with 'pip install zabbix-api' command.
'''
EXAMPLES = '''
- name: Create a named maintenance window for host www1 for 90 minutes
zabbix_maintenance:
name: Update of www1
host_name: www1.example.com
state: present
minutes: 90
server_url: https://monitoring.example.com
login_user: ansible
login_password: pAsSwOrD
- name: Create a named maintenance window for host www1 and host groups Office and Dev
zabbix_maintenance:
name: Update of www1
host_name: www1.example.com
host_groups:
- Office
- Dev
state: present
server_url: https://monitoring.example.com
login_user: ansible
login_password: pAsSwOrD
- name: Create a named maintenance window for hosts www1 and db1, without data collection.
zabbix_maintenance:
name: update
host_names:
- www1.example.com
- db1.example.com
state: present
collect_data: False
server_url: https://monitoring.example.com
login_user: ansible
login_password: pAsSwOrD
- name: Remove maintenance window by name
zabbix_maintenance:
name: Test1
state: absent
server_url: https://monitoring.example.com
login_user: ansible
login_password: pAsSwOrD
'''
import datetime
import time
try:
from zabbix_api import ZabbixAPI
HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False
from ansible.module_utils.basic import AnsibleModule
def create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc):
    """Create a Zabbix maintenance window.

    Returns a ``(rc, result, error)`` triple: ``(0, None, None)`` on
    success, ``(1, None, message)`` when the API call fails.
    """
    end_time = start_time + period
    # The Zabbix API expects all timestamps and periods as strings.
    parameters = {
        "groupids": group_ids,
        "hostids": host_ids,
        "name": name,
        "maintenance_type": maintenance_type,
        "active_since": str(start_time),
        "active_till": str(end_time),
        "description": desc,
        "timeperiods": [{
            "timeperiod_type": "0",
            "start_date": str(start_time),
            "period": str(period),
        }],
    }
    try:
        zbx.maintenance.create(parameters)
    except BaseException as e:
        return 1, None, str(e)
    return 0, None, None
def update_maintenance(zbx, maintenance_id, group_ids, host_ids, start_time, maintenance_type, period, desc):
    """Update an existing Zabbix maintenance window in place.

    Returns ``(0, None, None)`` on success or ``(1, None, message)`` when
    the API call fails.
    """
    end_time = start_time + period
    # Same payload shape as create_maintenance, keyed by maintenanceid.
    parameters = {
        "maintenanceid": maintenance_id,
        "groupids": group_ids,
        "hostids": host_ids,
        "maintenance_type": maintenance_type,
        "active_since": str(start_time),
        "active_till": str(end_time),
        "description": desc,
        "timeperiods": [{
            "timeperiod_type": "0",
            "start_date": str(start_time),
            "period": str(period),
        }],
    }
    try:
        zbx.maintenance.update(parameters)
    except BaseException as e:
        return 1, None, str(e)
    return 0, None, None
def get_maintenance(zbx, name):
    """Look up a maintenance window by exact name.

    Returns ``(0, maintenance, None)`` for the first match, with flat
    ``groupids``/``hostids`` lists attached; ``(0, None, None)`` when no
    window matches; ``(1, None, message)`` when the API call fails.
    """
    query = {
        "filter": {"name": name},
        "selectGroups": "extend",
        "selectHosts": "extend",
    }
    try:
        maintenances = zbx.maintenance.get(query)
    except BaseException as e:
        return 1, None, str(e)

    for maintenance in maintenances:
        # Flatten the expanded group/host objects down to their ids.
        maintenance["groupids"] = [group["groupid"] for group in maintenance.get("groups", [])]
        maintenance["hostids"] = [host["hostid"] for host in maintenance.get("hosts", [])]
        return 0, maintenance, None

    return 0, None, None
def delete_maintenance(zbx, maintenance_id):
    """Delete a maintenance window by id.

    Returns ``(0, None, None)`` on success, ``(1, None, message)`` on
    API failure.
    """
    try:
        # The API takes a list of ids even for a single deletion.
        zbx.maintenance.delete([maintenance_id])
    except BaseException as e:
        return 1, None, str(e)
    return 0, None, None
def get_group_ids(zbx, host_groups):
    """Map host-group names to Zabbix group ids.

    Returns ``(0, group_ids, None)`` on success, or ``(1, None, message)``
    as soon as a lookup fails or a group is unknown.
    """
    group_ids = []
    for group in host_groups:
        try:
            result = zbx.hostgroup.get({
                "output": "extend",
                "filter": {"name": group},
            })
        except BaseException as e:
            return 1, None, str(e)

        if not result:
            return 1, None, "Group id for group %s not found" % group
        group_ids.append(result[0]["groupid"])

    return 0, group_ids, None
def get_host_ids(zbx, host_names):
    """Map host names to Zabbix host ids.

    Returns ``(0, host_ids, None)`` on success, or ``(1, None, message)``
    as soon as a lookup fails or a host is unknown.
    """
    host_ids = []
    for host in host_names:
        try:
            result = zbx.host.get({
                "output": "extend",
                "filter": {"name": host},
            })
        except BaseException as e:
            return 1, None, str(e)

        if not result:
            return 1, None, "Host id for host %s not found" % host
        host_ids.append(result[0]["hostid"])

    return 0, host_ids, None
def main():
    """Ansible entry point: create, update or remove a Zabbix maintenance window."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(required=False, default='present', choices=['present', 'absent']),
            server_url=dict(type='str', required=True, default=None, aliases=['url']),
            host_names=dict(type='list', required=False, default=None, aliases=['host_name']),
            minutes=dict(type='int', required=False, default=10),
            host_groups=dict(type='list', required=False, default=None, aliases=['host_group']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            validate_certs=dict(type='bool', required=False, default=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            name=dict(type='str', required=True),
            desc=dict(type='str', required=False, default="Created by Ansible"),
            collect_data=dict(type='bool', required=False, default=True),
            timeout=dict(type='int', default=10),
        ),
        supports_check_mode=True,
    )

    if not HAS_ZABBIX_API:
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")

    host_names = module.params['host_names']
    host_groups = module.params['host_groups']
    state = module.params['state']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    validate_certs = module.params['validate_certs']
    minutes = module.params['minutes']
    name = module.params['name']
    desc = module.params['desc']
    server_url = module.params['server_url']
    collect_data = module.params['collect_data']
    timeout = module.params['timeout']

    # Zabbix maintenance_type: 0 = with data collection, 1 = without.
    if collect_data:
        maintenance_type = 0
    else:
        maintenance_type = 1

    try:
        zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password,
                        validate_certs=validate_certs)
        zbx.login(login_user, login_password)
    except BaseException as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

    changed = False

    if state == "present":

        if not host_names and not host_groups:
            module.fail_json(msg="At least one host_name or host_group must be defined for each created maintenance.")

        # Window runs from "now" (rounded down to the minute) for `minutes`.
        now = datetime.datetime.now().replace(second=0)
        start_time = time.mktime(now.timetuple())
        period = 60 * int(minutes)  # N * 60 seconds

        if host_groups:
            (rc, group_ids, error) = get_group_ids(zbx, host_groups)
            if rc != 0:
                module.fail_json(msg="Failed to get group_ids: %s" % error)
        else:
            group_ids = []

        if host_names:
            (rc, host_ids, error) = get_host_ids(zbx, host_names)
            if rc != 0:
                module.fail_json(msg="Failed to get host_ids: %s" % error)
        else:
            host_ids = []

        (rc, maintenance, error) = get_maintenance(zbx, name)
        if rc != 0:
            module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error))

        # Update only when something actually differs.  The API returns
        # timestamps and the type as strings, hence the str() comparisons.
        # NOTE(review): active_since/active_till are compared against a
        # freshly computed "now", so a matching window is effectively
        # always refreshed on repeat runs.
        if maintenance and (
            sorted(group_ids) != sorted(maintenance["groupids"]) or
            sorted(host_ids) != sorted(maintenance["hostids"]) or
            str(maintenance_type) != maintenance["maintenance_type"] or
            str(int(start_time)) != maintenance["active_since"] or
            str(int(start_time + period)) != maintenance["active_till"]
        ):
            if module.check_mode:
                changed = True
            else:
                (rc, _, error) = update_maintenance(zbx, maintenance["maintenanceid"], group_ids, host_ids, start_time, maintenance_type, period, desc)
                if rc == 0:
                    changed = True
                else:
                    module.fail_json(msg="Failed to update maintenance: %s" % error)

        if not maintenance:
            if module.check_mode:
                changed = True
            else:
                (rc, _, error) = create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc)
                if rc == 0:
                    changed = True
                else:
                    module.fail_json(msg="Failed to create maintenance: %s" % error)

    if state == "absent":

        (rc, maintenance, error) = get_maintenance(zbx, name)
        if rc != 0:
            module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error))

        if maintenance:
            if module.check_mode:
                changed = True
            else:
                (rc, _, error) = delete_maintenance(zbx, maintenance["maintenanceid"])
                if rc == 0:
                    changed = True
                else:
                    module.fail_json(msg="Failed to remove maintenance: %s" % error)

    module.exit_json(changed=changed)
if __name__ == '__main__':
main()
| gpl-3.0 |
SnowDroid/kernel_lge_hammerhead | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct

# Index of the current 4-byte record; doubles as the config key emitted
# on the output line.
i = 0
while True:
    buf = sys.stdin.read(4)
    if len(buf) == 0:
        # Clean EOF on a record boundary: finish the output line and stop.
        break
    elif len(buf) != 4:
        # Truncated trailing record: the input file is corrupt.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)
    if i > 0:
        sys.stdout.write(" ")
    # Each record is a little-endian u32; emit "<index-in-hex>=<value>".
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1
sys.stdout.write("\n")
| gpl-2.0 |
rspavel/spack | var/spack/repos/builtin/packages/ont-albacore/package.py | 5 | 1455 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class OntAlbacore(Package):
    """Albacore is a software project that provides an entry point to the Oxford
    Nanopore basecalling algorithms. It can be run from the command line on
    Windows and multiple Linux platforms. A selection of configuration files
    allow basecalling DNA libraries made with our current range of sequencing
    kits and Flow Cells."""

    homepage = "https://nanoporetech.com"
    # Upstream only ships a binary wheel, so the "source" URL is the .whl itself.
    url = "https://mirror.oxfordnanoportal.com/software/analysis/ont_albacore-2.3.1-cp35-cp35m-manylinux1_x86_64.whl"

    # expand=False keeps the downloaded wheel intact instead of unpacking it.
    version('2.3.1', sha256='dc1af11b0f38b26d071e5389c2b4595c496319c987401754e1853de42467a7d1', expand=False)

    extends('python')
    # The cp35 wheel tag pins this package to CPython 3.5.
    depends_on('python@3.5.0:3.5.999', type=('build', 'run'))
    depends_on('py-setuptools', type=('build', 'run'))
    depends_on('py-numpy@1.13.0', type=('build', 'run'))
    depends_on('py-python-dateutil', type=('build', 'run'))
    depends_on('py-h5py', type=('build', 'run'))
    depends_on('py-ont-fast5-api', type=('build', 'run'))
    depends_on('py-pip', type=('build'))

    def install(self, spec, prefix):
        # Install the pre-built wheel straight into this package's prefix.
        pip = which('pip')
        pip('install', self.stage.archive_file, '--prefix={0}'.format(prefix))
| lgpl-2.1 |
meredith-digops/ansible | lib/ansible/modules/monitoring/bigpanda.py | 77 | 6171 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigpanda
author: "Hagai Kariti (@hkariti)"
short_description: Notify BigPanda about deployments
version_added: "1.8"
description:
- Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls.
options:
component:
description:
- "The name of the component being deployed. Ex: billing"
required: true
aliases: ['name']
version:
description:
- The deployment version.
required: true
token:
description:
- API token.
required: true
state:
description:
- State of the deployment.
required: true
choices: ['started', 'finished', 'failed']
hosts:
description:
- Name of affected host name. Can be a list.
required: false
default: machine's hostname
aliases: ['host']
env:
description:
- The environment name, typically 'production', 'staging', etc.
required: false
owner:
description:
- The person responsible for the deployment.
required: false
description:
description:
- Free text description of the deployment.
required: false
url:
description:
- Base URL of the API server.
required: False
default: https://api.bigpanda.io
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
# informational: requirements for nodes
requirements: [ ]
'''
EXAMPLES = '''
- bigpanda:
component: myapp
version: '1.3'
token: '{{ bigpanda_token }}'
state: started
- bigpanda:
component: myapp
version: '1.3'
token: '{{ bigpanda_token }}'
state: finished
# If outside servers aren't reachable from your machine, use delegate_to and override hosts:
- bigpanda:
component: myapp
version: '1.3'
token: '{{ bigpanda_token }}'
hosts: '{{ ansible_hostname }}'
state: started
delegate_to: localhost
register: deployment
- bigpanda:
component: '{{ deployment.component }}'
version: '{{ deployment.version }}'
token: '{{ deployment.token }}'
state: finished
delegate_to: localhost
'''
# ===========================================
# Module execution.
#
import socket
def main():
    """Ansible entry point: report a deployment start/end event to BigPanda."""
    module = AnsibleModule(
        argument_spec=dict(
            component=dict(required=True, aliases=['name']),
            version=dict(required=True),
            token=dict(required=True, no_log=True),
            state=dict(required=True, choices=['started', 'finished', 'failed']),
            hosts=dict(required=False, default=[socket.gethostname()], aliases=['host']),
            env=dict(required=False),
            owner=dict(required=False),
            description=dict(required=False),
            message=dict(required=False),
            source_system=dict(required=False, default='ansible'),
            validate_certs=dict(default='yes', type='bool'),
            url=dict(required=False, default='https://api.bigpanda.io'),
        ),
        supports_check_mode=True,
        check_invalid_arguments=False,
    )

    token = module.params['token']
    state = module.params['state']
    url = module.params['url']

    # Build the common request body
    body = dict()
    for k in ('component', 'version', 'hosts'):
        v = module.params[k]
        if v is not None:
            body[k] = v

    # The API expects a list of hosts even when a single name was given.
    if not isinstance(body['hosts'], list):
        body['hosts'] = [body['hosts']]

    # Insert state-specific attributes to body
    if state == 'started':
        for k in ('source_system', 'env', 'owner', 'description'):
            v = module.params[k]
            if v is not None:
                body[k] = v
        request_url = url + '/data/events/deployments/start'
    else:
        # 'finished' and 'failed' both hit the end endpoint; only the
        # status (and optional error message) differs.
        message = module.params['message']
        if message is not None:
            body['errorMessage'] = message

        if state == 'finished':
            body['status'] = 'success'
        else:
            body['status'] = 'failure'
        request_url = url + '/data/events/deployments/end'

    # Build the deployment object we return
    deployment = dict(token=token, url=url)
    deployment.update(body)
    if 'errorMessage' in deployment:
        # Rename so a follow-up task can pass it back via the `message` option.
        message = deployment.pop('errorMessage')
        deployment['message'] = message

    # If we're in check mode, just exit pretending like we succeeded
    if module.check_mode:
        module.exit_json(changed=True, **deployment)

    # Send the data to bigpanda
    data = json.dumps(body)
    headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'}
    try:
        # fetch_url comes from ansible.module_utils.urls (star import below).
        response, info = fetch_url(module, request_url, data=data, headers=headers)
        if info['status'] == 200:
            module.exit_json(changed=True, **deployment)
        else:
            module.fail_json(msg=json.dumps(info))
    except Exception:
        e = get_exception()
        module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main()
| gpl-3.0 |
cloudcopy/seahub | seahub/wiki/models.py | 6 | 1678 | from django.db import models
from seahub.base.fields import LowerCaseCharField
class WikiDoesNotExist(Exception):
pass
class WikiPageMissing(Exception):
pass
class PersonalWikiManager(models.Manager):
    def save_personal_wiki(self, username, repo_id):
        """
        Create or update a user's personal wiki.

        Points the user's wiki at ``repo_id``, creating the record on
        first use. Returns the saved PersonalWiki instance.
        """
        # Fixed docstring: it previously said "group wiki", copy-pasted
        # from GroupWikiManager.save_group_wiki.
        try:
            wiki = self.get(username=username)
            wiki.repo_id = repo_id
        except self.model.DoesNotExist:
            wiki = self.model(username=username, repo_id=repo_id)
        wiki.save(using=self._db)
        return wiki
class PersonalWiki(models.Model):
    # One wiki repo per user; username is stored lower-cased for
    # case-insensitive lookups.
    username = LowerCaseCharField(max_length=255, unique=True)
    repo_id = models.CharField(max_length=36)
    objects = PersonalWikiManager()
class GroupWikiManager(models.Manager):
    def save_group_wiki(self, group_id, repo_id):
        """
        Create or update group wiki.
        """
        try:
            groupwiki = self.get(group_id=group_id)
            groupwiki.repo_id = repo_id
        except self.model.DoesNotExist:
            # First wiki for this group: create the record.
            groupwiki = self.model(group_id=group_id, repo_id=repo_id)
        groupwiki.save(using=self._db)
        return groupwiki
class GroupWiki(models.Model):
    # One wiki repo per group.
    group_id = models.IntegerField(unique=True)
    repo_id = models.CharField(max_length=36)
    objects = GroupWikiManager()
###### signal handlers
from django.dispatch import receiver
from seahub.signals import repo_deleted
@receiver(repo_deleted)
def remove_personal_wiki(sender, **kwargs):
    # When a library is deleted, drop any personal-wiki record that pointed
    # at it so the wiki does not reference a dangling repo id.
    repo_owner = kwargs['repo_owner']
    repo_id = kwargs['repo_id']
    PersonalWiki.objects.filter(username=repo_owner, repo_id=repo_id).delete()
| apache-2.0 |
ThomasA/pywt | demo/dwt_signal_decomposition.py | 1 | 1808 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib.pyplot as plt
import pywt
ecg = np.load(os.path.join('data', 'ecg.npy'))

# Ramp signal with deliberate irregularities: the 398-399 segment is
# repeated and sample 600 is skipped.
data1 = np.concatenate((np.arange(1, 400),
                        np.arange(398, 600),
                        np.arange(601, 1024)))

# Chirp-like signal on a reversed grid; sign(log(x)) flips the phase
# where x crosses 1.
x = np.linspace(0.082, 2.128, num=1024)[::-1]
data2 = np.sin(40 * np.log(x)) * np.sign((np.log(x)))

# Signal-extension mode used for all DWT decompositions below.
mode = pywt.MODES.sp1
def plot_signal_decomp(data, w, title):
    """Decompose and plot a signal S.

    S = An + Dn + Dn-1 + ... + D1

    data  -- 1-D signal to analyse
    w     -- wavelet name (e.g. 'sym5')
    title -- title for the main axes
    """
    w = pywt.Wavelet(w)
    a = data
    ca = []  # approximation coefficients, one entry per level
    cd = []  # detail coefficients, one entry per level
    # 5-level cascade: each pass splits the previous approximation.
    for i in range(5):
        (a, d) = pywt.dwt(a, w, mode)
        ca.append(a)
        cd.append(d)

    rec_a = []
    rec_d = []

    # Reconstruct each level in isolation by zeroing out (None) every
    # other coefficient band before the inverse transform.
    for i, coeff in enumerate(ca):
        coeff_list = [coeff, None] + [None] * i
        rec_a.append(pywt.waverec(coeff_list, w))

    for i, coeff in enumerate(cd):
        coeff_list = [None, coeff] + [None] * i
        rec_d.append(pywt.waverec(coeff_list, w))

    # Layout: original on top, approximations (red) in the left column,
    # details (green) in the right column, one row per level.
    fig = plt.figure()
    ax_main = fig.add_subplot(len(rec_a) + 1, 1, 1)
    ax_main.set_title(title)
    ax_main.plot(data)
    ax_main.set_xlim(0, len(data) - 1)

    for i, y in enumerate(rec_a):
        ax = fig.add_subplot(len(rec_a) + 1, 2, 3 + i * 2)
        ax.plot(y, 'r')
        ax.set_xlim(0, len(y) - 1)
        ax.set_ylabel("A%d" % (i + 1))

    for i, y in enumerate(rec_d):
        ax = fig.add_subplot(len(rec_d) + 1, 2, 4 + i * 2)
        ax.plot(y, 'g')
        ax.set_xlim(0, len(y) - 1)
        ax.set_ylabel("D%d" % (i + 1))
# Run the decomposition demo on each test signal, then show all figures.
plot_signal_decomp(data1, 'coif5', "DWT: Signal irregularity")
plot_signal_decomp(data2, 'sym5',
                   "DWT: Frequency and phase change - Symmlets5")
plot_signal_decomp(ecg, 'sym5', "DWT: Ecg sample - Symmlets5")

plt.show()
jhgao/blogger-grabber | grabber.py | 1 | 4348 | # python 2.7
from time import sleep
import os
import sys
import re
import urlparse
from bs4 import BeautifulSoup as bs
import grabimgurl as gimg
import urlproxy as urlp
image_dir = 'images/'       # per-post sub-directory for downloaded images
sleep_between_post = 0      # seconds to pause between page fetches
num_limit = 150             # maximum number of pages to walk
max_depth_img_url = 3       # indirections to follow when resolving an image URL

# anchor
newer_posts_a_id = 'Blog1_blog-pager-newer-link'  # Blogger's "newer posts" link id
log_gotfn = 'gotpost.log'   # log of already-saved posts, keyed by timestamp


class ProbError(Exception):
    """Base class for errors raised while processing a post."""
    pass


class NoTimestampError(ProbError):
    """Raised when a post lacks a usable publication timestamp."""
    pass
def find_string_div(soup, divclass):
    """Return the string of the first <div> with class `divclass`,
    or None when no such div exists."""
    matches = soup.find_all("div", {"class": divclass})
    if matches:
        return matches[0].string
def prob_save_post( soup ):
    # Extract each "post-outer" block from the page and save its metadata,
    # body and images into a directory named after the post timestamp.
    # Raises NoTimestampError when a post has no usable timestamp.

    # touch log file
    script_dir = os.path.dirname(__file__)
    logf = open(os.path.join(script_dir,log_gotfn),'a')
    logf.close()

    # save post in to dirs
    p = {'title':'','body':'','author':'','timestamp':''}
    list_div_outer = soup("div", {"class": "post-outer"})
    for outer in list_div_outer:
        try: # time stamp
            p['timestamp'] = outer.find("abbr",{"itemprop":"datePublished"})['title']
            if 0 == len(p['timestamp']):
                print 'post missing timestamp'
                raise NoTimestampError
            else:
                print p['timestamp']
        except:
            # Any parse failure here is treated as a missing timestamp.
            raise NoTimestampError

        if p['timestamp'] in open(log_gotfn,'r').read(): #skip post already saved
            print 'skip saving'
            return

        try: # title
            p['title']= outer.find("h3").string
            # Flatten the title onto one line for the log/marker key.
            titleline = p['title'].replace('\r','+')
            titleline = titleline .replace('\n','+')
            print titleline
            # NOTE(review): stores the flattened title as an extra dict KEY
            # (empty value) so it also gets dumped as a file below.
            p['titleline'+ titleline ]=''
        except:
            print 'post title error'

        try: # body
            p['body']= outer.find("div",{"id":re.compile("^post-body")})
        except:
            print 'post body error'

        try: # author
            p['author'] = outer.find("span",{"class":"post-author vcard"}).find("span",{"itemprop":"author"}).find("span",{"itemprop":"name"}).string
        except:
            print 'post author error'

        #save post in folder
        subdirname = p['timestamp']
        tgtpath = os.path.join(script_dir, subdirname)
        if not os.path.exists(tgtpath):
            os.makedirs(tgtpath)
        # One file per dict entry, named after the key, UTF-8 encoded.
        for key in p.keys():
            f = open(os.path.join(tgtpath,key),'w')
            f.write(p[key].encode('utf8'))
            f.close()

        #save images in the post
        if len(p['body']) > 0 :
            save_imgs_in_soup( p['body'] , tgtpath)

        # log save success
        logf = open(os.path.join(script_dir,log_gotfn),'a')
        logf.write( (p['timestamp']+'\n').encode('utf8') )
        logf.write( (p['title']+'\n').encode('utf8') )
        logf.close()
def save_imgs_in_soup( soup, todir):
    '''try to get original img, fall back to direct img'''
    imglist = soup('img')
    if len(imglist) == 0:
        return
    imgpath = os.path.join(todir, image_dir)
    if not os.path.exists(imgpath):
        os.makedirs(imgpath)
    for img in imglist:
        try:
            # The <a> wrapping the <img> usually links to the full-size
            # original; save it with an 'o' prefix.
            ourl = img.parent.get('href')
            ofn = os.path.join(imgpath,'o'+ ourl.split('/')[-1])
            gimg.save_img_in_url(ourl,ofn,max_depth_img_url)
        except:
            print '[fail orig img]', ourl
            # Fall back to the inline (scaled) image, saved with an 's'
            # prefix.  NOTE(review): assumed fallback-on-failure per the
            # docstring; original indentation was ambiguous — confirm.
            try:
                surl = img['src']
                sfn = os.path.join(imgpath,'s'+ surl.split('/')[-1])
                gimg.save_img_in_url(surl,sfn,max_depth_img_url)
            except gimg.GrabImgError as e:
                print '[faild img]',surl
                print e
def newer_page( soup ):
    '''Return the URL of the "newer posts" page, or None when absent.'''
    try:
        anchors = soup('a', id=newer_posts_a_id)
        return anchors[0].get('href')
    except:
        # No pager link on the page (or unexpected markup).
        return None
def main(url):
    # Walk the blog from `url` towards newer posts, saving each page's
    # posts, until there is no newer page or num_limit pages were visited.
    step = 1
    while url:
        print
        print 'step',step
        print 'page',url
        soup = bs(urlp.fetch_url(url))
        try:
            prob_save_post(soup)
        except NoTimestampError:
            # Skip pages whose posts cannot be dated; keep walking.
            print 'missing timestamp', url
        step = step + 1
        if (step > num_limit):
            break
        url = newer_page( soup )
        if ( url is None ):
            break
        sleep(sleep_between_post)
if __name__ == '__main__':
url = sys.argv[-1]
if not url.lower().startswith('http'):
print 'use http://...'
main(url)
| mit |
glewarne/SimplKernel-5.1.1-G92x | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";

for_comm = None             # restrict accounting to this command, if given
default_interval = 3
interval = default_interval  # display refresh period in seconds

if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    # Both [comm] and [interval] were given.
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    # Single argument: an interval if numeric, otherwise a command name.
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval

syscalls = autodict()       # syscall id -> hit count since last refresh
def trace_begin():
    # Refresh the totals display on a background thread so event
    # processing is never blocked by the periodic printing.
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # Perf callback: count one hit per syscall id, optionally filtered
    # by command name.
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # First hit for this id: the autodict entry is not yet a number.
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Display loop (runs forever on its own thread): every `interval`
    # seconds, clear the terminal, print the counts sorted by frequency,
    # then reset the counters for the next window.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",

        print "%-40s  %10s\n" % ("event", "count"),
        print "%-40s  %10s\n" % ("----------------------------------------", \
                                 "----------"),

        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40s  %10d\n" % (syscall_name(id), val),
            except TypeError:
                pass
        syscalls.clear()
        time.sleep(interval)
| gpl-2.0 |
rduivenvoorde/QGIS | tests/src/python/test_provider_tabfile.py | 31 | 4462 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for the OGR/MapInfo tab provider.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '2016-01-28'
__copyright__ = 'Copyright 2016, The QGIS Project'
import os
import tempfile
from qgis.core import QgsVectorLayer, QgsFeatureRequest, QgsVectorDataProvider, QgsField
from qgis.PyQt.QtCore import QDate, QTime, QDateTime, QVariant, QDir
from qgis.testing import start_app, unittest
import osgeo.gdal # NOQA
from utilities import unitTestDataPath
import shutil
start_app()
TEST_DATA_DIR = unitTestDataPath()
# Note - doesn't implement ProviderTestCase as OGR provider is tested by the shapefile provider test
class TestPyQgsTabfileProvider(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Run before all tests"""
        # Scratch directory for layers the tests copy or modify.
        cls.basetestpath = tempfile.mkdtemp()
        cls.dirs_to_cleanup = [cls.basetestpath]
    @classmethod
    def tearDownClass(cls):
        """Run after all tests"""
        # Remove every scratch directory; ignore_errors-style cleanup.
        for dirname in cls.dirs_to_cleanup:
            shutil.rmtree(dirname, True)
def testDateTimeFormats(self):
# check that date and time formats are correctly interpreted
basetestfile = os.path.join(TEST_DATA_DIR, 'tab_file.tab')
vl = QgsVectorLayer('{}|layerid=0'.format(basetestfile), 'test', 'ogr')
fields = vl.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName('date')).type(), QVariant.Date)
self.assertEqual(fields.at(fields.indexFromName('time')).type(), QVariant.Time)
self.assertEqual(fields.at(fields.indexFromName('date_time')).type(), QVariant.DateTime)
f = next(vl.getFeatures(QgsFeatureRequest()))
date_idx = vl.fields().lookupField('date')
assert isinstance(f.attributes()[date_idx], QDate)
self.assertEqual(f.attributes()[date_idx], QDate(2004, 5, 3))
time_idx = vl.fields().lookupField('time')
assert isinstance(f.attributes()[time_idx], QTime)
self.assertEqual(f.attributes()[time_idx], QTime(13, 41, 00))
datetime_idx = vl.fields().lookupField('date_time')
assert isinstance(f.attributes()[datetime_idx], QDateTime)
self.assertEqual(f.attributes()[datetime_idx], QDateTime(QDate(2004, 5, 3), QTime(13, 41, 00)))
def testUpdateMode(self):
""" Test that on-the-fly re-opening in update/read-only mode works """
basetestfile = os.path.join(TEST_DATA_DIR, 'tab_file.tab')
vl = QgsVectorLayer('{}|layerid=0'.format(basetestfile), 'test', 'ogr')
caps = vl.dataProvider().capabilities()
self.assertTrue(caps & QgsVectorDataProvider.AddFeatures)
# We should be really opened in read-only mode even if write capabilities are declared
self.assertEqual(vl.dataProvider().property("_debug_open_mode"), "read-only")
# Test that startEditing() / commitChanges() plays with enterUpdateMode() / leaveUpdateMode()
self.assertTrue(vl.startEditing())
self.assertEqual(vl.dataProvider().property("_debug_open_mode"), "read-write")
self.assertTrue(vl.dataProvider().isValid())
self.assertTrue(vl.commitChanges())
self.assertEqual(vl.dataProvider().property("_debug_open_mode"), "read-only")
self.assertTrue(vl.dataProvider().isValid())
def testInteger64WriteTabfile(self):
"""Check writing Integer64 fields to an MapInfo tabfile (which does not support that type)."""
base_dest_file_name = os.path.join(str(QDir.tempPath()), 'integer64')
dest_file_name = base_dest_file_name + '.tab'
shutil.copy(os.path.join(TEST_DATA_DIR, 'tab_file.tab'), base_dest_file_name + '.tab')
shutil.copy(os.path.join(TEST_DATA_DIR, 'tab_file.dat'), base_dest_file_name + '.dat')
shutil.copy(os.path.join(TEST_DATA_DIR, 'tab_file.map'), base_dest_file_name + '.map')
shutil.copy(os.path.join(TEST_DATA_DIR, 'tab_file.id'), base_dest_file_name + '.id')
vl = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
self.assertTrue(vl.isValid())
self.assertTrue(vl.dataProvider().addAttributes([QgsField("int8", QVariant.LongLong, "integer64")]))
# Allow running this test module directly (outside the QGIS test runner).
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
spz-signup/spz-signup | src/spz/spz/pdf.py | 1 | 11338 | # -*- coding: utf-8 -*-
"""Helper functions for pdf-generator.
"""
from datetime import datetime
import fpdf
from flask import make_response
from flask_login import login_required
from spz import app, models
class SPZPDF(fpdf.FPDF):
    """Base class used for ALL PDF generators here."""

    def __init__(self):
        # Font-metric caching: mode 2 = cache into an explicit directory;
        # configured before the DejaVu fonts below are registered.
        fpdf.set_global('FPDF_CACHE_MODE', 2)
        fpdf.set_global('FPDF_CACHE_DIR', '/tmp')
        # Landscape ('L') A4 page, millimetre units.
        super(SPZPDF, self).__init__('L', 'mm', 'A4')
        # DejaVu with uni=True supplies the Unicode glyphs (umlauts etc.)
        # used by the German strings in the subclasses.
        self.add_font('DejaVu', '', '/usr/share/fonts/truetype/dejavu/DejaVuSansCondensed.ttf', uni=True)
        self.add_font('DejaVu', 'B', '/usr/share/fonts/truetype/dejavu/DejaVuSansCondensed-Bold.ttf', uni=True)

    def font_normal(self, size):
        """Set font to a normal one (no bold/italic).

        :param size: desired font size
        """
        self.set_font('DejaVu', '', size)

    def font_bold(self, size):
        """Set font to a bold one (no italic).

        :param size: desired font size
        """
        self.set_font('DejaVu', 'B', size)

    def gen_final_data(self):
        """Get final byte string data for PDF."""
        # 'S' makes fpdf return the document as a string instead of
        # writing it to a file.
        return self.output('', 'S')

    def gen_response(self, filename):
        """Generate HTTP response that download this PDF.

        :param filename: filename of the downloaded file, w/o '.pdf' extension
        """
        resp = make_response(self.gen_final_data())
        resp.headers['Content-Disposition'] = 'attachment; filename="{0}.pdf"'.format(filename)
        resp.mimetype = 'application/pdf'
        return resp
class TablePDF(SPZPDF):
    """Shared page header and accessors for tabular list generators.

    Subclasses must define ``column_size`` (cell widths in mm) and
    ``header_texts`` (column captions) as class attributes.
    """

    def header(self):
        # Institution name on the left, semester name right-aligned.
        self.font_normal(8)
        self.cell(0, 5, 'Karlsruher Institut für Technologie (KIT)', 0, 0)
        self.cell(0, 5, app.config['SEMESTER_NAME'], 0, 1, 'R')
        self.font_bold(10)
        self.cell(0, 5, 'Sprachenzentrum', 0)

    def get_column_size(self):
        """Return the list of column widths (mm) for the table body."""
        return self.column_size

    def get_header_texts(self):
        """Return the list of column captions for the table body."""
        return self.header_texts
class CourseGenerator(TablePDF):
    """Course list PDF: one applicant per row plus attendance/exam/grade columns."""

    # Cell widths (mm) and captions consumed via the TablePDF accessors.
    column_size = [7, 40, 40, 20, 70, 40, 15, 15, 15, 15]
    header_texts = ["Nr.", "Nachname", "Vorname", "Matr.", "E-Mail", "Telefon", "Tln.", "Prf.", "Note", "Prozent"]

    def header(self):
        # Common KIT/semester header, then the list title right-aligned.
        super(CourseGenerator, self).header()
        self.cell(0, 5, 'Kursliste', 0, 0, 'R')
        self.ln()

    def footer(self):
        """Signature line plus legal notes at the bottom of every page."""
        self.set_y(-20)
        self.font_normal(10)
        self.cell(
            0,
            7,
            'Datum _________________ Unterschrift ____________________________________',
            0,
            1,
            'R'
        )
        self.font_normal(6)
        self.cell(
            0,
            5,
            'Personen, die nicht auf der Liste stehen, '
            'haben nicht bezahlt und sind nicht zur Kursteilnahme berechtigt. '
            'Dementsprechend können Sie auch keine Teilnahme- oder Prüfungsscheine erhalten.',
            0,
            1,
            'C'
        )
        self.cell(
            0,
            5,
            'Nach Kursende bitte abhaken, ob der Teilnehmer regelmäßig anwesend war, '
            'ob er die Abschlussprüfung bestanden hat und dann die unterschriebene Liste wieder zurückgeben. Danke!',
            0,
            1,
            'C'
        )
class PresenceGenerator(TablePDF):
    """Attendance sheet PDF: applicant rows followed by tick-box columns."""

    # Cell widths (mm) and captions consumed via the TablePDF accessors;
    # the last, unlabeled column is repeated per session by list_presence().
    column_size = [7, 40, 40, 20, 80, 6]
    header_texts = ["Nr.", "Nachname", "Vorname", "Matr.", "E-Mail", ""]

    def header(self):
        # Common KIT/semester header, then the list title right-aligned.
        super(PresenceGenerator, self).header()
        self.cell(0, 5, 'Anwesenheitsliste', 0, 0, 'R')
        self.ln()

    def footer(self):
        """One-line note that the sheet stays with the lecturer."""
        self.set_y(-10)
        self.font_normal(8)
        self.cell(0, 5, 'Diese Liste bildet lediglich eine Hilfe im Unterricht und verbleibt beim Dozenten.', 0, 1, 'C')
class BillGenerator(SPZPDF):
    """Payment receipt PDF with two identical columns side by side.

    Every line is printed twice: the left column is the participant's
    copy, the right column stays at the Sprachenzentrum.
    """

    def header(self):
        # Layout constants shared by header, footer and the bill body.
        self.zwischenraum = 21   # gap between the two receipt columns
        self.teiler = ''         # content of the separator cell (empty)
        self.rahmen = 0          # border style of the separator cell
        self.breite = 128        # width of one receipt column

        # Derive the semester name from today's date.
        now = datetime.now()
        if now.month < 3:
            semester = 'Wintersemester {0}/{1}'.format(now.year - 1, now.year)
        elif now.month < 9:
            semester = 'Sommersemester {0}'.format(now.year)
        else:
            semester = 'Wintersemester {0}/{1}'.format(now.year, now.year + 1)

        # Line 1: institution name and semester, once per column.
        self.font_normal(8)
        self.cell(80, 5, 'Karlsruher Institut für Technologie (KIT)', 0, 0)
        self.cell(48, 5, semester, 0, 0, 'R')
        self.cell(self.zwischenraum, 5, self.teiler, self.rahmen, 0, 'C')
        self.cell(80, 5, 'Karlsruher Institut für Technologie (KIT)', 0, 0)
        self.cell(48, 5, semester, 0, 1, 'R')

        # Line 2: department (bold) and today's date, once per column.
        self.font_bold(8)
        self.cell(80, 5, 'Sprachenzentrum', 0, 0)
        self.font_normal(8)
        self.cell(48, 5, datetime.now().strftime("%d.%m.%Y"), 0, 0, 'R')
        self.cell(self.zwischenraum, 5, self.teiler, self.rahmen, 0, 'C')
        self.font_bold(8)
        self.cell(80, 5, 'Sprachenzentrum', 0, 0)
        self.font_normal(8)
        self.cell(48, 5, datetime.now().strftime("%d.%m.%Y"), 0, 1, 'R')

    def footer(self):
        """Closing disclaimer and copy labels, once per column."""
        self.set_y(-15)
        self.font_normal(8)
        disclaimer = ('Diese Quittung wurde maschinell ausgestellt '
                      'und ist ohne Unterschrift gültig.')
        self.cell(self.breite, 4, disclaimer, 0, 0, 'C')
        self.cell(self.zwischenraum, 4, self.teiler, self.rahmen, 0, 'C')
        self.cell(self.breite, 4, disclaimer, 0, 1, 'C')
        self.cell(self.breite, 4, 'Exemplar für den Teilnehmer.', 0, 0, 'C')
        self.cell(self.zwischenraum, 4, self.teiler, self.rahmen, 0, 'C')
        self.cell(self.breite, 4, 'Exemplar für das Sprachenzentrum.', 0, 1, 'C')
def list_presence(pdflist, course):
    """Render one course's attendance sheet onto *pdflist*.

    Adds a page titled with the course name, a header row, and one row
    per applicant, each followed by 13 empty tick-box cells (one per
    session).

    :param pdflist: a TablePDF subclass (PresenceGenerator) providing
                    column widths and captions
    :param course: course whose ``course_list`` applicants are printed
    """
    column = pdflist.get_column_size()
    header = pdflist.get_header_texts()

    def maybe(x):
        # Render None/empty values as an empty cell instead of 'None'.
        return x if x else ''

    # Sort a *copy*: previously the course's own applicant list was
    # re-ordered in place as a side effect of generating the PDF.
    roster = sorted(course.course_list)

    pdflist.add_page()
    pdflist.font_bold(14)
    pdflist.cell(0, 10, course.full_name, 0, 1, 'C')
    pdflist.font_normal(8)

    height = 6  # row height in mm
    for width, caption in zip(column, header):
        pdflist.cell(width, height, caption, 1)
    for _ in range(13):
        # Empty tick-box columns, one per course session.
        pdflist.cell(column[-1], height, '', 1)
    pdflist.ln()

    for idx, applicant in enumerate(roster, 1):
        content = [idx, applicant.last_name, applicant.first_name,
                   maybe(applicant.tag), applicant.mail, '']
        for width, value in zip(column, content):
            pdflist.cell(width, height, str(value), 1)
        for _ in range(13):
            pdflist.cell(column[-1], height, '', 1)
        pdflist.ln()
@login_required
def print_course_presence(course_id):
    """HTTP endpoint: download the attendance-sheet PDF for one course.

    :param course_id: primary key of the course; responds 404 if unknown
    """
    pdflist = PresenceGenerator()
    course = models.Course.query.get_or_404(course_id)
    list_presence(pdflist, course)
    return pdflist.gen_response(course.full_name)
@login_required
def print_language_presence(language_id):
    """HTTP endpoint: download attendance sheets for ALL courses of a language.

    One page (or more) per course, concatenated into a single PDF.

    :param language_id: primary key of the language; responds 404 if unknown
    """
    language = models.Language.query.get_or_404(language_id)
    pdflist = PresenceGenerator()
    for course in language.courses:
        list_presence(pdflist, course)
    return pdflist.gen_response(language.name)
def list_course(pdflist, course):
    """Render one course's roster page onto *pdflist*.

    Adds a page titled with the course name, a header row, and one row
    per applicant with empty attendance/exam/grade/percent cells to be
    filled in by hand.

    :param pdflist: a TablePDF subclass (CourseGenerator) providing
                    column widths and captions
    :param course: course whose ``course_list`` applicants are printed
    """
    column = pdflist.get_column_size()
    header = pdflist.get_header_texts()

    def maybe(x):
        # Render None/empty values as an empty cell instead of 'None'.
        return x if x else ''

    # Sort a *copy*: previously the course's own applicant list was
    # re-ordered in place as a side effect of generating the PDF.
    roster = sorted(course.course_list)

    pdflist.add_page()
    pdflist.font_bold(14)
    pdflist.cell(0, 10, '{0}'.format(course.full_name), 0, 1, 'C')
    pdflist.font_normal(8)

    height = 6  # row height in mm
    for width, caption in zip(column, header):
        pdflist.cell(width, height, caption, 1)
    pdflist.ln()

    for idx, applicant in enumerate(roster, 1):
        content = [
            idx,
            applicant.last_name,
            applicant.first_name,
            maybe(applicant.tag),
            applicant.mail,
            applicant.phone,
            "",   # Tln. (attendance)
            "",   # Prf. (exam passed)
            "",   # Note (grade)
            ""    # Prozent
        ]
        for width, value in zip(column, content):
            pdflist.cell(width, height, '{0}'.format(value), 1)
        pdflist.ln()
@login_required
def print_course(course_id):
    """HTTP endpoint: download the roster PDF for one course.

    :param course_id: primary key of the course; responds 404 if unknown
    """
    pdflist = CourseGenerator()
    course = models.Course.query.get_or_404(course_id)
    list_course(pdflist, course)
    return pdflist.gen_response(course.full_name)
@login_required
def print_language(language_id):
    """HTTP endpoint: download roster PDFs for ALL courses of a language.

    One page (or more) per course, concatenated into a single PDF.

    :param language_id: primary key of the language; responds 404 if unknown
    """
    language = models.Language.query.get_or_404(language_id)
    pdflist = CourseGenerator()
    for course in language.courses:
        list_course(pdflist, course)
    return pdflist.gen_response(language.name)
@login_required
def print_bill(applicant_id, course_id):
    """HTTP endpoint: download the payment receipt for one attendance.

    Every line is emitted twice, side by side, because the page carries
    the participant's copy (left) and the office copy (right); responds
    404 if no attendance exists for (applicant_id, course_id).
    """
    attendance = models.Attendance.query.get_or_404((applicant_id, course_id))
    bill = BillGenerator()
    bill.add_page()
    # Static text fragments of the receipt.
    title = 'Quittung'
    applicant_str = '{0} {1}'.format(attendance.applicant.first_name, attendance.applicant.last_name)
    tag_str = 'Matrikelnummer {0}'.format(attendance.applicant.tag) if attendance.applicant.tag else ''
    now = datetime.now()
    str1 = 'für die Teilnahme am Kurs:'
    course_str = attendance.course.full_name
    amount_str = '{0} Euro'.format(attendance.amountpaid)
    str2 = 'bezahlt.'
    str3 = 'Stempel'
    # Short reference code identifying applicant and course.
    code = 'A{0}C{1}'.format(applicant_id, course_id)
    bill.cell(bill.breite, 6, code, 0, 0, 'R')
    bill.cell(bill.zwischenraum, 6, bill.teiler, bill.rahmen, 0, 'C')
    bill.cell(bill.breite, 6, code, 0, 1, 'R')
    bill.ln(20)
    # Title.
    bill.font_bold(14)
    bill.cell(bill.breite, 8, title, 0, 0, 'C')
    bill.cell(bill.zwischenraum, 8, bill.teiler, bill.rahmen, 0, 'C')
    bill.cell(bill.breite, 8, title, 0, 1, 'C')
    bill.ln(20)
    # Who paid (name, optional matriculation number, date).
    bill.font_normal(10)
    bill.cell(bill.breite, 6, applicant_str, 0, 0)
    bill.cell(bill.zwischenraum, 6, bill.teiler, bill.rahmen, 0, 'C')
    bill.cell(bill.breite, 6, applicant_str, 0, 1)
    bill.cell(bill.breite, 6, tag_str, 0, 0)
    bill.cell(bill.zwischenraum, 6, bill.teiler, bill.rahmen, 0, 'C')
    bill.cell(bill.breite, 6, tag_str, 0, 1)
    bill.cell(bill.breite, 6, 'hat am {0}'.format(now.strftime("%d.%m.%Y")), 0, 0)
    bill.cell(bill.zwischenraum, 6, bill.teiler, bill.rahmen, 0, 'C')
    bill.cell(bill.breite, 6, 'hat am {0}'.format(now.strftime("%d.%m.%Y")), 0, 1)
    bill.cell(bill.breite, 6, str1, 0, 0)
    bill.cell(bill.zwischenraum, 6, bill.teiler, bill.rahmen, 0, 'C')
    bill.cell(bill.breite, 6, str1, 0, 1)
    # What was paid (course and amount, emphasised).
    bill.font_bold(10)
    bill.cell(bill.breite, 6, course_str, 0, 0, 'C')
    bill.cell(bill.zwischenraum, 6, bill.teiler, bill.rahmen, 0, 'C')
    bill.cell(bill.breite, 6, course_str, 0, 1, 'C')
    bill.cell(bill.breite, 6, amount_str, 0, 0, 'C')
    bill.cell(bill.zwischenraum, 6, bill.teiler, bill.rahmen, 0, 'C')
    bill.cell(bill.breite, 6, amount_str, 0, 1, 'C')
    bill.font_normal(10)
    bill.cell(bill.breite, 6, str2, 0, 0)
    bill.cell(bill.zwischenraum, 6, bill.teiler, bill.rahmen, 0, 'C')
    bill.cell(bill.breite, 6, str2, 0, 1)
    bill.ln(30)
    # Space for the official stamp.
    bill.cell(bill.breite, 6, str3, 0, 0, 'C')
    bill.cell(bill.zwischenraum, 6, bill.teiler, bill.rahmen, 0, 'C')
    bill.cell(bill.breite, 6, str3, 0, 1, 'C')
    return bill.gen_response('Quittung {0}'.format(attendance.applicant.last_name))
| mit |
zenathark/jg.waveletcodec | waveletcodec/entropy.py | 1 | 9233 | """Module for Entropy Coding Algorithms.
.. module::entropy
:platform: Unix, Windows
.. modelauthor:: Juan C Galan-Hernandez <jcgalanh@gmail.com>
"""
from __future__ import division
import itertools as it
class arithmeticb(object):
    """Binary arithmetic codec assuming (near-)infinite float precision.

    ``encode`` builds a static two-symbol model from the 0/1 frequencies
    of the input and narrows the [low, high) interval once per symbol;
    the final low endpoint is the payload.  ``decode`` reverses the
    process using the model shipped alongside the payload.

    NOTE: teaching implementation -- float rounding limits the message
    length that can be round-tripped reliably.
    """

    # Coder state (shadowed by instance attributes on first use).
    _ctr = 0
    _l = 0        # low end of the coding interval
    _h = 1        # high end of the coding interval
    _buff = 0
    _output = []  # decoded symbols
    _p = []       # model: per-symbol (low, high) subintervals

    def __init__(self):
        super(arithmeticb, self).__init__()

    def _initialize(self, data):
        """Reset coder state and build the static 0/1 probability model."""
        self._ctr = 0
        self._l = 0
        self._h = 0.9999
        self._buff = 0
        self._output = []
        # Probability of symbol 0; guard against empty input (previously
        # a ZeroDivisionError).
        x = 1 - sum(data) / len(data) if data else 1.0
        self._p = [(0, x), (x, 0.9999)]

    def encode(self, data):
        """Encode *data* (iterable of 0/1 ints) into a payload + model dict."""
        self._initialize(data)
        for symbol in data:
            l_i, h_i = self._p[symbol]
            d = self._h - self._l
            self._h = self._l + d * h_i
            self._l = self._l + d * l_i
            # Debug trace of the shrinking interval.  Formatting is done
            # inside the call so this works under Python 2 and 3 alike
            # (the original `print(...) % (...)` form relied on the
            # Python 2 print statement).
            print("l:%f h:%f" % (self._l, self._h))
        return {"payload": self._l, "model": self._p}

    def _dinitialize(self):
        """Reset the interval before decoding."""
        self._l = 0
        self._h = 0.9999

    def decode(self, data):
        """Decode a dict produced by :meth:`encode` back into a symbol list."""
        self._dinitialize()
        # BUG FIX: the transmitted model must go into ``self._p`` (read by
        # the loop below); it was previously assigned to ``self._output``,
        # so decoding used a stale/empty model and returned garbage.
        self._p = data["model"]
        self._output = []
        n = data["payload"]
        while n > 0:
            for symbol, (l_i, h_i) in enumerate(self._p):
                if l_i <= n < h_i:
                    self._output.append(symbol)
                    n = (n - l_i) / (h_i - l_i)
                    break
        return self._output
class barithmeticb(object):
    """Class of a binary arithmetic codec implemented using integer
    arithmetic

    Classic fixed-register arithmetic coder: the [low, high] interval is
    narrowed per symbol using cumulative frequencies computed once from
    the whole input (static model), with overflow/underflow bit
    renormalisation.  Python 2 only (print statement in ``encode``).

    NOTE(review): ``decode`` references ``self._p``, which this class
    never defines (apparently copied from ``arithmeticb``) -- calling it
    raises AttributeError.  Verify before use.
    """

    # Coder state.  NOTE(review): class attributes, shared between
    # instances until shadowed by instance attributes in _initialize().
    _underflow_bits = 0   # pending bits from underflow renormalisation
    _l = 0                # low end of the coding interval
    _h = 1                # high end of the coding interval
    _buff = 0
    _output = []          # emitted bits
    _bit_size = 0         # register width in bits
    _scale = 0            # total of all symbol frequencies
    _sigma = []           # symbol alphabet
    _idx = {}             # symbol -> index into the frequency tables
    _frequency = []       # per-symbol counts
    _accum_freq = []      # cumulative counts

    def __init__(self, sigma, bit_size=16, **kargs):
        """Create a coder for alphabet *sigma* with *bit_size*-bit registers."""
        super(barithmeticb, self).__init__()
        self._bit_size = bit_size
        self._sigma = sigma
        self._idx = dict([i for i in zip(sigma, range(len(sigma)))])
        self._scale = 2 ** self._bit_size - 1
        if 'model' in kargs:
            self._model = kargs['model']
        else:
            self._model = None

    def _initialize(self, data):
        """Reset the interval and build the static frequency model from *data*."""
        self._l = 0
        self._h = self._scale
        self._buff = 0
        self._output = []
        #calculate frequency of 0
        self._calculate_static_frequency(data)
        self._calculate_accum_freq(data)

    def encode(self, data):
        """ given list using arithmetic encoding."""
        self._initialize(data)
        for i in data:
            # Cumulative bounds of symbol i; _accum_freq[-1] is 0, so the
            # first symbol's lower bound falls out of the -1 index.
            l_i = self._accum_freq[self._idx[i] - 1]
            h_i = self._accum_freq[self._idx[i]]
            d = self._h - self._l
            self._h = int(self._l + d * (h_i / self._scale))
            self._l = int(self._l + d * (l_i / self._scale))
            # Renormalise: emit matching MSBs, absorb underflow.
            while self._check_overflow():
                pass
            while self._check_underflow():
                pass
            print("l:%d h:%d") % (self._l, self._h)
        # Flush the remaining low register as plain bits.
        self._output += [int(i) for i in bin(self._l)[2:]]
        r = {"payload": self._output, "model": self._model}
        return r

    def _calculate_static_frequency(self, data):
        """Count occurrences of every alphabet symbol in *data*."""
        self._frequency = [0] * (len(self._sigma))
        for i in self._sigma:
            self._frequency[self._idx[i]] = data.count(i)

    def _calculate_accum_freq(self, data):
        """Build cumulative frequencies; *data* is unused (kept for signature)."""
        self._accum_freq = [0] * (len(self._sigma) + 1)
        self._accum_freq[-1] = 0
        accum = 0
        for i in self._sigma:
            self._accum_freq[self._idx[i]] = (accum +
                                              self._frequency[self._idx[i]])
            accum += self._frequency[self._idx[i]]
        # NOTE(review): _scale is redefined here from register size to the
        # total frequency count -- intentional for the h_i/_scale ratios above.
        self._scale = accum

    def _check_overflow(self):
        """Emit the shared MSB (plus pending underflow bits) and shift; True if renormalised."""
        MSB = 1 << (self._bit_size - 1)
        if self._h & MSB == self._l & MSB:
            for _ in range(self._underflow_bits):
                # Pending underflow bits are the complement of the emitted MSB.
                self._output.append(int(not(self._h & MSB > 0)))
            self._output.append(int((self._h & MSB) > 0))
            self._underflow_bits = 0
            self._shift()
            return True
        return False

    def _check_underflow(self):
        """Absorb one underflow step (interval straddling the midpoint); True if renormalised."""
        MSB = 1 << (self._bit_size - 2)
        if self._h & MSB == 0 and self._l & MSB > 1:
            self._underflow_bits += 1
            # Clear the two top bits of low before shifting.
            low_mask = ((1 << self._bit_size - 1) |
                        (1 << self._bit_size - 2))
            low_mask = ~low_mask & 2 ** self._bit_size - 1
            self._l &= low_mask
            self._shift()
            self._h |= (1 << self._bit_size - 1)
            return True
        return False

    def _shift(self):
        """Shift both registers left one bit, keeping them in range; high gets a 1 in."""
        self._l <<= 1
        self._h <<= 1
        self._l &= 2 ** self._bit_size - 1
        self._h &= 2 ** self._bit_size - 1
        self._h |= 1

    def _dinitialize(self):
        # NOTE(review): resets to float bounds although this codec is
        # integer-based -- looks copied from arithmeticb.
        self._l = 0
        self._h = 0.9999
        #calculate frequency of 0

    def decode(self, data):
        """NOTE(review): broken -- iterates self._p, never defined in this class."""
        self._dinitialize()
        self._output = data["model"]
        n = data["payload"]
        while(n > 0):
            for i, (l_i, h_i) in zip(range(len(self._p)), self._p):
                if l_i <= n and n < h_i:
                    self._output.append(i)
                    d = h_i - l_i
                    n = (n - l_i) / d
                    break
        return self._output
class abac(object):
    """Class of a binary arithmetic codec implemented using integer
    arithmetic

    Adaptive variant of ``barithmeticb``: every symbol frequency starts
    at 1 and is incremented as symbols are pushed, so the model adapts
    to the stream.  ``push``/``get_current_stream`` allow incremental
    encoding.

    NOTE(review): ``decode`` references ``self._p``, which this class
    never defines (apparently copied from ``arithmeticb``) -- calling it
    raises AttributeError.  Verify before use.
    """

    # Coder state.  NOTE(review): class attributes, shared between
    # instances until shadowed by instance attributes in _initialize().
    _underflow_bits = 0   # pending bits from underflow renormalisation
    _l = 0                # low end of the coding interval
    _h = 1                # high end of the coding interval
    _buff = 0
    _output = []          # emitted bits
    _bit_size = 0         # register width in bits
    _scale = 0            # total of all symbol frequencies
    _sigma = []           # symbol alphabet
    _idx = {}             # symbol -> index into the frequency tables
    _frequency = []       # per-symbol counts (adaptive)
    _accum_freq = []      # cumulative counts

    def __init__(self, sigma, bit_size=16, **kargs):
        """Create a coder for alphabet *sigma* with *bit_size*-bit registers."""
        super(abac, self).__init__()
        self._bit_size = bit_size
        self._sigma = sigma
        self._idx = dict([i for i in zip(sigma, range(len(sigma)))])
        self._scale = 2 ** self._bit_size - 1
        if 'model' in kargs:
            self._model = kargs['model']
        else:
            self._model = None

    def _initialize(self):
        """Reset the interval and the adaptive model (all frequencies = 1)."""
        self._l = 0
        self._h = self._scale
        self._buff = 0
        self._output = []
        #calculate frequency of 0
        self._accum_freq = [0] * (len(self._sigma) + 1)
        self._accum_freq[-1] = 0
        self._frequency = [1] * (len(self._sigma))

    def encode(self, data):
        """ given list using arithmetic encoding."""
        self._initialize()
        for i in data:
            self.push(i)
        return self.get_current_stream()

    def push(self, symbol):
        """Encode one *symbol*, updating the adaptive model first."""
        self._frequency[self._idx[symbol]] += 1
        self._calculate_accum_freq()
        # Cumulative bounds of the symbol; _accum_freq[-1] is 0, so the
        # first symbol's lower bound falls out of the -1 index.
        l_i = self._accum_freq[self._idx[symbol] - 1]
        h_i = self._accum_freq[self._idx[symbol]]
        d = self._h - self._l
        self._h = int(self._l + d * (h_i / self._scale))
        self._l = int(self._l + d * (l_i / self._scale))
        # Renormalise: emit matching MSBs, absorb underflow.
        while self._check_overflow():
            pass
        while self._check_underflow():
            pass

    def get_current_stream(self):
        """Return the bits emitted so far plus the flushed low register."""
        output = self._output + [int(i) for i in bin(self._l)[2:]]
        r = {"payload": output, "model": self._model}
        return r

    def _calculate_accum_freq(self):
        """Rebuild cumulative frequencies from the adaptive counts."""
        accum = 0
        for i in self._sigma:
            self._accum_freq[self._idx[i]] = (accum +
                                              self._frequency[self._idx[i]])
            accum += self._frequency[self._idx[i]]
        # NOTE(review): _scale is redefined here from register size to the
        # total frequency count -- intentional for the h_i/_scale ratios above.
        self._scale = accum

    def _check_overflow(self):
        """Emit the shared MSB (plus pending underflow bits) and shift; True if renormalised."""
        MSB = 1 << (self._bit_size - 1)
        if self._h & MSB == self._l & MSB:
            for _ in range(self._underflow_bits):
                # Pending underflow bits are the complement of the emitted MSB.
                self._output.append(int(not(self._h & MSB > 0)))
            self._output.append(int((self._h & MSB) > 0))
            self._underflow_bits = 0
            self._shift()
            return True
        return False

    def _check_underflow(self):
        """Absorb one underflow step (interval straddling the midpoint); True if renormalised."""
        MSB = 1 << (self._bit_size - 2)
        if self._h & MSB == 0 and self._l & MSB > 1:
            self._underflow_bits += 1
            # Clear the two top bits of low before shifting.
            low_mask = ((1 << self._bit_size - 1) |
                        (1 << self._bit_size - 2))
            low_mask = ~low_mask & 2 ** self._bit_size - 1
            self._l &= low_mask
            self._shift()
            self._h |= (1 << self._bit_size - 1)
            return True
        return False

    def _shift(self):
        """Shift both registers left one bit, keeping them in range; high gets a 1 in."""
        self._l <<= 1
        self._h <<= 1
        self._l &= 2 ** self._bit_size - 1
        self._h &= 2 ** self._bit_size - 1
        self._h |= 1

    def _dinitialize(self):
        # NOTE(review): resets to float bounds although this codec is
        # integer-based -- looks copied from arithmeticb.
        self._l = 0
        self._h = 0.9999
        #calculate frequency of 0

    def decode(self, data):
        """NOTE(review): broken -- iterates self._p, never defined in this class."""
        self._dinitialize()
        self._output = data["model"]
        n = data["payload"]
        while(n > 0):
            for i, (l_i, h_i) in zip(range(len(self._p)), self._p):
                if l_i <= n and n < h_i:
                    self._output.append(i)
                    d = h_i - l_i
                    n = (n - l_i) / d
                    break
        return self._output

    def length(self):
        """Length in bits of the stream if flushed now (emitted + register)."""
        return len(self._output) + self._bit_size
| mit |
gerashegalov/Impala | tests/metadata/test_explain.py | 13 | 6421 | # Copyright (c) 2012 Cloudera, Inc. All rights reserved.
# Functional tests running EXPLAIN statements.
#
import logging
import pytest
import re
from tests.common.test_vector import *
from tests.common.impala_test_suite import *
from tests.common.skip import SkipIfS3
from tests.util.filesystem_utils import WAREHOUSE
# Tests the different explain levels [0-3] on a few queries.
# TODO: Clean up this test to use an explain level test dimension and appropriate
# result sub-sections for the expected explain plans.
class TestExplain(ImpalaTestSuite):
    """Runs EXPLAIN at the different explain levels and checks the output."""

    # Value for the num_scanner_threads query option to ensure that the memory estimates of
    # scan nodes are consistent even when run on machines with different numbers of cores.
    NUM_SCANNER_THREADS = 1

    @classmethod
    def get_workload(self):
        return 'functional-query'

    @classmethod
    def add_test_dimensions(cls):
        # Restrict to uncompressed text with default exec options; explain
        # output would otherwise differ per format/option combination.
        super(TestExplain, cls).add_test_dimensions()
        cls.TestMatrix.add_constraint(lambda v:\
            v.get_value('table_format').file_format == 'text' and\
            v.get_value('table_format').compression_codec == 'none' and\
            v.get_value('exec_option')['batch_size'] == 0 and\
            v.get_value('exec_option')['disable_codegen'] == False and\
            v.get_value('exec_option')['num_nodes'] != 1)

    @pytest.mark.xfail(run=False, reason="Expected per-host mem requirements inconsistent")
    def test_explain_level0(self, vector):
        vector.get_value('exec_option')['num_scanner_threads'] = self.NUM_SCANNER_THREADS
        vector.get_value('exec_option')['explain_level'] = 0
        self.run_test_case('QueryTest/explain-level0', vector)

    @pytest.mark.xfail(run=False, reason="Expected per-host mem requirements inconsistent")
    def test_explain_level1(self, vector):
        vector.get_value('exec_option')['num_scanner_threads'] = self.NUM_SCANNER_THREADS
        vector.get_value('exec_option')['explain_level'] = 1
        self.run_test_case('QueryTest/explain-level1', vector)

    @pytest.mark.xfail(run=False, reason="The test for missing table stats fails for avro")
    def test_explain_level2(self, vector):
        vector.get_value('exec_option')['num_scanner_threads'] = self.NUM_SCANNER_THREADS
        vector.get_value('exec_option')['explain_level'] = 2
        self.run_test_case('QueryTest/explain-level2', vector)

    @pytest.mark.xfail(run=False, reason="The test for missing table stats fails for avro")
    def test_explain_level3(self, vector):
        vector.get_value('exec_option')['num_scanner_threads'] = self.NUM_SCANNER_THREADS
        vector.get_value('exec_option')['explain_level'] = 3
        self.run_test_case('QueryTest/explain-level3', vector)

    def test_explain_validate_cardinality_estimates(self, vector):
        # Tests that the cardinality estimates are correct for partitioned tables.
        # TODO Cardinality estimation tests should eventually be part of the planner tests.
        # TODO Remove this test
        db_name = 'functional'
        tbl_name = 'alltypes'

        def check_cardinality(query_result, expected_cardinality):
            # Pull the cardinality out of every plan-node annotation line.
            regex = re.compile('tuple-ids=\d+ row-size=\d+B cardinality=(\d+)')
            for res in query_result:
                m = regex.match(res.strip())
                if m:
                    assert len(m.groups()) == 1
                    # Compare against the expected cardinality string.
                    assert m.groups()[0] == expected_cardinality

        # All partitions are filtered out, cardinality should be 0.
        result = self.execute_query("explain select * from %s.%s where year = 1900" % (
            db_name, tbl_name), query_options={'explain_level':3})
        check_cardinality(result.data, '0')
        # Half of the partitions are filtered out, cardinality should be 3650.
        result = self.execute_query("explain select * from %s.%s where year = 2010" % (
            db_name, tbl_name), query_options={'explain_level':3})
        check_cardinality(result.data, '3650')
        # None of the partitions are filtered out, cardinality should be 7300.
        result = self.execute_query("explain select * from %s.%s" % (db_name, tbl_name),
            query_options={'explain_level':3})
        check_cardinality(result.data, '7300')
class TestExplainEmptyPartition(ImpalaTestSuite):
    """Regression tests around missing-stats warnings for empty partitions."""

    # Scratch database, dropped and recreated around every test method.
    TEST_DB_NAME = "imp_1708"

    def setup_method(self, method):
        self.cleanup_db(self.TEST_DB_NAME)
        self.execute_query("create database if not exists {0} location '{1}/{0}.db'"
                           .format(self.TEST_DB_NAME, WAREHOUSE))

    def teardown_method(self, method):
        self.cleanup_db(self.TEST_DB_NAME)

    @SkipIfS3.hdfs_client
    def test_non_empty_partition_0_rows(self):
        """Regression test for IMPALA-1708: if a partition has 0 rows but > 0 files after
        COMPUTE STATS, don't warn the user about missing stats. The files are probably
        corrupted, or used for something else."""
        self.client.execute("SET EXPLAIN_LEVEL=3")
        self.client.execute(
            "CREATE TABLE %s.empty_partition (col int) partitioned by (p int)" % self.TEST_DB_NAME);
        self.client.execute(
            "ALTER TABLE %s.empty_partition ADD PARTITION (p=NULL)" % self.TEST_DB_NAME)
        # Put an empty file in the partition so we have > 0 files, but 0 rows
        self.hdfs_client.create_file(
            "test-warehouse/%s.db/empty_partition/p=__HIVE_DEFAULT_PARTITION__/empty" %
            self.TEST_DB_NAME, "")
        self.client.execute("REFRESH %s.empty_partition" % self.TEST_DB_NAME)
        self.client.execute("COMPUTE STATS %s.empty_partition" % self.TEST_DB_NAME)
        # "NULL\t0\t1" = partition NULL, 0 rows, 1 file in SHOW PARTITIONS.
        assert "NULL\t0\t1" in str(
            self.client.execute("SHOW PARTITIONS %s.empty_partition" % self.TEST_DB_NAME))
        assert "missing relevant table and/or column statistics" not in str(
            self.client.execute("EXPLAIN SELECT * FROM %s.empty_partition" % self.TEST_DB_NAME))
        # Now add a partition with some data (so it gets selected into the scan), to check
        # that its lack of stats is correctly identified
        self.client.execute(
            "ALTER TABLE %s.empty_partition ADD PARTITION (p=1)" % self.TEST_DB_NAME)
        self.hdfs_client.create_file("test-warehouse/%s.db/empty_partition/p=1/rows" %
                                     self.TEST_DB_NAME, "1")
        self.client.execute("REFRESH %s.empty_partition" % self.TEST_DB_NAME)
        explain_result = str(
            self.client.execute("EXPLAIN SELECT * FROM %s.empty_partition" % self.TEST_DB_NAME))
        assert "missing relevant table and/or column statistics" in explain_result
        # Also test IMPALA-1530 - adding the number of partitions missing stats
        assert "1 partition(s) missing stats" in explain_result
| apache-2.0 |
resmo/ansible | lib/ansible/modules/storage/netapp/na_ontap_net_ifgrp.py | 38 | 11541 | #!/usr/bin/python
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = """
module: na_ontap_net_ifgrp
short_description: NetApp Ontap modify network interface group
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create, modify ports, destroy the network interface group
options:
state:
description:
- Whether the specified network interface group should exist or not.
choices: ['present', 'absent']
default: present
distribution_function:
description:
- Specifies the traffic distribution function for the ifgrp.
choices: ['mac', 'ip', 'sequential', 'port']
name:
description:
- Specifies the interface group name.
required: true
mode:
description:
- Specifies the link policy for the ifgrp.
node:
description:
- Specifies the name of node.
required: true
ports:
aliases:
- port
description:
- List of expected ports to be present in the interface group.
- If a port is present in this list, but not on the target, it will be added.
- If a port is not in the list, but present on the target, it will be removed.
- Make sure the list contains all ports you want to see on the target.
version_added: '2.8'
"""
EXAMPLES = """
- name: create ifgrp
na_ontap_net_ifgrp:
state: present
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
hostname: "{{ netapp_hostname }}"
distribution_function: ip
name: a0c
ports: [e0a]
mode: multimode
node: "{{ Vsim node name }}"
- name: modify ports in an ifgrp
na_ontap_net_ifgrp:
state: present
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
hostname: "{{ netapp_hostname }}"
distribution_function: ip
name: a0c
port: [e0a, e0c]
mode: multimode
node: "{{ Vsim node name }}"
- name: delete ifgrp
na_ontap_net_ifgrp:
state: absent
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
hostname: "{{ netapp_hostname }}"
name: a0c
node: "{{ Vsim node name }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp_module import NetAppModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapIfGrp(object):
"""
Create, Modifies and Destroys a IfGrp
"""
    def __init__(self):
        """
        Initialize the Ontap IfGrp class
        """
        # Standard ONTAP connection arguments (hostname, credentials, ...).
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            distribution_function=dict(required=False, type='str', choices=['mac', 'ip', 'sequential', 'port']),
            name=dict(required=True, type='str'),
            mode=dict(required=False, type='str'),
            node=dict(required=True, type='str'),
            ports=dict(required=False, type='list', aliases=["port"]),
        ))
        # distribution_function and mode are only required when the ifgrp
        # is to be created/kept present.
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ('state', 'present', ['distribution_function', 'mode'])
            ],
            supports_check_mode=True
        )
        # set up variables
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            # ZAPI connection used by all the net-port-ifgrp-* calls below.
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
        return
    def get_if_grp(self):
        """
        Return details about the if_group
        :param:
            name : Name of the if_group
        :return: Details about the if_group. None if not found.
        :rtype: dict
        """
        if_group_iter = netapp_utils.zapi.NaElement('net-port-get-iter')
        if_group_info = netapp_utils.zapi.NaElement('net-port-info')
        # Query by exact port name, restricted to if_group-type ports on
        # the requested node.
        if_group_info.add_new_child('port', self.parameters['name'])
        if_group_info.add_new_child('port-type', 'if_group')
        if_group_info.add_new_child('node', self.parameters['node'])
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(if_group_info)
        if_group_iter.add_child_elem(query)
        try:
            result = self.server.invoke_successfully(if_group_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error getting if_group %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        return_value = None
        if result.get_child_by_name('num-records') and int(result['num-records']) >= 1:
            # Take the first match (the exact-name query yields at most one
            # per node).
            if_group_attributes = result['attributes-list']['net-port-info']
            return_value = {
                'name': if_group_attributes['port'],
                'distribution_function': if_group_attributes['ifgrp-distribution-function'],
                'mode': if_group_attributes['ifgrp-mode'],
                'node': if_group_attributes['node'],
            }
        return return_value
    def get_if_grp_ports(self):
        """
        Return ports of the if_group
        :param:
            name : Name of the if_group
        :return: Ports of the if_group. None if not found.
        :rtype: dict
        """
        if_group_iter = netapp_utils.zapi.NaElement('net-port-ifgrp-get')
        if_group_iter.add_new_child('ifgrp-name', self.parameters['name'])
        if_group_iter.add_new_child('node', self.parameters['node'])
        try:
            result = self.server.invoke_successfully(if_group_iter, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error getting if_group ports %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        port_list = []
        if result.get_child_by_name('attributes'):
            if_group_attributes = result['attributes']['net-ifgrp-info']
            # 'ports' may be absent when the ifgrp has no member ports yet.
            if if_group_attributes.get_child_by_name('ports'):
                ports = if_group_attributes.get_child_by_name('ports').get_children()
                for each in ports:
                    port_list.append(each.get_content())
        # Wrapped in a dict so it can be diffed directly against the
        # module's 'ports' parameter by get_modified_attributes().
        return {'ports': port_list}
def create_if_grp(self):
"""
Creates a new ifgrp
"""
route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-create")
route_obj.add_new_child("distribution-function", self.parameters['distribution_function'])
route_obj.add_new_child("ifgrp-name", self.parameters['name'])
route_obj.add_new_child("mode", self.parameters['mode'])
route_obj.add_new_child("node", self.parameters['node'])
try:
self.server.invoke_successfully(route_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error creating if_group %s: %s' % (self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
for port in self.parameters.get('ports'):
self.add_port_to_if_grp(port)
    def delete_if_grp(self):
        """
        Deletes a ifgrp

        Fails the Ansible module with the ZAPI error message on failure.
        """
        route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-destroy")
        route_obj.add_new_child("ifgrp-name", self.parameters['name'])
        route_obj.add_new_child("node", self.parameters['node'])
        try:
            self.server.invoke_successfully(route_obj, True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting if_group %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
def add_port_to_if_grp(self, port):
"""
adds port to a ifgrp
"""
route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-add-port")
route_obj.add_new_child("ifgrp-name", self.parameters['name'])
route_obj.add_new_child("port", port)
route_obj.add_new_child("node", self.parameters['node'])
try:
self.server.invoke_successfully(route_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error adding port %s to if_group %s: %s' %
(port, self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
def modify_ports(self, current_ports):
add_ports = set(self.parameters['ports']) - set(current_ports)
remove_ports = set(current_ports) - set(self.parameters['ports'])
for port in add_ports:
self.add_port_to_if_grp(port)
for port in remove_ports:
self.remove_port_to_if_grp(port)
def remove_port_to_if_grp(self, port):
"""
removes port from a ifgrp
"""
route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-remove-port")
route_obj.add_new_child("ifgrp-name", self.parameters['name'])
route_obj.add_new_child("port", port)
route_obj.add_new_child("node", self.parameters['node'])
try:
self.server.invoke_successfully(route_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error removing port %s to if_group %s: %s' %
(port, self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
def autosupport_log(self):
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event("na_ontap_net_ifgrp", cserver)
    def apply(self):
        """Run the module: decide whether to create/delete/modify the ifgrp
        and exit reporting whether anything changed."""
        # Best-effort EMS bookkeeping before doing any work.
        self.autosupport_log()
        current, modify = self.get_if_grp(), None
        # cd_action is 'create', 'delete' or None (exists and should exist).
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if cd_action is None and self.parameters['state'] == 'present':
            # Group already exists: check whether its port membership differs.
            current_ports = self.get_if_grp_ports()
            modify = self.na_helper.get_modified_attributes(current_ports, self.parameters)
        if self.na_helper.changed:
            if self.module.check_mode:
                # Check mode: report the pending change without applying it.
                pass
            else:
                if cd_action == 'create':
                    self.create_if_grp()
                elif cd_action == 'delete':
                    self.delete_if_grp()
                elif modify:
                    # current_ports is guaranteed bound here: modify is only
                    # set on the cd_action-is-None branch above.
                    self.modify_ports(current_ports['ports'])
        self.module.exit_json(changed=self.na_helper.changed)
def main():
    """Instantiate the NetApp ONTAP ifgrp module object and run it."""
    ifgrp_module = NetAppOntapIfGrp()
    ifgrp_module.apply()


if __name__ == '__main__':
    main()
| gpl-3.0 |
davidam/python-examples | basics/try-simple.py | 1 | 2408 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
import os
import signal

# Exception-handling demo script: each section triggers or avoids an error
# and, on failure, stops the process by signalling itself with SIGUSR1.

filename = input("Indica el nombre del archivo: ")
try:
    handle = open(filename, "r+")
except FileNotFoundError:
    print("The program has not found the file, it stops.")
    os.kill(os.getpid(), signal.SIGUSR1)
print("The program has found the file, it continues.")

# Collect three integers, then demonstrate IndexError handling.
values = [
    int(input("Give me an element for my array: ")),
    int(input("Give me another element for my array: ")),
    int(input("Give me the last element for my array: ")),
]
try:
    print("Second element = %d" % (values[1]))
    print("Third element = %d" % (values[2]))
except IndexError:
    print("The program has troubles with the array indexes")
    os.kill(os.getpid(), signal.SIGUSR1)
print("The program don't have a trouble with the array indexes")

# Handle several exception types with a single except clause.
n1 = int(input("Give me a number: "))
n2 = int(input("Give me another number: "))
try:
    quotient = n1 / n2
    print("Value of m = %s" % quotient)
except (ZeroDivisionError, NameError):
    print("\nThe program has troubles with the zero division")
    os.kill(os.getpid(), signal.SIGUSR1)
print("The programa has not troubles with the zero division")

# Raise an exception on purpose, catch it, then re-raise it.
try:
    raise NameError("Hi there")
except NameError:
    print("We force a raise exception")
    raise
| gpl-3.0 |
pavlova-marina/QGIS | python/ext-libs/owslib/crs.py | 30 | 21380 | # -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2011 Tom Kralidis
#
# Authors : Tom Kralidis <tomkralidis@gmail.com>
#
# Contact email: tomkralidis@gmail.com
# =============================================================================
""" API for OGC CRS constructs. """
# list of EPSG codes whose coordinate axis order
# is y,x (e.g. latitude, longitude)
axisorder_yx = frozenset([
4326,
4258,
31466,
31467,
31468,
31469,
2166,
2167,
2168,
2036,
2044,
2045,
2065,
2081,
2082,
2083,
2085,
2086,
2091,
2092,
2093,
2096,
2097,
2098,
2105,
2106,
2107,
2108,
2109,
2110,
2111,
2112,
2113,
2114,
2115,
2116,
2117,
2118,
2119,
2120,
2121,
2122,
2123,
2124,
2125,
2126,
2127,
2128,
2129,
2130,
2131,
2132,
2169,
2170,
2171,
2172,
2173,
2174,
2175,
2176,
2177,
2178,
2179,
2180,
2193,
2199,
2200,
2206,
2207,
2208,
2209,
2210,
2211,
2212,
2319,
2320,
2321,
2322,
2323,
2324,
2325,
2326,
2327,
2328,
2329,
2330,
2331,
2332,
2333,
2334,
2335,
2336,
2337,
2338,
2339,
2340,
2341,
2342,
2343,
2344,
2345,
2346,
2347,
2348,
2349,
2350,
2351,
2352,
2353,
2354,
2355,
2356,
2357,
2358,
2359,
2360,
2361,
2362,
2363,
2364,
2365,
2366,
2367,
2368,
2369,
2370,
2371,
2372,
2373,
2374,
2375,
2376,
2377,
2378,
2379,
2380,
2381,
2382,
2383,
2384,
2385,
2386,
2387,
2388,
2389,
2390,
2391,
2392,
2393,
2394,
2395,
2396,
2397,
2398,
2399,
2400,
2401,
2402,
2403,
2404,
2405,
2406,
2407,
2408,
2409,
2410,
2411,
2412,
2413,
2414,
2415,
2416,
2417,
2418,
2419,
2420,
2421,
2422,
2423,
2424,
2425,
2426,
2427,
2428,
2429,
2430,
2431,
2432,
2433,
2434,
2435,
2436,
2437,
2438,
2439,
2440,
2441,
2442,
2443,
2444,
2445,
2446,
2447,
2448,
2449,
2450,
2451,
2452,
2453,
2454,
2455,
2456,
2457,
2458,
2459,
2460,
2461,
2462,
2463,
2464,
2465,
2466,
2467,
2468,
2469,
2470,
2471,
2472,
2473,
2474,
2475,
2476,
2477,
2478,
2479,
2480,
2481,
2482,
2483,
2484,
2485,
2486,
2487,
2488,
2489,
2490,
2491,
2492,
2493,
2494,
2495,
2496,
2497,
2498,
2499,
2500,
2501,
2502,
2503,
2504,
2505,
2506,
2507,
2508,
2509,
2510,
2511,
2512,
2513,
2514,
2515,
2516,
2517,
2518,
2519,
2520,
2521,
2522,
2523,
2524,
2525,
2526,
2527,
2528,
2529,
2530,
2531,
2532,
2533,
2534,
2535,
2536,
2537,
2538,
2539,
2540,
2541,
2542,
2543,
2544,
2545,
2546,
2547,
2548,
2549,
2551,
2552,
2553,
2554,
2555,
2556,
2557,
2558,
2559,
2560,
2561,
2562,
2563,
2564,
2565,
2566,
2567,
2568,
2569,
2570,
2571,
2572,
2573,
2574,
2575,
2576,
2577,
2578,
2579,
2580,
2581,
2582,
2583,
2584,
2585,
2586,
2587,
2588,
2589,
2590,
2591,
2592,
2593,
2594,
2595,
2596,
2597,
2598,
2599,
2600,
2601,
2602,
2603,
2604,
2605,
2606,
2607,
2608,
2609,
2610,
2611,
2612,
2613,
2614,
2615,
2616,
2617,
2618,
2619,
2620,
2621,
2622,
2623,
2624,
2625,
2626,
2627,
2628,
2629,
2630,
2631,
2632,
2633,
2634,
2635,
2636,
2637,
2638,
2639,
2640,
2641,
2642,
2643,
2644,
2645,
2646,
2647,
2648,
2649,
2650,
2651,
2652,
2653,
2654,
2655,
2656,
2657,
2658,
2659,
2660,
2661,
2662,
2663,
2664,
2665,
2666,
2667,
2668,
2669,
2670,
2671,
2672,
2673,
2674,
2675,
2676,
2677,
2678,
2679,
2680,
2681,
2682,
2683,
2684,
2685,
2686,
2687,
2688,
2689,
2690,
2691,
2692,
2693,
2694,
2695,
2696,
2697,
2698,
2699,
2700,
2701,
2702,
2703,
2704,
2705,
2706,
2707,
2708,
2709,
2710,
2711,
2712,
2713,
2714,
2715,
2716,
2717,
2718,
2719,
2720,
2721,
2722,
2723,
2724,
2725,
2726,
2727,
2728,
2729,
2730,
2731,
2732,
2733,
2734,
2735,
2738,
2739,
2740,
2741,
2742,
2743,
2744,
2745,
2746,
2747,
2748,
2749,
2750,
2751,
2752,
2753,
2754,
2755,
2756,
2757,
2758,
2935,
2936,
2937,
2938,
2939,
2940,
2941,
2953,
2963,
3006,
3007,
3008,
3009,
3010,
3011,
3012,
3013,
3014,
3015,
3016,
3017,
3018,
3019,
3020,
3021,
3022,
3023,
3024,
3025,
3026,
3027,
3028,
3029,
3030,
3034,
3035,
3038,
3039,
3040,
3041,
3042,
3043,
3044,
3045,
3046,
3047,
3048,
3049,
3050,
3051,
3058,
3059,
3068,
3114,
3115,
3116,
3117,
3118,
3120,
3126,
3127,
3128,
3129,
3130,
3131,
3132,
3133,
3134,
3135,
3136,
3137,
3138,
3139,
3140,
3146,
3147,
3150,
3151,
3152,
3300,
3301,
3328,
3329,
3330,
3331,
3332,
3333,
3334,
3335,
3346,
3350,
3351,
3352,
3366,
3386,
3387,
3388,
3389,
3390,
3396,
3397,
3398,
3399,
3407,
3414,
3416,
3764,
3788,
3789,
3790,
3791,
3793,
3795,
3796,
3819,
3821,
3823,
3824,
3833,
3834,
3835,
3836,
3837,
3838,
3839,
3840,
3841,
3842,
3843,
3844,
3845,
3846,
3847,
3848,
3849,
3850,
3851,
3852,
3854,
3873,
3874,
3875,
3876,
3877,
3878,
3879,
3880,
3881,
3882,
3883,
3884,
3885,
3888,
3889,
3906,
3907,
3908,
3909,
3910,
3911,
4001,
4002,
4003,
4004,
4005,
4006,
4007,
4008,
4009,
4010,
4011,
4012,
4013,
4014,
4015,
4016,
4017,
4018,
4019,
4020,
4021,
4022,
4023,
4024,
4025,
4026,
4027,
4028,
4029,
4030,
4031,
4032,
4033,
4034,
4035,
4036,
4037,
4038,
4040,
4041,
4042,
4043,
4044,
4045,
4046,
4047,
4052,
4053,
4054,
4055,
4074,
4075,
4080,
4081,
4120,
4121,
4122,
4123,
4124,
4125,
4126,
4127,
4128,
4129,
4130,
4131,
4132,
4133,
4134,
4135,
4136,
4137,
4138,
4139,
4140,
4141,
4142,
4143,
4144,
4145,
4146,
4147,
4148,
4149,
4150,
4151,
4152,
4153,
4154,
4155,
4156,
4157,
4158,
4159,
4160,
4161,
4162,
4163,
4164,
4165,
4166,
4167,
4168,
4169,
4170,
4171,
4172,
4173,
4174,
4175,
4176,
4178,
4179,
4180,
4181,
4182,
4183,
4184,
4185,
4188,
4189,
4190,
4191,
4192,
4193,
4194,
4195,
4196,
4197,
4198,
4199,
4200,
4201,
4202,
4203,
4204,
4205,
4206,
4207,
4208,
4209,
4210,
4211,
4212,
4213,
4214,
4215,
4216,
4218,
4219,
4220,
4221,
4222,
4223,
4224,
4225,
4226,
4227,
4228,
4229,
4230,
4231,
4232,
4233,
4234,
4235,
4236,
4237,
4238,
4239,
4240,
4241,
4242,
4243,
4244,
4245,
4246,
4247,
4248,
4249,
4250,
4251,
4252,
4253,
4254,
4255,
4256,
4257,
4259,
4260,
4261,
4262,
4263,
4264,
4265,
4266,
4267,
4268,
4269,
4270,
4271,
4272,
4273,
4274,
4275,
4276,
4277,
4278,
4279,
4280,
4281,
4282,
4283,
4284,
4285,
4286,
4287,
4288,
4289,
4291,
4292,
4293,
4294,
4295,
4296,
4297,
4298,
4299,
4300,
4301,
4302,
4303,
4304,
4306,
4307,
4308,
4309,
4310,
4311,
4312,
4313,
4314,
4315,
4316,
4317,
4318,
4319,
4322,
4324,
4327,
4329,
4339,
4341,
4343,
4345,
4347,
4349,
4351,
4353,
4355,
4357,
4359,
4361,
4363,
4365,
4367,
4369,
4371,
4373,
4375,
4377,
4379,
4381,
4383,
4386,
4388,
4417,
4434,
4463,
4466,
4469,
4470,
4472,
4475,
4480,
4482,
4483,
4490,
4491,
4492,
4493,
4494,
4495,
4496,
4497,
4498,
4499,
4500,
4501,
4502,
4503,
4504,
4505,
4506,
4507,
4508,
4509,
4510,
4511,
4512,
4513,
4514,
4515,
4516,
4517,
4518,
4519,
4520,
4521,
4522,
4523,
4524,
4525,
4526,
4527,
4528,
4529,
4530,
4531,
4532,
4533,
4534,
4535,
4536,
4537,
4538,
4539,
4540,
4541,
4542,
4543,
4544,
4545,
4546,
4547,
4548,
4549,
4550,
4551,
4552,
4553,
4554,
4555,
4557,
4558,
4568,
4569,
4570,
4571,
4572,
4573,
4574,
4575,
4576,
4577,
4578,
4579,
4580,
4581,
4582,
4583,
4584,
4585,
4586,
4587,
4588,
4589,
4600,
4601,
4602,
4603,
4604,
4605,
4606,
4607,
4608,
4609,
4610,
4611,
4612,
4613,
4614,
4615,
4616,
4617,
4618,
4619,
4620,
4621,
4622,
4623,
4624,
4625,
4626,
4627,
4628,
4629,
4630,
4631,
4632,
4633,
4634,
4635,
4636,
4637,
4638,
4639,
4640,
4641,
4642,
4643,
4644,
4645,
4646,
4652,
4653,
4654,
4655,
4656,
4657,
4658,
4659,
4660,
4661,
4662,
4663,
4664,
4665,
4666,
4667,
4668,
4669,
4670,
4671,
4672,
4673,
4674,
4675,
4676,
4677,
4678,
4679,
4680,
4681,
4682,
4683,
4684,
4685,
4686,
4687,
4688,
4689,
4690,
4691,
4692,
4693,
4694,
4695,
4696,
4697,
4698,
4699,
4700,
4701,
4702,
4703,
4704,
4705,
4706,
4707,
4708,
4709,
4710,
4711,
4712,
4713,
4714,
4715,
4716,
4717,
4718,
4719,
4720,
4721,
4722,
4723,
4724,
4725,
4726,
4727,
4728,
4729,
4730,
4731,
4732,
4733,
4734,
4735,
4736,
4737,
4738,
4739,
4740,
4741,
4742,
4743,
4744,
4745,
4746,
4747,
4748,
4749,
4750,
4751,
4752,
4753,
4754,
4755,
4756,
4757,
4758,
4759,
4760,
4761,
4762,
4763,
4764,
4765,
4766,
4767,
4768,
4769,
4770,
4771,
4772,
4773,
4774,
4775,
4776,
4777,
4778,
4779,
4780,
4781,
4782,
4783,
4784,
4785,
4786,
4787,
4788,
4789,
4790,
4791,
4792,
4793,
4794,
4795,
4796,
4797,
4798,
4799,
4800,
4801,
4802,
4803,
4804,
4805,
4806,
4807,
4808,
4809,
4810,
4811,
4812,
4813,
4814,
4815,
4816,
4817,
4818,
4819,
4820,
4821,
4822,
4823,
4824,
4839,
4855,
4856,
4857,
4858,
4859,
4860,
4861,
4862,
4863,
4864,
4865,
4866,
4867,
4868,
4869,
4870,
4871,
4872,
4873,
4874,
4875,
4876,
4877,
4878,
4879,
4880,
4883,
4885,
4887,
4889,
4891,
4893,
4895,
4898,
4900,
4901,
4902,
4903,
4904,
4907,
4909,
4921,
4923,
4925,
4927,
4929,
4931,
4933,
4935,
4937,
4939,
4941,
4943,
4945,
4947,
4949,
4951,
4953,
4955,
4957,
4959,
4961,
4963,
4965,
4967,
4969,
4971,
4973,
4975,
4977,
4979,
4981,
4983,
4985,
4987,
4989,
4991,
4993,
4995,
4997,
4999,
5012,
5013,
5017,
5048,
5105,
5106,
5107,
5108,
5109,
5110,
5111,
5112,
5113,
5114,
5115,
5116,
5117,
5118,
5119,
5120,
5121,
5122,
5123,
5124,
5125,
5126,
5127,
5128,
5129,
5130,
5132,
5167,
5168,
5169,
5170,
5171,
5172,
5173,
5174,
5175,
5176,
5177,
5178,
5179,
5180,
5181,
5182,
5183,
5184,
5185,
5186,
5187,
5188,
5224,
5228,
5229,
5233,
5245,
5246,
5251,
5252,
5253,
5254,
5255,
5256,
5257,
5258,
5259,
5263,
5264,
5269,
5270,
5271,
5272,
5273,
5274,
5275,
5801,
5802,
5803,
5804,
5808,
5809,
5810,
5811,
5812,
5813,
5814,
5815,
5816,
20004,
20005,
20006,
20007,
20008,
20009,
20010,
20011,
20012,
20013,
20014,
20015,
20016,
20017,
20018,
20019,
20020,
20021,
20022,
20023,
20024,
20025,
20026,
20027,
20028,
20029,
20030,
20031,
20032,
20064,
20065,
20066,
20067,
20068,
20069,
20070,
20071,
20072,
20073,
20074,
20075,
20076,
20077,
20078,
20079,
20080,
20081,
20082,
20083,
20084,
20085,
20086,
20087,
20088,
20089,
20090,
20091,
20092,
21413,
21414,
21415,
21416,
21417,
21418,
21419,
21420,
21421,
21422,
21423,
21453,
21454,
21455,
21456,
21457,
21458,
21459,
21460,
21461,
21462,
21463,
21473,
21474,
21475,
21476,
21477,
21478,
21479,
21480,
21481,
21482,
21483,
21896,
21897,
21898,
21899,
22171,
22172,
22173,
22174,
22175,
22176,
22177,
22181,
22182,
22183,
22184,
22185,
22186,
22187,
22191,
22192,
22193,
22194,
22195,
22196,
22197,
25884,
27205,
27206,
27207,
27208,
27209,
27210,
27211,
27212,
27213,
27214,
27215,
27216,
27217,
27218,
27219,
27220,
27221,
27222,
27223,
27224,
27225,
27226,
27227,
27228,
27229,
27230,
27231,
27232,
27391,
27392,
27393,
27394,
27395,
27396,
27397,
27398,
27492,
28402,
28403,
28404,
28405,
28406,
28407,
28408,
28409,
28410,
28411,
28412,
28413,
28414,
28415,
28416,
28417,
28418,
28419,
28420,
28421,
28422,
28423,
28424,
28425,
28426,
28427,
28428,
28429,
28430,
28431,
28432,
28462,
28463,
28464,
28465,
28466,
28467,
28468,
28469,
28470,
28471,
28472,
28473,
28474,
28475,
28476,
28477,
28478,
28479,
28480,
28481,
28482,
28483,
28484,
28485,
28486,
28487,
28488,
28489,
28490,
28491,
28492,
29701,
29702,
30161,
30162,
30163,
30164,
30165,
30166,
30167,
30168,
30169,
30170,
30171,
30172,
30173,
30174,
30175,
30176,
30177,
30178,
30179,
30800,
31251,
31252,
31253,
31254,
31255,
31256,
31257,
31258,
31259,
31275,
31276,
31277,
31278,
31279,
31281,
31282,
31283,
31284,
31285,
31286,
31287,
31288,
31289,
31290,
31700
])
class Crs(object):
    """A parsed OGC Coordinate Reference System identifier.

    :param string crs: the coordinate reference system. Examples:

    * EPSG:<EPSG code>
    * http://www.opengis.net/def/crs/EPSG/0/<EPSG code> (URI Style 1)
    * http://www.opengis.net/gml/srs/epsg.xml#<EPSG code> (URI Style 2)
    * urn:EPSG:geographicCRS:<epsg code>
    * urn:ogc:def:crs:EPSG::4326
    * urn:ogc:def:crs:EPSG:4326

    :param string axisorder: Force / override axisorder ('xy' or 'yx').
        When omitted, the axis order is looked up in ``axisorder_yx``.
    """

    def __init__(self, crs, axisorder=None):
        self.id = crs
        self.naming_authority = None
        self.category = None
        self.type = None
        self.authority = None
        self.version = None
        self.code = -1
        self.axisorder = 'xy'
        self.encoding = "code"
        if axisorder is not None:  # forced axisorder
            self.axisorder = axisorder
        values = self.id.split(':')
        if self.id.find('/def/crs/') != -1:  # URI Style 1
            self.encoding = "uri"
            vals = self.id.split('/')
            self.authority = vals[5].upper()
            self.code = int(vals[-1])
        elif self.id.find('#') != -1:  # URI Style 2
            self.encoding = "uri"
            vals = self.id.split('#')
            self.authority = vals[0].split('/')[-1].split('.')[0].upper()
            self.code = int(vals[-1])
        elif len(values) > 2:  # it's a URN style
            self.naming_authority = values[1]
            self.encoding = "urn"
            if len(values) == 3:  # bogus
                pass
            elif len(values) == 4:
                self.type = values[2]
            else:
                self.category = values[2]
                self.type = values[3]
                self.authority = values[4].upper()
                if len(values) == 7:  # version, even if empty, is included
                    if values[5]:
                        self.version = values[5]
            # code is always the last value; some authorities use
            # non-numeric codes (e.g. OGC:CRS84), so keep the string
            # when it is not an integer.  The previous bare ``except``
            # is narrowed: only int() can fail here, with ValueError.
            try:
                self.code = int(values[-1])
            except ValueError:
                self.code = values[-1]
        elif len(values) == 2:  # it's an authority:code code
            self.encoding = "code"
            self.authority = values[0].upper()
            # Accept non-numeric codes here too, consistent with the URN
            # branch above (previously this raised ValueError).
            try:
                self.code = int(values[1])
            except ValueError:
                self.code = values[1]
        # if the user has not forced the axisorder, scan the list of
        # codes that have an axis ordering of yx and set accordingly
        if axisorder is None:
            if self.code in axisorder_yx:
                self.axisorder = 'yx'

    def getcode(self):
        """Create for example "EPSG:4326" string and return back

        :returns: String code formated in "authority:code", or None when
            the authority or code is unknown.
        """
        if self.authority is not None and self.code is not None:
            return '%s:%s' % (self.authority, self.code)
        return None

    def getcodeurn(self):
        """Create for example "urn:ogc:def:crs:EPSG::4326" string and return back

        :returns: String code formated in "urn:ogc:def:authority:code"
        """
        return 'urn:%s:def:crs:%s:%s:%s' % (
            (self.naming_authority and self.naming_authority or "ogc"),
            (self.authority or ""),
            (self.version or ""),
            (self.code or ""))

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.getcodeurn() == other.getcodeurn()
        return False

    def __ne__(self, other):
        # Python 2 does not derive != from __eq__; keep them consistent.
        return not self.__eq__(other)

    def __hash__(self):
        # Hash on the same canonical form as __eq__ so equal CRS objects
        # hash equal (the implicit id()-based hash violated that).
        return hash(self.getcodeurn())

    def __repr__(self):
        return self.getcodeurn()
| gpl-2.0 |
gbiggs/pykg-config | pykg_config/props.py | 2 | 2895 | # Copyright (c) 2009-2012, Geoffrey Biggs
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Geoffrey Biggs nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# File: props.py
# Author: Geoffrey Biggs
# Part of pykg-config.
"""Defines the empty property dictionaries.
These are defined here to keep them together for ease of maintenance
when new properties are added.
"""
__version__ = "$Revision: $"
# $Source$
from pykg_config.version import Version
# Template for the raw, as-read properties of a pkg-config file:
# every field starts out as an empty string.
empty_raw_props = dict.fromkeys(
    ('name', 'description', 'url', 'version', 'requires',
     'requires.private', 'conflicts', 'cflags', 'libs', 'libs.private'),
    '')
# Template for the processed (parsed) properties.  Note the key naming:
# raw 'libs.private' is split into the 'private.*' entries here.
empty_processed_props = {
    'name': '',
    'description': 'No description',
    'url': '',
    'version': Version(),
    'requires': [],
    'requires.private': [],
    'conflicts': [],
    'include_dirs': [],
    'other_cflags': [],
    'libs': [],
    'libpaths': [],
    'otherlibs': [],
    'private.libs': [],
    'private.libpaths': [],
    'private.otherlibs': [],
}
# vim: tw=79
| bsd-3-clause |
thensgens/vvs | src/werkzeug/testsuite/contrib/cache.py | 61 | 4468 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.cache
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the cache system
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import time
import unittest
import tempfile
import shutil
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.contrib import cache
try:
import redis
except ImportError:
redis = None
class SimpleCacheTestCase(WerkzeugTestCase):
    """Tests for the in-memory SimpleCache backend."""

    def test_get_dict(self):
        # Values stored individually must come back together via get_dict().
        c = cache.SimpleCache()
        for key in ('a', 'b'):
            c.set(key, key)
        fetched = c.get_dict('a', 'b')
        assert fetched.get('a') == 'a'
        assert fetched.get('b') == 'b'

    def test_set_many(self):
        # set_many() must accept both a mapping and an iterable of pairs.
        c = cache.SimpleCache()
        c.set_many({0: 0, 1: 1, 2: 4})
        assert c.get(2) == 4
        pairs = ((i, i * i) for i in xrange(3))
        c.set_many(pairs)
        assert c.get(2) == 4
class FileSystemCacheTestCase(WerkzeugTestCase):
    """Tests for the file-system backed cache.

    Each test creates its own temporary directory and removes it again
    even when the test body raises, so failing runs do not leave
    directories behind (previously only test_set_get cleaned up on
    failure).
    """

    def test_set_get(self):
        tmp_dir = tempfile.mkdtemp()
        try:
            c = cache.FileSystemCache(cache_dir=tmp_dir)
            for i in range(3):
                c.set(str(i), i * i)
            for i in range(3):
                result = c.get(str(i))
                assert result == i * i
        finally:
            shutil.rmtree(tmp_dir)

    def test_filesystemcache_prune(self):
        # Storing more than `threshold` entries must trigger pruning so the
        # number of cache files stays at or below the threshold.
        THRESHOLD = 13
        tmp_dir = tempfile.mkdtemp()
        try:
            c = cache.FileSystemCache(cache_dir=tmp_dir, threshold=THRESHOLD)
            for i in range(2 * THRESHOLD):
                c.set(str(i), i)
            cache_files = os.listdir(tmp_dir)
        finally:
            shutil.rmtree(tmp_dir)
        assert len(cache_files) <= THRESHOLD

    def test_filesystemcache_clear(self):
        tmp_dir = tempfile.mkdtemp()
        try:
            c = cache.FileSystemCache(cache_dir=tmp_dir)
            c.set('foo', 'bar')
            cache_files = os.listdir(tmp_dir)
            assert len(cache_files) == 1
            c.clear()
            cache_files = os.listdir(tmp_dir)
            assert len(cache_files) == 0
        finally:
            shutil.rmtree(tmp_dir)
class RedisCacheTestCase(WerkzeugTestCase):
    """Integration tests for the Redis-backed cache.

    These require the ``redis`` client library and a reachable Redis
    server (the suite only includes them when ``redis`` is importable).
    """

    def make_cache(self):
        # Prefix every key so tests do not clash with other data in a
        # shared Redis instance.
        return cache.RedisCache(key_prefix='werkzeug-test-case:')

    def teardown(self):
        # Remove all keys written under the test prefix.
        self.make_cache().clear()

    def test_compat(self):
        # Values written directly through the redis client (bypassing the
        # cache's serialization) must still be readable via get().
        c = self.make_cache()
        c._client.set(c.key_prefix + 'foo', 'Awesome')
        self.assert_equal(c.get('foo'), 'Awesome')
        # A purely numeric string comes back as an int.
        c._client.set(c.key_prefix + 'foo', '42')
        self.assert_equal(c.get('foo'), 42)

    def test_get_set(self):
        # Non-string values survive a set/get round trip.
        c = self.make_cache()
        c.set('foo', ['bar'])
        assert c.get('foo') == ['bar']

    def test_get_many(self):
        # get_many() returns values in the order the keys were requested.
        c = self.make_cache()
        c.set('foo', ['bar'])
        c.set('spam', 'eggs')
        assert c.get_many('foo', 'spam') == [['bar'], 'eggs']

    def test_set_many(self):
        c = self.make_cache()
        c.set_many({'foo': 'bar', 'spam': ['eggs']})
        assert c.get('foo') == 'bar'
        assert c.get('spam') == ['eggs']

    def test_expire(self):
        # A 1-second timeout must expire the key; the sleep makes this
        # test intentionally slow (~2s).
        c = self.make_cache()
        c.set('foo', 'bar', 1)
        time.sleep(2)
        assert c.get('foo') is None

    def test_add(self):
        c = self.make_cache()
        # sanity check that add() works like set()
        c.add('foo', 'bar')
        assert c.get('foo') == 'bar'
        # ...but add() must NOT overwrite an existing key.
        c.add('foo', 'qux')
        assert c.get('foo') == 'bar'

    def test_delete(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        assert c.get('foo') == 'bar'
        c.delete('foo')
        assert c.get('foo') is None

    def test_delete_many(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        c.add('spam', 'eggs')
        c.delete_many('foo', 'spam')
        assert c.get('foo') is None
        assert c.get('spam') is None

    def test_inc_dec(self):
        # inc()/dec() return the new value after the operation.
        c = self.make_cache()
        c.set('foo', 1)
        assert c.inc('foo') == 2
        assert c.dec('foo') == 1
        c.delete('foo')

    def test_true_false(self):
        # Booleans must round-trip as booleans, not as strings/ints.
        c = self.make_cache()
        c.set('foo', True)
        assert c.get('foo') == True
        c.set('bar', False)
        assert c.get('bar') == False
def suite():
    """Build the cache test suite; the Redis tests are only included when
    the ``redis`` client library is importable."""
    cases = [SimpleCacheTestCase, FileSystemCacheTestCase]
    if redis is not None:
        cases.append(RedisCacheTestCase)
    result = unittest.TestSuite()
    for case in cases:
        result.addTest(unittest.makeSuite(case))
    return result
| mit |
rajsadho/django | tests/forms_tests/tests/test_input_formats.py | 313 | 38501 | from datetime import date, datetime, time
from django import forms
from django.test import SimpleTestCase, override_settings
from django.utils.translation import activate, deactivate
@override_settings(TIME_INPUT_FORMATS=["%I:%M:%S %p", "%I:%M %p"], USE_L10N=True)
class LocalizedTimeTests(SimpleTestCase):
    """TimeField parsing/rendering while the 'nl' locale is active.

    nl/formats.py customizes TIME_INPUT_FORMATS to
    ['%H:%M:%S', '%H.%M:%S', '%H.%M', '%H:%M'], so with USE_L10N the
    locale formats take precedence over the settings override above.
    """

    def setUp(self):
        activate('nl')

    def tearDown(self):
        deactivate()

    def test_timeField(self):
        "TimeFields can parse dates in the default format"
        field = forms.TimeField()
        # An AM/PM time is not among the locale's accepted formats.
        with self.assertRaises(forms.ValidationError):
            field.clean('1:30:05 PM')
        # A time in the locale's default format parses...
        parsed = field.clean('13:30:05')
        self.assertEqual(parsed, time(13, 30, 5))
        # ...and round-trips through the widget unchanged.
        self.assertEqual(field.widget._format_value(parsed), '13:30:05')
        # A valid, non-default locale format also parses...
        parsed = field.clean('13:30')
        self.assertEqual(parsed, time(13, 30, 0))
        # ...and renders back in the default format.
        self.assertEqual(field.widget._format_value(parsed), '13:30:00')
        # ISO formats are accepted even if not listed in formats.py.
        parsed = field.clean('13:30:05.000155')
        self.assertEqual(parsed, time(13, 30, 5, 155))

    def test_localized_timeField(self):
        "Localized TimeFields act as unlocalized widgets"
        field = forms.TimeField(localize=True)
        with self.assertRaises(forms.ValidationError):
            field.clean('1:30:05 PM')
        parsed = field.clean('13:30:05')
        self.assertEqual(parsed, time(13, 30, 5))
        self.assertEqual(field.widget._format_value(parsed), '13:30:05')
        parsed = field.clean('13:30')
        self.assertEqual(parsed, time(13, 30, 0))
        self.assertEqual(field.widget._format_value(parsed), '13:30:00')

    def test_timeField_with_inputformat(self):
        "TimeFields with manually specified input formats can accept those formats"
        field = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"])
        # Neither AM/PM nor colon-separated times are accepted now.
        for bad_value in ('1:30:05 PM', '13:30:05'):
            with self.assertRaises(forms.ValidationError):
                field.clean(bad_value)
        parsed = field.clean('13.30.05')
        self.assertEqual(parsed, time(13, 30, 5))
        self.assertEqual(field.widget._format_value(parsed), '13:30:05')
        parsed = field.clean('13.30')
        self.assertEqual(parsed, time(13, 30, 0))
        self.assertEqual(field.widget._format_value(parsed), '13:30:00')

    def test_localized_timeField_with_inputformat(self):
        "Localized TimeFields with manually specified input formats can accept those formats"
        field = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"], localize=True)
        for bad_value in ('1:30:05 PM', '13:30:05'):
            with self.assertRaises(forms.ValidationError):
                field.clean(bad_value)
        parsed = field.clean('13.30.05')
        self.assertEqual(parsed, time(13, 30, 5))
        self.assertEqual(field.widget._format_value(parsed), '13:30:05')
        parsed = field.clean('13.30')
        self.assertEqual(parsed, time(13, 30, 0))
        self.assertEqual(field.widget._format_value(parsed), '13:30:00')
@override_settings(TIME_INPUT_FORMATS=["%I:%M:%S %p", "%I:%M %p"])
class CustomTimeInputFormatsTests(SimpleTestCase):
    """TimeField behaviour with AM/PM based TIME_INPUT_FORMATS overridden
    in settings and no locale-driven formats (USE_L10N not enabled)."""

    def test_timeField(self):
        "TimeFields can parse dates in the default format"
        field = forms.TimeField()
        # 24-hour times are not among the configured input formats.
        with self.assertRaises(forms.ValidationError):
            field.clean('13:30:05')
        parsed = field.clean('1:30:05 PM')
        self.assertEqual(parsed, time(13, 30, 5))
        self.assertEqual(field.widget._format_value(parsed), '01:30:05 PM')
        parsed = field.clean('1:30 PM')
        self.assertEqual(parsed, time(13, 30, 0))
        self.assertEqual(field.widget._format_value(parsed), '01:30:00 PM')

    def test_localized_timeField(self):
        "Localized TimeFields act as unlocalized widgets"
        field = forms.TimeField(localize=True)
        with self.assertRaises(forms.ValidationError):
            field.clean('13:30:05')
        parsed = field.clean('1:30:05 PM')
        self.assertEqual(parsed, time(13, 30, 5))
        self.assertEqual(field.widget._format_value(parsed), '01:30:05 PM')
        parsed = field.clean('01:30 PM')
        self.assertEqual(parsed, time(13, 30, 0))
        self.assertEqual(field.widget._format_value(parsed), '01:30:00 PM')

    def test_timeField_with_inputformat(self):
        "TimeFields with manually specified input formats can accept those formats"
        field = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"])
        # Neither AM/PM nor colon-separated times are accepted now.
        for bad_value in ('1:30:05 PM', '13:30:05'):
            with self.assertRaises(forms.ValidationError):
                field.clean(bad_value)
        parsed = field.clean('13.30.05')
        self.assertEqual(parsed, time(13, 30, 5))
        self.assertEqual(field.widget._format_value(parsed), '01:30:05 PM')
        parsed = field.clean('13.30')
        self.assertEqual(parsed, time(13, 30, 0))
        self.assertEqual(field.widget._format_value(parsed), '01:30:00 PM')

    def test_localized_timeField_with_inputformat(self):
        "Localized TimeFields with manually specified input formats can accept those formats"
        field = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"], localize=True)
        for bad_value in ('1:30:05 PM', '13:30:05'):
            with self.assertRaises(forms.ValidationError):
                field.clean(bad_value)
        parsed = field.clean('13.30.05')
        self.assertEqual(parsed, time(13, 30, 5))
        self.assertEqual(field.widget._format_value(parsed), '01:30:05 PM')
        parsed = field.clean('13.30')
        self.assertEqual(parsed, time(13, 30, 0))
        self.assertEqual(field.widget._format_value(parsed), '01:30:00 PM')
class SimpleTimeFormatTests(SimpleTestCase):
    # No locale or settings override is active, so TimeField uses the
    # default (ISO-style) time formats for both input and output.

    def test_timeField(self):
        "TimeFields can parse dates in the default format"
        field = forms.TimeField()
        with self.assertRaises(forms.ValidationError):
            field.clean('1:30:05 PM')
        # Valid inputs parse and round-trip via the default output format.
        for raw, parsed_value, rendered in [
                ('13:30:05', time(13, 30, 5), "13:30:05"),
                ('13:30', time(13, 30, 0), "13:30:00")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, parsed_value)
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_localized_timeField(self):
        "Localized TimeFields in a non-localized environment act as unlocalized widgets"
        field = forms.TimeField()
        with self.assertRaises(forms.ValidationError):
            field.clean('1:30:05 PM')
        for raw, parsed_value, rendered in [
                ('13:30:05', time(13, 30, 5), "13:30:05"),
                ('13:30', time(13, 30, 0), "13:30:00")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, parsed_value)
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_timeField_with_inputformat(self):
        "TimeFields with manually specified input formats can accept those formats"
        field = forms.TimeField(input_formats=["%I:%M:%S %p", "%I:%M %p"])
        with self.assertRaises(forms.ValidationError):
            field.clean('13:30:05')
        # 12-hour inputs parse; rendering still uses the default format.
        for raw, parsed_value, rendered in [
                ('1:30:05 PM', time(13, 30, 5), "13:30:05"),
                ('1:30 PM', time(13, 30, 0), "13:30:00")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, parsed_value)
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_localized_timeField_with_inputformat(self):
        "Localized TimeFields with manually specified input formats can accept those formats"
        field = forms.TimeField(input_formats=["%I:%M:%S %p", "%I:%M %p"], localize=True)
        with self.assertRaises(forms.ValidationError):
            field.clean('13:30:05')
        for raw, parsed_value, rendered in [
                ('1:30:05 PM', time(13, 30, 5), "13:30:05"),
                ('1:30 PM', time(13, 30, 0), "13:30:00")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, parsed_value)
            self.assertEqual(field.widget._format_value(parsed), rendered)
@override_settings(DATE_INPUT_FORMATS=["%d/%m/%Y", "%d-%m-%Y"], USE_L10N=True)
class LocalizedDateTests(SimpleTestCase):
    # DATE_INPUT_FORMATS is overridden and the 'de' locale is activated,
    # so localized formats drive both parsing and widget rendering.

    def setUp(self):
        activate('de')

    def tearDown(self):
        deactivate()

    def test_dateField(self):
        "DateFields can parse dates in the default format"
        field = forms.DateField()
        with self.assertRaises(forms.ValidationError):
            field.clean('21/12/2010')
        # ISO formats are accepted, even if not specified in formats.py
        self.assertEqual(field.clean('2010-12-21'), date(2010, 12, 21))
        # Valid localized inputs parse and render via the default format.
        for raw, rendered in [('21.12.2010', '21.12.2010'),
                              ('21.12.10', "21.12.2010")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, date(2010, 12, 21))
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_localized_dateField(self):
        "Localized DateFields act as unlocalized widgets"
        field = forms.DateField(localize=True)
        with self.assertRaises(forms.ValidationError):
            field.clean('21/12/2010')
        for raw, rendered in [('21.12.2010', '21.12.2010'),
                              ('21.12.10', "21.12.2010")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, date(2010, 12, 21))
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_dateField_with_inputformat(self):
        "DateFields with manually specified input formats can accept those formats"
        field = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"])
        # Explicit input_formats replace the overridden/localized ones.
        for invalid in ('2010-12-21', '21/12/2010', '21.12.2010'):
            with self.assertRaises(forms.ValidationError):
                field.clean(invalid)
        for raw, rendered in [('12.21.2010', "21.12.2010"),
                              ('12-21-2010', "21.12.2010")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, date(2010, 12, 21))
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_localized_dateField_with_inputformat(self):
        "Localized DateFields with manually specified input formats can accept those formats"
        field = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"], localize=True)
        for invalid in ('2010-12-21', '21/12/2010', '21.12.2010'):
            with self.assertRaises(forms.ValidationError):
                field.clean(invalid)
        for raw, rendered in [('12.21.2010', "21.12.2010"),
                              ('12-21-2010', "21.12.2010")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, date(2010, 12, 21))
            self.assertEqual(field.widget._format_value(parsed), rendered)
@override_settings(DATE_INPUT_FORMATS=["%d.%m.%Y", "%d-%m-%Y"])
class CustomDateInputFormatsTests(SimpleTestCase):
    # DATE_INPUT_FORMATS is overridden (no locale active), so the settings
    # formats drive parsing while rendering uses the default output format.

    def test_dateField(self):
        "DateFields can parse dates in the default format"
        field = forms.DateField()
        with self.assertRaises(forms.ValidationError):
            field.clean('2010-12-21')
        for raw, rendered in [('21.12.2010', '21.12.2010'),
                              ('21-12-2010', "21.12.2010")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, date(2010, 12, 21))
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_localized_dateField(self):
        "Localized DateFields act as unlocalized widgets"
        field = forms.DateField(localize=True)
        with self.assertRaises(forms.ValidationError):
            field.clean('2010-12-21')
        for raw, rendered in [('21.12.2010', '21.12.2010'),
                              ('21-12-2010', "21.12.2010")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, date(2010, 12, 21))
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_dateField_with_inputformat(self):
        "DateFields with manually specified input formats can accept those formats"
        field = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"])
        # Explicit input_formats replace the settings-provided ones.
        for invalid in ('21.12.2010', '2010-12-21'):
            with self.assertRaises(forms.ValidationError):
                field.clean(invalid)
        for raw, rendered in [('12.21.2010', "21.12.2010"),
                              ('12-21-2010', "21.12.2010")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, date(2010, 12, 21))
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_localized_dateField_with_inputformat(self):
        "Localized DateFields with manually specified input formats can accept those formats"
        field = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"], localize=True)
        for invalid in ('21.12.2010', '2010-12-21'):
            with self.assertRaises(forms.ValidationError):
                field.clean(invalid)
        for raw, rendered in [('12.21.2010', "21.12.2010"),
                              ('12-21-2010', "21.12.2010")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, date(2010, 12, 21))
            self.assertEqual(field.widget._format_value(parsed), rendered)
class SimpleDateFormatTests(SimpleTestCase):
    # No locale or settings override is active, so DateField uses the
    # default (ISO) formats for both input and output.

    def test_dateField(self):
        "DateFields can parse dates in the default format"
        field = forms.DateField()
        with self.assertRaises(forms.ValidationError):
            field.clean('21.12.2010')
        for raw, rendered in [('2010-12-21', "2010-12-21"),
                              ('12/21/2010', "2010-12-21")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, date(2010, 12, 21))
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_localized_dateField(self):
        "Localized DateFields in a non-localized environment act as unlocalized widgets"
        field = forms.DateField()
        with self.assertRaises(forms.ValidationError):
            field.clean('21.12.2010')
        for raw, rendered in [('2010-12-21', "2010-12-21"),
                              ('12/21/2010', "2010-12-21")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, date(2010, 12, 21))
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_dateField_with_inputformat(self):
        "DateFields with manually specified input formats can accept those formats"
        field = forms.DateField(input_formats=["%d.%m.%Y", "%d-%m-%Y"])
        with self.assertRaises(forms.ValidationError):
            field.clean('2010-12-21')
        # Custom inputs parse; rendering still uses the default ISO format.
        for raw, rendered in [('21.12.2010', "2010-12-21"),
                              ('21-12-2010', "2010-12-21")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, date(2010, 12, 21))
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_localized_dateField_with_inputformat(self):
        "Localized DateFields with manually specified input formats can accept those formats"
        field = forms.DateField(input_formats=["%d.%m.%Y", "%d-%m-%Y"], localize=True)
        with self.assertRaises(forms.ValidationError):
            field.clean('2010-12-21')
        for raw, rendered in [('21.12.2010', "2010-12-21"),
                              ('21-12-2010', "2010-12-21")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, date(2010, 12, 21))
            self.assertEqual(field.widget._format_value(parsed), rendered)
@override_settings(DATETIME_INPUT_FORMATS=["%I:%M:%S %p %d/%m/%Y", "%I:%M %p %d-%m-%Y"], USE_L10N=True)
class LocalizedDateTimeTests(SimpleTestCase):
    """DateTimeField parsing/rendering with localization active.

    DATETIME_INPUT_FORMATS is overridden and the 'de' locale is activated,
    so localized formats drive parsing and widget output.
    """

    def setUp(self):
        activate('de')

    def tearDown(self):
        deactivate()

    def test_dateTimeField(self):
        "DateTimeFields can parse dates in the default format"
        f = forms.DateTimeField()
        # Parse a date in an unaccepted format; get an error
        self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM 21/12/2010')
        # ISO formats are accepted, even if not specified in formats.py
        self.assertEqual(f.clean('2010-12-21 13:30:05'), datetime(2010, 12, 21, 13, 30, 5))
        # Parse a date in a valid format, get a parsed result
        result = f.clean('21.12.2010 13:30:05')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
        # Check that the parsed result does a round trip
        text = f.widget._format_value(result)
        self.assertEqual(text, '21.12.2010 13:30:05')
        # Parse a date in a valid, but non-default format, get a parsed result
        result = f.clean('21.12.2010 13:30')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
        # Check that the parsed result does a round trip to default format
        text = f.widget._format_value(result)
        self.assertEqual(text, "21.12.2010 13:30:00")

    def test_localized_dateTimeField(self):
        "Localized DateTimeFields act as unlocalized widgets"
        f = forms.DateTimeField(localize=True)
        # Parse a date in an unaccepted format; get an error
        self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM 21/12/2010')
        # Parse a date in a valid format, get a parsed result
        result = f.clean('21.12.2010 13:30:05')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
        # Check that the parsed result does a round trip to the same format
        text = f.widget._format_value(result)
        self.assertEqual(text, '21.12.2010 13:30:05')
        # Parse a date in a valid format, get a parsed result
        result = f.clean('21.12.2010 13:30')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
        # Check that the parsed result does a round trip to default format
        text = f.widget._format_value(result)
        self.assertEqual(text, "21.12.2010 13:30:00")

    def test_dateTimeField_with_inputformat(self):
        "DateTimeFields with manually specified input formats can accept those formats"
        f = forms.DateTimeField(input_formats=["%H.%M.%S %m.%d.%Y", "%H.%M %m-%d-%Y"])
        # Parse a date in an unaccepted format; get an error.
        # Fixed: the first rejected input previously had the time doubled
        # ('2010-12-21 13:30:05 13:30:05'); use the same input as the
        # localized variant of this test.
        self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
        self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM 21/12/2010')
        self.assertRaises(forms.ValidationError, f.clean, '13:30:05 21.12.2010')
        # Parse a date in a valid format, get a parsed result
        result = f.clean('13.30.05 12.21.2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
        # Check that the parsed result does a round trip to the same format
        text = f.widget._format_value(result)
        self.assertEqual(text, "21.12.2010 13:30:05")
        # Parse a date in a valid format, get a parsed result
        result = f.clean('13.30 12-21-2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
        # Check that the parsed result does a round trip to default format
        text = f.widget._format_value(result)
        self.assertEqual(text, "21.12.2010 13:30:00")

    def test_localized_dateTimeField_with_inputformat(self):
        "Localized DateTimeFields with manually specified input formats can accept those formats"
        f = forms.DateTimeField(input_formats=["%H.%M.%S %m.%d.%Y", "%H.%M %m-%d-%Y"], localize=True)
        # Parse a date in an unaccepted format; get an error
        self.assertRaises(forms.ValidationError, f.clean, '2010-12-21 13:30:05')
        self.assertRaises(forms.ValidationError, f.clean, '1:30:05 PM 21/12/2010')
        self.assertRaises(forms.ValidationError, f.clean, '13:30:05 21.12.2010')
        # Parse a date in a valid format, get a parsed result
        result = f.clean('13.30.05 12.21.2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
        # Check that the parsed result does a round trip to the same format
        text = f.widget._format_value(result)
        self.assertEqual(text, "21.12.2010 13:30:05")
        # Parse a date in a valid format, get a parsed result
        result = f.clean('13.30 12-21-2010')
        self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
        # Check that the parsed result does a round trip to default format
        text = f.widget._format_value(result)
        self.assertEqual(text, "21.12.2010 13:30:00")
@override_settings(DATETIME_INPUT_FORMATS=["%I:%M:%S %p %d/%m/%Y", "%I:%M %p %d-%m-%Y"])
class CustomDateTimeInputFormatsTests(SimpleTestCase):
    # DATETIME_INPUT_FORMATS is overridden (no locale active); the settings
    # formats drive parsing and the default output format drives rendering.

    def test_dateTimeField(self):
        "DateTimeFields can parse dates in the default format"
        field = forms.DateTimeField()
        with self.assertRaises(forms.ValidationError):
            field.clean('2010-12-21 13:30:05')
        for raw, parsed_value, rendered in [
                ('1:30:05 PM 21/12/2010', datetime(2010, 12, 21, 13, 30, 5),
                 '01:30:05 PM 21/12/2010'),
                ('1:30 PM 21-12-2010', datetime(2010, 12, 21, 13, 30),
                 "01:30:00 PM 21/12/2010")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, parsed_value)
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_localized_dateTimeField(self):
        "Localized DateTimeFields act as unlocalized widgets"
        field = forms.DateTimeField(localize=True)
        with self.assertRaises(forms.ValidationError):
            field.clean('2010-12-21 13:30:05')
        for raw, parsed_value, rendered in [
                ('1:30:05 PM 21/12/2010', datetime(2010, 12, 21, 13, 30, 5),
                 '01:30:05 PM 21/12/2010'),
                ('1:30 PM 21-12-2010', datetime(2010, 12, 21, 13, 30),
                 "01:30:00 PM 21/12/2010")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, parsed_value)
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_dateTimeField_with_inputformat(self):
        "DateTimeFields with manually specified input formats can accept those formats"
        field = forms.DateTimeField(input_formats=["%m.%d.%Y %H:%M:%S", "%m-%d-%Y %H:%M"])
        # Explicit input_formats replace the settings-provided ones.
        for invalid in ('13:30:05 21.12.2010', '2010-12-21 13:30:05'):
            with self.assertRaises(forms.ValidationError):
                field.clean(invalid)
        for raw, parsed_value, rendered in [
                ('12.21.2010 13:30:05', datetime(2010, 12, 21, 13, 30, 5),
                 "01:30:05 PM 21/12/2010"),
                ('12-21-2010 13:30', datetime(2010, 12, 21, 13, 30),
                 "01:30:00 PM 21/12/2010")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, parsed_value)
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_localized_dateTimeField_with_inputformat(self):
        "Localized DateTimeFields with manually specified input formats can accept those formats"
        field = forms.DateTimeField(input_formats=["%m.%d.%Y %H:%M:%S", "%m-%d-%Y %H:%M"], localize=True)
        for invalid in ('13:30:05 21.12.2010', '2010-12-21 13:30:05'):
            with self.assertRaises(forms.ValidationError):
                field.clean(invalid)
        for raw, parsed_value, rendered in [
                ('12.21.2010 13:30:05', datetime(2010, 12, 21, 13, 30, 5),
                 "01:30:05 PM 21/12/2010"),
                ('12-21-2010 13:30', datetime(2010, 12, 21, 13, 30),
                 "01:30:00 PM 21/12/2010")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, parsed_value)
            self.assertEqual(field.widget._format_value(parsed), rendered)
class SimpleDateTimeFormatTests(SimpleTestCase):
    # No locale or settings override is active, so DateTimeField uses the
    # default (ISO) formats for both input and output.

    def test_dateTimeField(self):
        "DateTimeFields can parse dates in the default format"
        field = forms.DateTimeField()
        with self.assertRaises(forms.ValidationError):
            field.clean('13:30:05 21.12.2010')
        for raw, parsed_value, rendered in [
                ('2010-12-21 13:30:05', datetime(2010, 12, 21, 13, 30, 5),
                 "2010-12-21 13:30:05"),
                ('12/21/2010 13:30:05', datetime(2010, 12, 21, 13, 30, 5),
                 "2010-12-21 13:30:05")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, parsed_value)
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_localized_dateTimeField(self):
        "Localized DateTimeFields in a non-localized environment act as unlocalized widgets"
        field = forms.DateTimeField()
        with self.assertRaises(forms.ValidationError):
            field.clean('13:30:05 21.12.2010')
        for raw, parsed_value, rendered in [
                ('2010-12-21 13:30:05', datetime(2010, 12, 21, 13, 30, 5),
                 "2010-12-21 13:30:05"),
                ('12/21/2010 13:30:05', datetime(2010, 12, 21, 13, 30, 5),
                 "2010-12-21 13:30:05")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, parsed_value)
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_dateTimeField_with_inputformat(self):
        "DateTimeFields with manually specified input formats can accept those formats"
        field = forms.DateTimeField(input_formats=["%I:%M:%S %p %d.%m.%Y", "%I:%M %p %d-%m-%Y"])
        with self.assertRaises(forms.ValidationError):
            field.clean('2010-12-21 13:30:05')
        # Custom inputs parse; rendering still uses the default ISO format.
        for raw, parsed_value, rendered in [
                ('1:30:05 PM 21.12.2010', datetime(2010, 12, 21, 13, 30, 5),
                 "2010-12-21 13:30:05"),
                ('1:30 PM 21-12-2010', datetime(2010, 12, 21, 13, 30),
                 "2010-12-21 13:30:00")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, parsed_value)
            self.assertEqual(field.widget._format_value(parsed), rendered)

    def test_localized_dateTimeField_with_inputformat(self):
        "Localized DateTimeFields with manually specified input formats can accept those formats"
        field = forms.DateTimeField(input_formats=["%I:%M:%S %p %d.%m.%Y", "%I:%M %p %d-%m-%Y"], localize=True)
        with self.assertRaises(forms.ValidationError):
            field.clean('2010-12-21 13:30:05')
        for raw, parsed_value, rendered in [
                ('1:30:05 PM 21.12.2010', datetime(2010, 12, 21, 13, 30, 5),
                 "2010-12-21 13:30:05"),
                ('1:30 PM 21-12-2010', datetime(2010, 12, 21, 13, 30),
                 "2010-12-21 13:30:00")]:
            parsed = field.clean(raw)
            self.assertEqual(parsed, parsed_value)
            self.assertEqual(field.widget._format_value(parsed), rendered)
| bsd-3-clause |
fduraffourg/servo | tests/wpt/harness/wptrunner/tests/test_update.py | 97 | 15333 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
import StringIO
from .. import metadata, manifestupdate
from mozlog import structuredlog, handlers, formatters
class TestExpectedUpdater(unittest.TestCase):
def create_manifest(self, data, test_path="path/to/test.ini"):
    # Compile an expectation manifest from an in-memory ini string.
    return manifestupdate.compile(StringIO.StringIO(data), test_path)
def create_updater(self, data, **kwargs):
    # Build an ExpectedUpdater from (test_path, test id(s), manifest text)
    # triples; a single id may be given as a bare string.
    tree = {}
    id_map = {}
    for test_path, test_ids, manifest_str in data:
        if isinstance(test_ids, (str, unicode)):
            test_ids = [test_ids]
        tree[test_path] = self.create_manifest(manifest_str, test_path)
        id_map.update((test_id, test_path) for test_id in test_ids)
    return metadata.ExpectedUpdater(tree, id_map, **kwargs)
def create_log(self, *args, **kwargs):
    """Build an in-memory JSON structured-log stream.

    ``args`` are ``(action, params)`` pairs; a ``suite_start`` /
    ``suite_end`` pair is wrapped around them automatically, with the
    optional ``run_info`` keyword forwarded to ``suite_start``.  Returns
    a StringIO rewound to the start, ready for ``update_from_log``.
    """
    logger = structuredlog.StructuredLogger("expected_test")
    data = StringIO.StringIO()
    handler = handlers.StreamHandler(data, formatters.JSONFormatter())
    logger.add_handler(handler)
    log_entries = ([("suite_start", {"tests": [], "run_info": kwargs.get("run_info", {})})] +
                   list(args) +
                   [("suite_end", {})])
    # Unpack into a fresh name: the original rebound ``kwargs`` here,
    # silently clobbering the function's own keyword-argument dict.
    for action, params in log_entries:
        getattr(logger, action)(**params)
    logger.remove_handler(handler)
    data.seek(0)
    return data
def coalesce_results(self, trees):
    # Collapse conditional expectations bottom-up: each test's subtests
    # first, then the test node itself.
    for tree in trees:
        for test_node in tree.iterchildren():
            for subtest_node in test_node.iterchildren():
                subtest_node.coalesce_expected()
            test_node.coalesce_expected()
def test_update_0(self):
    # A result matching the default expectation removes the stale override,
    # leaving an empty manifest.
    test_id = "/path/to/test.htm"
    prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
type: testharness
[test1]
expected: FAIL""")]
    log = self.create_log(
        ("test_start", {"test": test_id}),
        ("test_status", {"test": test_id,
                         "subtest": "test1",
                         "status": "PASS",
                         "expected": "FAIL"}),
        ("test_end", {"test": test_id, "status": "OK"}))
    updater = self.create_updater(prev_data)
    updater.update_from_log(log)
    manifest = updater.expected_tree["path/to/test.htm.ini"]
    self.coalesce_results([manifest])
    self.assertTrue(manifest.is_empty)
def test_update_1(self):
    # A changed subtest result replaces the stored expectation.
    test_id = "/path/to/test.htm"
    prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
type: testharness
[test1]
expected: ERROR""")]
    log = self.create_log(
        ("test_start", {"test": test_id}),
        ("test_status", {"test": test_id,
                         "subtest": "test1",
                         "status": "FAIL",
                         "expected": "ERROR"}),
        ("test_end", {"test": test_id, "status": "OK"}))
    updater = self.create_updater(prev_data)
    updater.update_from_log(log)
    manifest = updater.expected_tree["path/to/test.htm.ini"]
    self.coalesce_results([manifest])
    self.assertFalse(manifest.is_empty)
    self.assertEquals(manifest.get_test(test_id).children[0].get("expected"), "FAIL")
def test_new_subtest(self):
    # A newly appearing subtest gets an expectation recorded alongside the
    # existing one.
    test_id = "/path/to/test.htm"
    prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
type: testharness
[test1]
expected: FAIL""")]
    log = self.create_log(
        ("test_start", {"test": test_id}),
        ("test_status", {"test": test_id,
                         "subtest": "test1",
                         "status": "FAIL",
                         "expected": "FAIL"}),
        ("test_status", {"test": test_id,
                         "subtest": "test2",
                         "status": "FAIL",
                         "expected": "PASS"}),
        ("test_end", {"test": test_id, "status": "OK"}))
    updater = self.create_updater(prev_data)
    updater.update_from_log(log)
    manifest = updater.expected_tree["path/to/test.htm.ini"]
    self.coalesce_results([manifest])
    self.assertFalse(manifest.is_empty)
    test_node = manifest.get_test(test_id)
    self.assertEquals(test_node.children[0].get("expected"), "FAIL")
    self.assertEquals(test_node.children[1].get("expected"), "FAIL")
def test_update_multiple_0(self):
    # Runs on two platforms yield platform-conditional expectations.
    test_id = "/path/to/test.htm"
    prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
type: testharness
[test1]
expected: FAIL""")]
    run_logs = [
        self.create_log(
            ("test_start", {"test": test_id}),
            ("test_status", {"test": test_id,
                             "subtest": "test1",
                             "status": "FAIL",
                             "expected": "FAIL"}),
            ("test_end", {"test": test_id, "status": "OK"}),
            run_info={"debug": False, "os": "osx"}),
        self.create_log(
            ("test_start", {"test": test_id}),
            ("test_status", {"test": test_id,
                             "subtest": "test1",
                             "status": "TIMEOUT",
                             "expected": "FAIL"}),
            ("test_end", {"test": test_id, "status": "OK"}),
            run_info={"debug": False, "os": "linux"}),
    ]
    updater = self.create_updater(prev_data)
    for log in run_logs:
        updater.update_from_log(log)
    manifest = updater.expected_tree["path/to/test.htm.ini"]
    self.coalesce_results([manifest])
    self.assertFalse(manifest.is_empty)
    subtest = manifest.get_test(test_id).children[0]
    self.assertEquals(subtest.get("expected", {"debug": False, "os": "osx"}), "FAIL")
    self.assertEquals(subtest.get("expected", {"debug": False, "os": "linux"}), "TIMEOUT")
def test_update_multiple_1(self):
    # As test_update_multiple_0, but also checks an unseen platform
    # (windows) falls back to the unconditional expectation.
    test_id = "/path/to/test.htm"
    prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
type: testharness
[test1]
expected: FAIL""")]
    run_logs = [
        self.create_log(
            ("test_start", {"test": test_id}),
            ("test_status", {"test": test_id,
                             "subtest": "test1",
                             "status": "FAIL",
                             "expected": "FAIL"}),
            ("test_end", {"test": test_id, "status": "OK"}),
            run_info={"debug": False, "os": "osx"}),
        self.create_log(
            ("test_start", {"test": test_id}),
            ("test_status", {"test": test_id,
                             "subtest": "test1",
                             "status": "TIMEOUT",
                             "expected": "FAIL"}),
            ("test_end", {"test": test_id, "status": "OK"}),
            run_info={"debug": False, "os": "linux"}),
    ]
    updater = self.create_updater(prev_data)
    for log in run_logs:
        updater.update_from_log(log)
    manifest = updater.expected_tree["path/to/test.htm.ini"]
    self.coalesce_results([manifest])
    self.assertFalse(manifest.is_empty)
    subtest = manifest.get_test(test_id).children[0]
    self.assertEquals(subtest.get("expected", {"debug": False, "os": "osx"}), "FAIL")
    self.assertEquals(subtest.get("expected", {"debug": False, "os": "linux"}), "TIMEOUT")
    self.assertEquals(subtest.get("expected", {"debug": False, "os": "windows"}), "FAIL")
def test_update_multiple_2(self):
test_id = "/path/to/test.htm"
prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
type: testharness
[test1]
expected: FAIL""")]
new_data_0 = self.create_log(("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"}),
run_info={"debug": False, "os": "osx"})
new_data_1 = self.create_log(("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "TIMEOUT",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"}),
run_info={"debug": True, "os": "osx"})
updater = self.create_updater(prev_data)
updater.update_from_log(new_data_0)
updater.update_from_log(new_data_1)
new_manifest = updater.expected_tree["path/to/test.htm.ini"]
self.coalesce_results([new_manifest])
self.assertFalse(new_manifest.is_empty)
self.assertEquals(new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": False, "os": "osx"}), "FAIL")
self.assertEquals(new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": True, "os": "osx"}), "TIMEOUT")
def test_update_multiple_3(self):
test_id = "/path/to/test.htm"
prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
type: testharness
[test1]
expected:
if debug: FAIL
if not debug and os == "osx": TIMEOUT""")]
new_data_0 = self.create_log(("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"}),
run_info={"debug": False, "os": "osx"})
new_data_1 = self.create_log(("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "TIMEOUT",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"}),
run_info={"debug": True, "os": "osx"})
updater = self.create_updater(prev_data)
updater.update_from_log(new_data_0)
updater.update_from_log(new_data_1)
new_manifest = updater.expected_tree["path/to/test.htm.ini"]
self.coalesce_results([new_manifest])
self.assertFalse(new_manifest.is_empty)
self.assertEquals(new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": False, "os": "osx"}), "FAIL")
self.assertEquals(new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": True, "os": "osx"}), "TIMEOUT")
def test_update_ignore_existing(self):
test_id = "/path/to/test.htm"
prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
type: testharness
[test1]
expected:
if debug: TIMEOUT
if not debug and os == "osx": NOTRUN""")]
new_data_0 = self.create_log(("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"}),
run_info={"debug": False, "os": "linux"})
new_data_1 = self.create_log(("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"}),
run_info={"debug": True, "os": "windows"})
updater = self.create_updater(prev_data, ignore_existing=True)
updater.update_from_log(new_data_0)
updater.update_from_log(new_data_1)
new_manifest = updater.expected_tree["path/to/test.htm.ini"]
self.coalesce_results([new_manifest])
self.assertFalse(new_manifest.is_empty)
self.assertEquals(new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": True, "os": "osx"}), "FAIL")
self.assertEquals(new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": False, "os": "osx"}), "FAIL")
| mpl-2.0 |
rmp91/jitd | java/benchmark.py | 1 | 14960 | #!/usr/bin/env python
import subprocess
import glob
import os
import re
import time
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import benchmark_configuration as config
def deleteFiles(pattern):
    """Delete every regular file in the current working directory that
    matches the glob *pattern*.

    Matching directories are left untouched (guarded by os.path.isfile).
    Improvement: build the search path with os.path.join instead of
    manual string concatenation with os.path.sep.
    """
    for match in glob.glob(os.path.join(os.getcwd(), pattern)):
        if os.path.isfile(match):
            os.remove(match)
def deleteFile(filePath):
    """Remove *filePath* if it exists and is a regular file; otherwise no-op."""
    if not os.path.isfile(filePath):
        return
    os.remove(filePath)
def deleteDataFiles():
    # Remove the generated "data*" artifacts from the working directory.
    deleteFiles("data*")

def deleteIndexFiles():
    # Remove the generated "index*" artifacts from the working directory.
    deleteFiles("index*")

def performCleanup():
    # Full cleanup between benchmark runs: drop both the data and the
    # index files that a JITD benchmark run leaves behind in the cwd.
    deleteDataFiles()
    deleteIndexFiles()
# In case you don't want to use timestamp as suffix set the flag
# 'appendTimeStampInBenchmarkFolder = False'
if config.appendTimeStampInBenchmarkFolder:
    # Suffix both output parent folders with a YYYYMMDD_HHMMSS timestamp
    # so repeated benchmark runs never overwrite each other.
    timestamp = time.time()
    stringTimeStamp = datetime.datetime.fromtimestamp(timestamp).strftime('%Y%m%d_%H%M%S')
    config.xy_output_parent = config.xy_output_parent + '_' + stringTimeStamp
    config.scatter_output_parent = config.scatter_output_parent + '_' + stringTimeStamp

# Input simulation file plus the CSV/log output paths for the x-y benchmark.
xy_file_path = config.xy_parent_folder + os.path.sep + config.xy_sim_file + "." + config.xy_extension
xy_output_file_path = config.xy_output_parent + os.path.sep + config.xy_output_file + "." + config.xy_log_output_extension
time_output_file_path = config.xy_output_parent + os.path.sep + config.time_output_file + "." + config.xy_output_extension
data_written_output_file_path = config.xy_output_parent + os.path.sep + config.data_written_output_file + "." + config.xy_output_extension
data_read_output_file_path = config.xy_output_parent + os.path.sep + config.data_read_output_file + "." + config.xy_output_extension
# Input paths for the scatter benchmark and the GC log analysis.
scatter_file_path = config.scatter_parent_folder + os.path.sep + config.scatter_sim_file + "." + config.xy_extension
gc_file_path = config.xy_parent_folder + os.path.sep + config.xy_sim_file + "." + config.xy_extension
# Global matplotlib figure counter, shared by every plot below.
figCount = 1;
# Run a cleanup in case anything was already generated
performCleanup()
# Becomes True once the GC benchmark CSV has been opened and its header written.
gc_benchmark_initialised = False
# Run the following set of instructions for all possible VM Arguments
# NOTE(review): the dump lost the original indentation; the nesting below is
# a best-effort reconstruction (per-run GC-log parsing inside the runs loop,
# plotting sections nested under config.xy_plots) -- confirm against upstream.
if config.xy_plots:
    total_gc_time_for_all_runs = 0.0
    gc_benchmark_file = None
    gc_benchmark_file_path = config.xy_output_parent + os.path.sep + config.gc_output_file + "_" + config.xy_sim_file + "." + config.xy_output_extension
    # One outer iteration per configured JVM max-heap size (in MB).
    for key in sorted(config.xy_vm_argument):
        print "-----------------------------------------------------------\n"
        print "Running with Heap Size : "+ str(key)+"MB" + "\n"
        # vm_argument = "-Xmx" + str(key)+"M"
        # -Xmx50M -Xloggc:benchmark/gc1.log -verbose:gc -XX:+PrintGCDetails
        heap_size = "-Xmx" + str(key)+"M"
        gc_output_log_file_path = config.xy_output_parent + os.path.sep + config.gc_log_file + "_" + config.xy_sim_file + "_" + str(key) + "m" + "." + config.gc_log_extension
        gc_log = "-Xloggc:"+ gc_output_log_file_path
        # Create the directory already because Java won't create it for Log files
        directory = os.path.dirname(gc_output_log_file_path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        verbose_gc = "-verbose:gc"
        print_gc = "-XX:+PrintGCDetails"
        total_gc_time_for_all_runs = 0.0
        # Perform Cleanup - Delete GC Log if it exists
        deleteFile(gc_output_log_file_path)
        for i in xrange(0,config.runs):
            print "Run Count :" + str(i+1) + "\n"
            # This will simulate the calling of "java -Xmx50M -cp build:lib/* jitd.benchmark.BenchmarkGenerator"
            #p = subprocess.Popen(["java", vm_argument, "-cp", config.classpath,"jitd.benchmark.BenchmarkGenerator", xy_file_path, xy_output_file_path])
            p = subprocess.Popen(["java", heap_size, gc_log, verbose_gc, print_gc, "-cp", config.classpath,"jitd.benchmark.BenchmarkGenerator",xy_file_path, xy_output_file_path])
            # Wait for the above process to complete.
            # Removing this statement might cause following instructions to run before the previous command completes executions
            p.wait()
            print "Running Cleanup operations for Run "+str(i+1)+"\n"
            # Delete all the generated data files
            performCleanup()
            print "Cleanup operations for Run "+str(i+1)+"\n"
            time.sleep(5)
            # Analyzing the logs
            print "Analyzing the GC Log for Heap Size : "+ str(key)+"MB" + "\n"
            gc_time = 0
            # Lazily open the summary CSV once, on the very first run.
            if not gc_benchmark_initialised:
                gc_benchmark_file = open(gc_benchmark_file_path, "w")
                gc_benchmark_file.write("Heap Size (in MB),Time spent in Garbage Collection(in seconds)\n")
                gc_benchmark_initialised = True
            # Sum the per-event GC pause times out of the HotSpot -Xloggc log:
            # lines start with a timestamp; the second decimal on the line is
            # the pause duration in seconds.
            with open(gc_output_log_file_path) as f:
                for line in f:
                    # If line starts with decimal
                    if re.match("^\d+\.\d+",line):
                        # Find all decimals, we will need 1st in all decimals
                        decimals = re.findall("\d+\.\d+", line)
                        if len(decimals) > 1:
                            # print decimals[1]
                            gc_time = gc_time + float(decimals[1])
            print "Time taken in Garbage Collection Run "+str(i+1)+"\n"
            total_gc_time_for_all_runs = total_gc_time_for_all_runs + gc_time
        #print "\n"
        # Average the GC time over all runs for this heap size and persist it.
        average_gc_time = total_gc_time_for_all_runs / config.runs
        print "Average Total Time spent in GC for Heap Size of " + str(key)+"MB :" + str(average_gc_time) + " seconds"
        gc_benchmark_file.write(str(key)+","+str(average_gc_time)+"\n")
        print "-----------------------------------------------------------\n"
    # Close the file
    gc_benchmark_file.close()
    print "All the runs have completed successfully\n"
    print "\n"
    if config.gc_plots:
        # Plot the graph
        # GC Time vs Heap Size
        figure = plt.figure(figCount)
        data = np.genfromtxt(gc_benchmark_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
        plt.plot(data['x'], data['y'],'o-')
        plt.xlabel("Heap Size (in MB)")
        plt.ylabel("Time spent in Garbage Collection (in seconds)")
        plt.title("Time spent in Garbage Collection on different Heap Sizes")
        plt.grid(True)
        figure.savefig(config.xy_output_parent+os.path.sep+'gc-time-vs-heap-size-'+config.xy_sim_file+'.png')
        figure.show()
        figCount = figCount + 1
    print "Fetching data from the logs to generate averaged data.\n"
    print "\n"
    # Call the program to Analyze the generated log and put it in a CSV
    p = subprocess.Popen(["java", "-cp", config.classpath,"jitd.benchmark.BenchmarkLogAnalyzer",xy_output_file_path, time_output_file_path, data_written_output_file_path, data_read_output_file_path])
    p.wait()
    print "Data Calculation completed."
    print "Generating graphs"
    # Calculate the generated CSV File names based on the scatter
    # Plot the graphs
    if config.xy_heap_vs_time:
        figure1 = plt.figure(figCount)
        # Time vs Heap Size Graph
        data = np.genfromtxt(time_output_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
        plt.plot(data['x'], data['y'],'o-')
        plt.xlabel("Heap Size (in MB)")
        plt.ylabel("Time Taken (in seconds)")
        plt.title("Time taken in cracker mode on different Heap Sizes")
        plt.grid(True)
        figure1.savefig(config.xy_output_parent+os.path.sep+'time-vs-heap-size-'+config.xy_sim_file+'.png')
        figure1.show()
        figCount = figCount + 1
    if config.xy_heap_vs_data_written:
        # Data Written vs Heap Size
        figure2 = plt.figure(figCount)
        data = np.genfromtxt(data_written_output_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
        plt.plot(data['x'], data['y'],'o-')
        plt.xlabel("Heap Size (in MB)")
        plt.ylabel("Total data written to disk (in MB)")
        plt.title("Total data written to disk on different Heap Sizes")
        plt.grid(True)
        figure2.savefig(config.xy_output_parent+os.path.sep+'bytes-written-vs-heap-size-'+config.xy_sim_file+'.png')
        figure2.show()
        figCount = figCount + 1
    if config.xy_heap_vs_data_read:
        # Data Read vs Heap Size
        figure3 = plt.figure(figCount)
        data = np.genfromtxt(data_read_output_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
        plt.plot(data['x'], data['y'],'o-')
        plt.xlabel("Heap Size (in MB)")
        plt.ylabel("Total data read from the disk (in MB)")
        plt.title("Total data read from disk on different Heap Sizes")
        plt.grid(True)
        figure3.savefig(config.xy_output_parent+os.path.sep+'bytes-read-vs-heap-size-'+config.xy_sim_file+'.png')
        figure3.show()
        figCount = figCount + 1
    if config.total_time_vs_gc_time:
        # Grouped bar chart: total runtime vs time spent in GC, per heap size.
        figure = plt.figure(figCount)
        ax = figure.add_subplot(111)
        # Time vs Heap Size Graph
        data1 = np.genfromtxt(time_output_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
        data2 = np.genfromtxt(gc_benchmark_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
        index = np.arange(len(data1))
        width = 0.25
        rects1 = ax.bar(index, data1['y'], width, color = 'b')
        rects2 = ax.bar(index + width, data2['y'], width, color = 'r')
        ax.set_xlabel("Heap Size (in MB)")
        ax.set_xticks(index + width)
        ax.set_xticklabels(data1['x'])
        ax.set_ylabel("Time Taken (in seconds)")
        ax.set_title("Time taken in cracker mode on different Heap Sizes")
        ax.legend((rects1[0], rects2[0]), ('Total Runtime', 'Garbage Collection'))
        ax.grid(True)
        figure.savefig(config.xy_output_parent+os.path.sep+'gc-time-total-runtime-vs-heap-size-'+config.xy_sim_file+'.png')
        figure.show()
        figCount = figCount + 1
# Make sure all data files are deleted before exiting
# Delete all the generated data files
performCleanup()
# Generate the scatter plots
# NOTE(review): indentation reconstructed from a flattened dump -- confirm
# the nesting (per-key work inside the loop, summary chart after it) upstream.
if config.scatter_plots:
    gc_benchmark_file_path = config.scatter_output_parent + os.path.sep + config.gc_output_file + "_" + config.scatter_sim_file + "." + config.scatter_output_extension
    gc_benchmark_initialised = False
    total_runtime_list = []
    idx = 0
    # One iteration per configured JVM max-heap size (in MB).
    for key in sorted(config.scatter_vm_argument):
        # vm_argument = "-Xmx" + str(key)+"M"
        # vm_argument = "-Xmx" + str(key)+"M"
        # -Xmx50M -Xloggc:benchmark/gc1.log -verbose:gc -XX:+PrintGCDetails
        heap_size = "-Xmx" + str(key)+"M"
        gc_output_log_file_path = config.scatter_output_parent + os.path.sep + config.gc_log_file + "_" + config.scatter_sim_file + "_" + str(key) + "m" + "." + config.gc_log_extension
        gc_log = "-Xloggc:"+ gc_output_log_file_path
        # Create the directory already because Java won't create it for Log files
        directory = os.path.dirname(gc_output_log_file_path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        verbose_gc = "-verbose:gc"
        print_gc = "-XX:+PrintGCDetails"
        # Perform Cleanup - Delete GC Log if it exists
        deleteFile(gc_output_log_file_path)
        scatter_output_file_path = config.scatter_output_parent + os.path.sep + config.scatter_output_file + "_" + config.scatter_sim_file + "_" + str(key) + "m" + "." + config.scatter_output_extension
        print "-----------------------------------------------------------\n"
        print "Running with Heap Size : "+ str(key)+"MB" + "\n"
        # p = subprocess.Popen(["java", vm_argument, "-cp", config.classpath,"jitd.benchmark.ScriptDriverBenchmark",scatter_file_path, scatter_output_file_path])
        p = subprocess.Popen(["java", heap_size, gc_log, verbose_gc, print_gc, "-cp", config.classpath,"jitd.benchmark.ScriptDriverBenchmark",scatter_file_path, scatter_output_file_path])
        # Wait for the above process to complete.
        # Removing this statement might cause following instructions to run before the previous command completes executions
        p.wait()
        # Delete all the generated data files
        performCleanup()
        print "Cleanup operations finished\n"
        time.sleep(5)
        print "\n"
        # Analyzing the logs
        print "Analyzing the GC Log for Heap Size : "+ str(key)+"MB" + "\n"
        gc_time = 0
        # Lazily open the summary CSV once, before the first write.
        if not gc_benchmark_initialised:
            gc_benchmark_file = open(gc_benchmark_file_path, "w")
            gc_benchmark_file.write("Heap Size (in MB),Time spent in Garbage Collection(in seconds)\n")
            gc_benchmark_initialised = True
        # Sum the per-event GC pause times (second decimal on each
        # timestamped line of the HotSpot -Xloggc output).
        with open(gc_output_log_file_path) as f:
            for line in f:
                # If line starts with decimal
                if re.match("^\d+\.\d+",line):
                    # Find all decimals, we will need 1st in all decimals
                    decimals = re.findall("\d+\.\d+", line)
                    if len(decimals) > 1:
                        # print decimals[1]
                        gc_time = gc_time + float(decimals[1])
        print "Total Time spent in Garbage Collection for Heap Size of " + str(key)+"MB :" + str(gc_time) + " seconds \n"
        gc_benchmark_file.write(str(key)+","+str(gc_time)+"\n")
        # Scatter plot for
        # using invalid_raise = False, ignores any row with missing values without raising exception
        # using dtaype = None, makes python calculate data types by itself
        data = np.genfromtxt(scatter_output_file_path, delimiter=',', invalid_raise = False, dtype = None, names=['x','y','z'])
        # Calculate the total runtime and put it in the list
        total_runtime = sum(data['y'])
        total_runtime_list.insert(idx, total_runtime)
        idx += 1
        # Color/size per operation type: WRITE points red and larger,
        # READ points blue and small.
        use_color = {"WRITE":"red","READ":"blue"}
        color_map = []
        s_map = []
        i = 0
        for x in data['z']:
            color_map.insert(i,use_color[x])
            if(x == "WRITE"):
                s_map.insert(i,10)
            else:
                s_map.insert(i,1)
            i = i + 1
        figure = plt.figure(figCount)
        # Specify color maps for data points using color = color_map
        plt.scatter(data['x'],data['y'], s=s_map, color=color_map)
        plt.xlabel("Number of Iterations")
        plt.yscale('log')
        plt.ylabel("Time (in seconds)")
        plt.title("System Performance in cracker mode with heap size "+str(key)+"MB")
        plt.grid(True)
        plt.plot()
        plt.ylim([0.0000001,1000])
        # Legend
        classes = ['Write','Read']
        class_colours = ['r','b']
        recs = []
        # Generate the legend for the graph
        for i in range(0,len(class_colours)):
            recs.append(mpatches.Rectangle((0,0),1,1,fc=class_colours[i]))
        plt.legend(recs,classes)
        # NOTE(review): this saves under xy_output_parent, not
        # scatter_output_parent -- looks like a copy-paste slip; confirm.
        figure.savefig(config.xy_output_parent+os.path.sep+'performance_'+str(key)+"m"+'.png')
        figure.show()
        figCount = figCount + 1
        print "\nTotal runtime for Heap Size of "+str(key) + "MB" + " :" + str(total_runtime)
        print "-----------------------------------------------------------\n"
    if config.total_time_vs_gc_time:
        # Grouped bar chart: total runtime vs GC time per heap size.
        figure = plt.figure(figCount)
        ax = figure.add_subplot(111)
        # Close the file
        # NOTE(review): the CSV is only closed when this flag is set; if the
        # flag is False the file handle is left open -- confirm intent.
        gc_benchmark_file.close()
        # Time vs Heap Size Graph
        data1 = total_runtime_list
        data2 = np.genfromtxt(gc_benchmark_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
        index = np.arange(len(data1))
        width = 0.25
        rects1 = ax.bar(index, data1, width, color = 'b')
        rects2 = ax.bar(index + width, data2['y'], width, color = 'r')
        ax.set_xlabel("Heap Size (in MB)")
        ax.set_xticks(index + width)
        ax.set_xticklabels(data2['x'])
        ax.set_ylabel("Time Taken (in seconds)")
        ax.set_title("Time taken in cracker mode on different Heap Sizes for Scatter Plots")
        ax.legend((rects1[0], rects2[0]), ('Total Runtime', 'Garbage Collection'))
        ax.grid(True)
        figure.savefig(config.scatter_output_parent+os.path.sep+'gc-time-total-runtime-vs-heap-size-'+config.scatter_sim_file+'.png')
        figure.show()
        figCount = figCount + 1
# Following line will keep the graphs alive (blocks until the user responds,
# so the matplotlib windows stay open).
print "Press Enter or Ctrl-C to exit"
raw_input() | apache-2.0 |
ZhaoCJ/django | tests/serializers_regress/tests.py | 4 | 21868 | """
A test spanning all the capabilities of all the serializers.
This class defines sample data and a dynamically generated
test case that is capable of testing the capabilities of
the serializers. This includes all valid data values, plus
forward, backwards and self references.
"""
from __future__ import unicode_literals
import datetime
import decimal
from unittest import expectedFailure, skipUnless
try:
import yaml
except ImportError:
yaml = None
from django.core import serializers
from django.core.serializers import SerializerDoesNotExist
from django.core.serializers.base import DeserializationError
from django.core.serializers.xml_serializer import DTDForbidden
from django.db import connection, models
from django.http import HttpResponse
from django.test import TestCase
from django.utils import six
from django.utils.encoding import force_text
from django.utils.functional import curry
from .models import (BinaryData, BooleanData, CharData, DateData, DateTimeData, EmailData,
FileData, FilePathData, DecimalData, FloatData, IntegerData, IPAddressData,
GenericIPAddressData, NullBooleanData, PositiveIntegerData,
PositiveSmallIntegerData, SlugData, SmallData, TextData, TimeData,
GenericData, Anchor, UniqueAnchor, FKData, M2MData, O2OData,
FKSelfData, M2MSelfData, FKDataToField, FKDataToO2O, M2MIntermediateData,
Intermediate, BooleanPKData, CharPKData, EmailPKData, FilePathPKData,
DecimalPKData, FloatPKData, IntegerPKData, IPAddressPKData,
GenericIPAddressPKData, PositiveIntegerPKData,
PositiveSmallIntegerPKData, SlugPKData, SmallPKData,
AutoNowDateTimeData, ModifyingSaveData, InheritAbstractModel, BaseModel,
ExplicitInheritBaseModel, InheritBaseModel, ProxyBaseModel,
ProxyProxyBaseModel, BigIntegerData, LengthModel, Tag, ComplexModel,
NaturalKeyAnchor, FKDataNaturalKey)
# A set of functions that can be used to recreate
# test data objects of various kinds.
# The save method is a raw base model save, to make
# sure that the data in the database matches the
# exact test case.
def data_create(pk, klass, data):
    """Instantiate ``klass`` with the given pk, store ``data`` on it, and
    raw-save it so the database row matches the test case exactly."""
    obj = klass(id=pk)
    obj.data = data
    models.Model.save_base(obj, raw=True)
    return [obj]
def generic_create(pk, klass, data):
    # data[0] is the instance's own payload; every remaining item becomes a
    # related tag created through the generic relation.
    instance = klass(id=pk)
    instance.data = data[0]
    models.Model.save_base(instance, raw=True)
    for tag in data[1:]:
        instance.tags.create(data=tag)
    return [instance]

def fk_create(pk, klass, data):
    # Set the raw FK column (data_id) directly so no related lookup happens;
    # `data` may reference a row that doesn't exist yet (forward reference).
    instance = klass(id=pk)
    setattr(instance, 'data_id', data)
    models.Model.save_base(instance, raw=True)
    return [instance]

def m2m_create(pk, klass, data):
    # Save first, then assign the M2M set -- M2M rows need a saved instance.
    instance = klass(id=pk)
    models.Model.save_base(instance, raw=True)
    instance.data = data
    return [instance]

def im2m_create(pk, klass, data):
    # M2M through an intermediate model: only the owning row is created here;
    # the Intermediate rows are created separately via im_create.
    instance = klass(id=pk)
    models.Model.save_base(instance, raw=True)
    return [instance]

def im_create(pk, klass, data):
    # Create one intermediate (through-table) row; `data` is a dict with the
    # raw FK ids 'left'/'right' and an optional 'extra' payload.
    instance = klass(id=pk)
    instance.right_id = data['right']
    instance.left_id = data['left']
    if 'extra' in data:
        instance.extra = data['extra']
    models.Model.save_base(instance, raw=True)
    return [instance]

def o2o_create(pk, klass, data):
    # One-to-one: the O2O field is the pk, so no explicit id is passed.
    instance = klass()
    instance.data_id = data
    models.Model.save_base(instance, raw=True)
    return [instance]

def pk_create(pk, klass, data):
    # `data` IS the primary key here (models whose pk is the data field);
    # the `pk` argument is ignored by design.
    instance = klass()
    instance.data = data
    models.Model.save_base(instance, raw=True)
    return [instance]
def inherited_create(pk, klass, data):
    # Create a child-model instance and return it together with the parent
    # rows that multi-table inheritance creates implicitly.
    instance = klass(id=pk,**data)
    # This isn't a raw save because:
    # 1) we're testing inheritance, not field behavior, so none
    # of the field values need to be protected.
    # 2) saving the child class and having the parent created
    # automatically is easier than manually creating both.
    models.Model.save(instance)
    created = [instance]
    # NOTE(review): the loop variable shadows the `klass` parameter; harmless
    # here since the parameter is not used afterwards.
    for klass,field in instance._meta.parents.items():
        created.append(klass.objects.get(id=pk))
    return created
# A set of functions that can be used to compare
# test data objects of various kinds
def data_compare(testcase, pk, klass, data):
    # Reload the row and assert its data round-tripped through
    # serialization unchanged. BinaryData is compared via bytes() because
    # memoryview/buffer objects don't compare equal to bytes directly.
    instance = klass.objects.get(id=pk)
    if klass == BinaryData and data is not None:
        testcase.assertEqual(bytes(data), bytes(instance.data),
             "Objects with PK=%d not equal; expected '%s' (%s), got '%s' (%s)" % (
                pk, repr(bytes(data)), type(data), repr(bytes(instance.data)),
                type(instance.data))
        )
    else:
        testcase.assertEqual(data, instance.data,
             "Objects with PK=%d not equal; expected '%s' (%s), got '%s' (%s)" % (
                pk, data, type(data), instance, type(instance.data))
        )

def generic_compare(testcase, pk, klass, data):
    # data[0] is the instance payload; the rest are the related tags.
    instance = klass.objects.get(id=pk)
    testcase.assertEqual(data[0], instance.data)
    testcase.assertEqual(data[1:], [t.data for t in instance.tags.order_by('id')])

def fk_compare(testcase, pk, klass, data):
    # Compare against the raw FK column, mirroring fk_create.
    instance = klass.objects.get(id=pk)
    testcase.assertEqual(data, instance.data_id)

def m2m_compare(testcase, pk, klass, data):
    # Compare the ordered list of related ids.
    instance = klass.objects.get(id=pk)
    testcase.assertEqual(data, [obj.id for obj in instance.data.order_by('id')])

def im2m_compare(testcase, pk, klass, data):
    # The get() call itself is the assertion: the row must exist.
    instance = klass.objects.get(id=pk)
    #actually nothing else to check, the instance just should exist

def im_compare(testcase, pk, klass, data):
    # Intermediate (through-table) row: check both raw FK ids and the
    # optional 'extra' field (which defaults to "doesn't matter").
    instance = klass.objects.get(id=pk)
    testcase.assertEqual(data['left'], instance.left_id)
    testcase.assertEqual(data['right'], instance.right_id)
    if 'extra' in data:
        testcase.assertEqual(data['extra'], instance.extra)
    else:
        testcase.assertEqual("doesn't matter", instance.extra)

def o2o_compare(testcase, pk, klass, data):
    # O2O rows are looked up by data (the O2O field is the pk).
    instance = klass.objects.get(data=data)
    testcase.assertEqual(data, instance.data_id)

def pk_compare(testcase, pk, klass, data):
    # For pk-field models the data is the pk; look up by it.
    instance = klass.objects.get(data=data)
    testcase.assertEqual(data, instance.data)

def inherited_compare(testcase, pk, klass, data):
    # Every field passed to inherited_create must have round-tripped.
    instance = klass.objects.get(id=pk)
    for key,value in data.items():
        testcase.assertEqual(value, getattr(instance,key))
# Define some data types. Each data type is
# actually a pair of functions; one to create
# and one to compare objects of that type.
# These pairs are referenced by the first element of each test_data tuple.
data_obj = (data_create, data_compare)
generic_obj = (generic_create, generic_compare)
fk_obj = (fk_create, fk_compare)
m2m_obj = (m2m_create, m2m_compare)
im2m_obj = (im2m_create, im2m_compare)
im_obj = (im_create, im_compare)
o2o_obj = (o2o_create, o2o_compare)
pk_obj = (pk_create, pk_compare)
inherited_obj = (inherited_create, inherited_compare)
test_data = [
    # Format: (data type, PK value, Model Class, data)
    # PK values are grouped in ranges per field type; 300/500-range Anchor
    # rows exist so FK/M2M entries can reference rows created after
    # themselves (forward references) as well as before (backward).
    (data_obj, 1, BinaryData, six.memoryview(b"\x05\xFD\x00")),
    (data_obj, 2, BinaryData, None),
    (data_obj, 5, BooleanData, True),
    (data_obj, 6, BooleanData, False),
    (data_obj, 10, CharData, "Test Char Data"),
    (data_obj, 11, CharData, ""),
    (data_obj, 12, CharData, "None"),
    (data_obj, 13, CharData, "null"),
    (data_obj, 14, CharData, "NULL"),
    (data_obj, 15, CharData, None),
    # (We use something that will fit into a latin1 database encoding here,
    # because that is still the default used on many system setups.)
    (data_obj, 16, CharData, '\xa5'),
    (data_obj, 20, DateData, datetime.date(2006,6,16)),
    (data_obj, 21, DateData, None),
    (data_obj, 30, DateTimeData, datetime.datetime(2006,6,16,10,42,37)),
    (data_obj, 31, DateTimeData, None),
    (data_obj, 40, EmailData, "hovercraft@example.com"),
    (data_obj, 41, EmailData, None),
    (data_obj, 42, EmailData, ""),
    (data_obj, 50, FileData, 'file:///foo/bar/whiz.txt'),
#     (data_obj, 51, FileData, None),
    (data_obj, 52, FileData, ""),
    (data_obj, 60, FilePathData, "/foo/bar/whiz.txt"),
    (data_obj, 61, FilePathData, None),
    (data_obj, 62, FilePathData, ""),
    (data_obj, 70, DecimalData, decimal.Decimal('12.345')),
    (data_obj, 71, DecimalData, decimal.Decimal('-12.345')),
    (data_obj, 72, DecimalData, decimal.Decimal('0.0')),
    (data_obj, 73, DecimalData, None),
    (data_obj, 74, FloatData, 12.345),
    (data_obj, 75, FloatData, -12.345),
    (data_obj, 76, FloatData, 0.0),
    (data_obj, 77, FloatData, None),
    (data_obj, 80, IntegerData, 123456789),
    (data_obj, 81, IntegerData, -123456789),
    (data_obj, 82, IntegerData, 0),
    (data_obj, 83, IntegerData, None),
    #(XX, ImageData
    (data_obj, 90, IPAddressData, "127.0.0.1"),
    (data_obj, 91, IPAddressData, None),
    (data_obj, 95, GenericIPAddressData, "fe80:1424:2223:6cff:fe8a:2e8a:2151:abcd"),
    (data_obj, 96, GenericIPAddressData, None),
    (data_obj, 100, NullBooleanData, True),
    (data_obj, 101, NullBooleanData, False),
    (data_obj, 102, NullBooleanData, None),
    (data_obj, 120, PositiveIntegerData, 123456789),
    (data_obj, 121, PositiveIntegerData, None),
    (data_obj, 130, PositiveSmallIntegerData, 12),
    (data_obj, 131, PositiveSmallIntegerData, None),
    (data_obj, 140, SlugData, "this-is-a-slug"),
    (data_obj, 141, SlugData, None),
    (data_obj, 142, SlugData, ""),
    (data_obj, 150, SmallData, 12),
    (data_obj, 151, SmallData, -12),
    (data_obj, 152, SmallData, 0),
    (data_obj, 153, SmallData, None),
    (data_obj, 160, TextData, """This is a long piece of text.
It contains line breaks.
Several of them.
The end."""),
    (data_obj, 161, TextData, ""),
    (data_obj, 162, TextData, None),
    (data_obj, 170, TimeData, datetime.time(10,42,37)),
    (data_obj, 171, TimeData, None),

    (generic_obj, 200, GenericData, ['Generic Object 1', 'tag1', 'tag2']),
    (generic_obj, 201, GenericData, ['Generic Object 2', 'tag2', 'tag3']),

    (data_obj, 300, Anchor, "Anchor 1"),
    (data_obj, 301, Anchor, "Anchor 2"),
    (data_obj, 302, UniqueAnchor, "UAnchor 1"),

    (fk_obj, 400, FKData, 300), # Post reference
    (fk_obj, 401, FKData, 500), # Pre reference
    (fk_obj, 402, FKData, None), # Empty reference

    (m2m_obj, 410, M2MData, []), # Empty set
    (m2m_obj, 411, M2MData, [300,301]), # Post reference
    (m2m_obj, 412, M2MData, [500,501]), # Pre reference
    (m2m_obj, 413, M2MData, [300,301,500,501]), # Pre and Post reference

    (o2o_obj, None, O2OData, 300), # Post reference
    (o2o_obj, None, O2OData, 500), # Pre reference

    (fk_obj, 430, FKSelfData, 431), # Pre reference
    (fk_obj, 431, FKSelfData, 430), # Post reference
    (fk_obj, 432, FKSelfData, None), # Empty reference

    (m2m_obj, 440, M2MSelfData, []),
    (m2m_obj, 441, M2MSelfData, []),
    (m2m_obj, 442, M2MSelfData, [440, 441]),
    (m2m_obj, 443, M2MSelfData, [445, 446]),
    (m2m_obj, 444, M2MSelfData, [440, 441, 445, 446]),
    (m2m_obj, 445, M2MSelfData, []),
    (m2m_obj, 446, M2MSelfData, []),

    (fk_obj, 450, FKDataToField, "UAnchor 1"),
    (fk_obj, 451, FKDataToField, "UAnchor 2"),
    (fk_obj, 452, FKDataToField, None),

    (fk_obj, 460, FKDataToO2O, 300),

    (im2m_obj, 470, M2MIntermediateData, None),

    #testing post- and prereferences and extra fields
    (im_obj, 480, Intermediate, {'right': 300, 'left': 470}),
    (im_obj, 481, Intermediate, {'right': 300, 'left': 490}),
    (im_obj, 482, Intermediate, {'right': 500, 'left': 470}),
    (im_obj, 483, Intermediate, {'right': 500, 'left': 490}),
    (im_obj, 484, Intermediate, {'right': 300, 'left': 470, 'extra': "extra"}),
    (im_obj, 485, Intermediate, {'right': 300, 'left': 490, 'extra': "extra"}),
    (im_obj, 486, Intermediate, {'right': 500, 'left': 470, 'extra': "extra"}),
    (im_obj, 487, Intermediate, {'right': 500, 'left': 490, 'extra': "extra"}),

    (im2m_obj, 490, M2MIntermediateData, []),

    (data_obj, 500, Anchor, "Anchor 3"),
    (data_obj, 501, Anchor, "Anchor 4"),
    (data_obj, 502, UniqueAnchor, "UAnchor 2"),

    # 600+: models whose primary key IS the data field.
    (pk_obj, 601, BooleanPKData, True),
    (pk_obj, 602, BooleanPKData, False),
    (pk_obj, 610, CharPKData, "Test Char PKData"),
#     (pk_obj, 620, DatePKData, datetime.date(2006,6,16)),
#     (pk_obj, 630, DateTimePKData, datetime.datetime(2006,6,16,10,42,37)),
    (pk_obj, 640, EmailPKData, "hovercraft@example.com"),
#     (pk_obj, 650, FilePKData, 'file:///foo/bar/whiz.txt'),
    (pk_obj, 660, FilePathPKData, "/foo/bar/whiz.txt"),
    (pk_obj, 670, DecimalPKData, decimal.Decimal('12.345')),
    (pk_obj, 671, DecimalPKData, decimal.Decimal('-12.345')),
    (pk_obj, 672, DecimalPKData, decimal.Decimal('0.0')),
    (pk_obj, 673, FloatPKData, 12.345),
    (pk_obj, 674, FloatPKData, -12.345),
    (pk_obj, 675, FloatPKData, 0.0),
    (pk_obj, 680, IntegerPKData, 123456789),
    (pk_obj, 681, IntegerPKData, -123456789),
    (pk_obj, 682, IntegerPKData, 0),
#     (XX, ImagePKData
    (pk_obj, 690, IPAddressPKData, "127.0.0.1"),
    (pk_obj, 695, GenericIPAddressPKData, "fe80:1424:2223:6cff:fe8a:2e8a:2151:abcd"),
#     (pk_obj, 700, NullBooleanPKData, True),
#     (pk_obj, 701, NullBooleanPKData, False),
    (pk_obj, 720, PositiveIntegerPKData, 123456789),
    (pk_obj, 730, PositiveSmallIntegerPKData, 12),
    (pk_obj, 740, SlugPKData, "this-is-a-slug"),
    (pk_obj, 750, SmallPKData, 12),
    (pk_obj, 751, SmallPKData, -12),
    (pk_obj, 752, SmallPKData, 0),
#     (pk_obj, 760, TextPKData, """This is a long piece of text.
# It contains line breaks.
# Several of them.
# The end."""),
#     (pk_obj, 770, TimePKData, datetime.time(10,42,37)),
#     (pk_obj, 790, XMLPKData, "<foo></foo>"),

    (data_obj, 800, AutoNowDateTimeData, datetime.datetime(2006,6,16,10,42,37)),
    (data_obj, 810, ModifyingSaveData, 42),

    (inherited_obj, 900, InheritAbstractModel, {'child_data':37,'parent_data':42}),
    (inherited_obj, 910, ExplicitInheritBaseModel, {'child_data':37,'parent_data':42}),
    (inherited_obj, 920, InheritBaseModel, {'child_data':37,'parent_data':42}),

    (data_obj, 1000, BigIntegerData, 9223372036854775807),
    (data_obj, 1001, BigIntegerData, -9223372036854775808),
    (data_obj, 1002, BigIntegerData, 0),
    (data_obj, 1003, BigIntegerData, None),

    (data_obj, 1004, LengthModel, 0),
    (data_obj, 1005, LengthModel, 1),
]
# Extra fixtures exercised only by the natural-key serializer tests.
# NOTE(review): "Anghor" is a typo but it's runtime data used as a natural
# key, so it is deliberately left unchanged here.
natural_key_test_data = [
    (data_obj, 1100, NaturalKeyAnchor, "Natural Key Anghor"),
    (fk_obj, 1101, FKDataNaturalKey, 1100),
    (fk_obj, 1102, FKDataNaturalKey, None),
]

# Because Oracle treats the empty string as NULL, Oracle is expected to fail
# when field.empty_strings_allowed is True and the value is None; skip these
# tests.
if connection.features.interprets_empty_strings_as_nulls:
    test_data = [data for data in test_data
                 if not (data[0] == data_obj and
                         data[2]._meta.get_field('data').empty_strings_allowed and
                         data[3] is None)]

# Regression test for #8651 -- a FK to an object with PK of 0
# This won't work on MySQL since it won't let you create an object
# with a primary key of 0,
if connection.features.allows_primary_key_0:
    test_data.extend([
        (data_obj, 0, Anchor, "Anchor 0"),
        (fk_obj, 465, FKData, 0),
    ])
# Dynamically create serializer tests to ensure that all
# registered serializers are automatically tested.
class SerializerTests(TestCase):
    """Behaviour of the serializer registry itself (lookup failures,
    deserialization errors, proxy-model serialization)."""

    def test_get_unknown_serializer(self):
        """
        #15889: get_serializer('nonsense') raises a SerializerDoesNotExist
        """
        with self.assertRaises(SerializerDoesNotExist):
            serializers.get_serializer("nonsense")

        # SerializerDoesNotExist subclasses KeyError, so callers catching
        # KeyError must keep working.
        with self.assertRaises(KeyError):
            serializers.get_serializer("nonsense")

        # SerializerDoesNotExist is instantiated with the nonexistent format
        with self.assertRaises(SerializerDoesNotExist) as cm:
            serializers.get_serializer("nonsense")
        self.assertEqual(cm.exception.args, ("nonsense",))

    # NOTE(review): "unkown" typos in the next two method names are kept;
    # renaming them would change the discovered test identifiers.
    def test_unregister_unkown_serializer(self):
        with self.assertRaises(SerializerDoesNotExist):
            serializers.unregister_serializer("nonsense")

    def test_get_unkown_deserializer(self):
        with self.assertRaises(SerializerDoesNotExist):
            serializers.get_deserializer("nonsense")

    def test_json_deserializer_exception(self):
        # Truncated JSON must surface as DeserializationError, not a raw
        # ValueError from the json module.
        with self.assertRaises(DeserializationError):
            for obj in serializers.deserialize("json", """[{"pk":1}"""):
                pass

    @skipUnless(yaml, "PyYAML not installed")
    def test_yaml_deserializer_exception(self):
        # Malformed YAML likewise maps to DeserializationError.
        with self.assertRaises(DeserializationError):
            for obj in serializers.deserialize("yaml", "{"):
                pass

    def test_serialize_proxy_model(self):
        # Proxy models must serialize identically to their concrete base,
        # differing only in the model label.
        BaseModel.objects.create(parent_data=1)
        base_objects = BaseModel.objects.all()
        proxy_objects = ProxyBaseModel.objects.all()
        proxy_proxy_objects = ProxyProxyBaseModel.objects.all()
        base_data = serializers.serialize("json", base_objects)
        proxy_data = serializers.serialize("json", proxy_objects)
        proxy_proxy_data = serializers.serialize("json", proxy_proxy_objects)
        self.assertEqual(base_data, proxy_data.replace('proxy', ''))
        self.assertEqual(base_data, proxy_proxy_data.replace('proxy', ''))
def serializerTest(format, self):
    """Round-trip all of ``test_data`` through ``format`` and verify it.

    Bound onto SerializerTests once per registered format via curry().
    """
    # Create all the objects defined in the test data
    objects = []
    instance_count = {}
    for (func, pk, klass, datum) in test_data:
        with connection.constraint_checks_disabled():
            objects.extend(func[0](pk, klass, datum))

    # Get a count of the number of objects created for each class
    # NOTE(review): instance_count is still empty at this point, so this loop
    # (and the per-class count assertions at the bottom) never execute --
    # confirm whether the dict was meant to be filled in the loop above.
    for klass in instance_count:
        instance_count[klass] = klass.objects.count()

    # Add the generic tagged objects to the object list
    objects.extend(Tag.objects.all())

    # Serialize the test database
    serialized_data = serializers.serialize(format, objects, indent=2)

    # Deserializing and saving must restore every object unchanged.
    for obj in serializers.deserialize(format, serialized_data):
        obj.save()

    # Assert that the deserialized data is the same
    # as the original source
    for (func, pk, klass, datum) in test_data:
        func[1](self, pk, klass, datum)

    # Assert that the number of objects deserialized is the
    # same as the number that was serialized.
    for klass, count in instance_count.items():
        self.assertEqual(count, klass.objects.count())
# Existing MySQL DB-API drivers fail on binary data.
if connection.vendor == 'mysql' and six.PY3:
    serializerTest = expectedFailure(serializerTest)
def naturalKeySerializerTest(format, self):
    """Round-trip ``natural_key_test_data`` through ``format`` using
    natural keys instead of primary keys.

    Bound onto SerializerTests once per registered format via curry().
    """
    # Create all the objects defined in the test data
    objects = []
    instance_count = {}
    for (func, pk, klass, datum) in natural_key_test_data:
        with connection.constraint_checks_disabled():
            objects.extend(func[0](pk, klass, datum))

    # Get a count of the number of objects created for each class
    # NOTE(review): instance_count is empty here, so this loop and the count
    # assertions below are no-ops -- same question as in serializerTest.
    for klass in instance_count:
        instance_count[klass] = klass.objects.count()

    # Serialize the test database
    serialized_data = serializers.serialize(format, objects, indent=2,
                                            use_natural_keys=True)

    for obj in serializers.deserialize(format, serialized_data):
        obj.save()

    # Assert that the deserialized data is the same
    # as the original source
    for (func, pk, klass, datum) in natural_key_test_data:
        func[1](self, pk, klass, datum)

    # Assert that the number of objects deserialized is the
    # same as the number that was serialized.
    for klass, count in instance_count.items():
        self.assertEqual(count, klass.objects.count())
def fieldsTest(format, self):
    """Serializing with fields=... must round-trip only the named fields."""
    instance = ComplexModel(field1='first', field2='second', field3='third')
    instance.save_base(raw=True)

    # Serialize a subset of the fields, then deserialize the payload again.
    payload = serializers.serialize(
        format, [instance], indent=2, fields=('field1', 'field3'))
    restored = next(serializers.deserialize(format, payload)).object

    # Only the serialized fields carry data; the omitted field is blank.
    self.assertEqual(restored.field1, 'first')
    self.assertEqual(restored.field2, '')
    self.assertEqual(restored.field3, 'third')
def streamTest(format, self):
    """Serializing into a stream must match the plain string output."""
    instance = ComplexModel(field1='first', field2='second', field3='third')
    instance.save_base(raw=True)

    for sink in (six.StringIO(), HttpResponse()):
        # Write the serialization into the stream...
        serializers.serialize(format, [instance], indent=2, stream=sink)
        # ...and produce the reference output without a stream.
        expected = serializers.serialize(format, [instance], indent=2)

        # HttpResponse stores bytes; StringIO stores text.
        if isinstance(sink, six.StringIO):
            actual = sink.getvalue()
        else:
            actual = sink.content.decode('utf-8')
        self.assertEqual(expected, actual)
# Attach one test method per registered (non-broken) serializer format so
# every available format is exercised without hand-writing the methods.
for format in [
    f for f in serializers.get_serializer_formats()
    if not isinstance(serializers.get_serializer(f), serializers.BadSerializer)
]:
    setattr(SerializerTests, 'test_' + format + '_serializer', curry(serializerTest, format))
    setattr(SerializerTests, 'test_' + format + '_natural_key_serializer', curry(naturalKeySerializerTest, format))
    setattr(SerializerTests, 'test_' + format + '_serializer_fields', curry(fieldsTest, format))
    if format != 'python':
        # The 'python' serializer produces objects, not text, so it has no
        # stream form to test.
        setattr(SerializerTests, 'test_' + format + '_serializer_stream', curry(streamTest, format))
class XmlDeserializerSecurityTests(TestCase):
    """Hardening checks for the XML deserializer."""

    def test_no_dtd(self):
        """
        The XML deserializer shouldn't allow a DTD.

        This is the most straightforward way to prevent all entity definitions
        and avoid both external entities and entity-expansion attacks.
        """
        payload = '<?xml version="1.0" standalone="no"?><!DOCTYPE example SYSTEM "http://example.com/example.dtd">'
        with self.assertRaises(DTDForbidden):
            next(serializers.deserialize('xml', payload))
| bsd-3-clause |
Greymerk/python-rpg | src/actions/ready.py | 1 | 1058 | '''
Created on 2013-06-04
@author: brian
'''
import pygame
from pygame.locals import *
class Ready(object):
    """Interactive action that lets the player equip a new weapon.

    The key-to-weapon-class mapping ``self.choices`` is not defined here;
    # NOTE(review): presumably subclasses (or callers) set ``choices`` --
    # confirm, otherwise nextStep() raises AttributeError on the first event.
    """

    def __init__(self, player):
        self.player = player
        self.player.log.append('Ready a new weapon')
        # Key code of the selected weapon; None until the player picks one.
        self.choice = None

    def nextStep(self):
        """Advance the action by one tick.

        Returns True when the action has finished (weapon equipped or
        cancelled) and False while it is still waiting for input.
        """
        if self.choice is not None:
            # A choice was recorded on a previous tick: equip it now.
            leader = self.player.party.getLeader()
            leader.inventory.weapon = self.choices[self.choice]()
            self.player.log.append(
                'Equipped a ' + leader.inventory.weapon.__class__.__name__)
            return True

        e = pygame.event.poll()
        # Bug fix: pygame.event.poll() never returns None -- when the queue
        # is empty it returns an event of type pygame.NOEVENT, so the old
        # `if e is None` check was dead code.
        if e.type == pygame.NOEVENT:
            return False
        if e.type != KEYDOWN:
            return False
        # `in` replaces the Python-2-only dict.has_key().
        if e.key in self.choices:
            self.choice = e.key
            return False
        elif e.key == K_ESCAPE:
            self.player.log.append('Cancelled')
            return True
        return False
| gpl-3.0 |
saisai/phantomjs | src/qt/qtwebkit/Tools/BuildSlaveSupport/build.webkit.org-config/wkbuild_unittest.py | 116 | 4793 | # Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import wkbuild
class ShouldBuildTest(unittest.TestCase):
    """Checks wkbuild.should_build() against a table of changed-file sets."""

    # Each entry: (changed file paths, platforms expected to rebuild).
    # "*" means every platform; [] means no platform should rebuild.
    _should_build_tests = [
        (["ChangeLog", "Source/WebCore/ChangeLog", "Source/WebKit2/ChangeLog-2011-02-11"], []),
        (["GNUmakefile.am", "Source/WebCore/GNUmakefile.am"], ["gtk"]),
        (["Websites/bugs.webkit.org/foo", "Source/WebCore/bar"], ["*"]),
        (["Websites/bugs.webkit.org/foo"], []),
        (["Source/JavaScriptCore/JavaScriptCore.xcodeproj/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
        (["Source/JavaScriptCore/JavaScriptCore.vcproj/foo", "Source/WebKit2/win/WebKit2.vcproj", "Source/WebKit/win/WebKit.sln", "Tools/WebKitTestRunner/Configurations/WebKitTestRunnerCommon.vsprops"], ["win"]),
        (["LayoutTests/platform/mac/foo", "Source/WebCore/bar"], ["*"]),
        (["LayoutTests/foo"], ["*"]),
        (["LayoutTests/canvas/philip/tests/size.attributes.parse.exp-expected.txt", "LayoutTests/canvas/philip/tests/size.attributes.parse.exp.html"], ["*"]),
        (["LayoutTests/platform/mac-leopard/foo"], ["mac-leopard"]),
        (["LayoutTests/platform/mac-lion/foo"], ["mac-leopard", "mac-lion", "mac-snowleopard", "win"]),
        (["LayoutTests/platform/mac-snowleopard/foo"], ["mac-leopard", "mac-snowleopard"]),
        (["LayoutTests/platform/mac-wk2/Skipped"], ["mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]),
        (["LayoutTests/platform/mac/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]),
        (["LayoutTests/platform/win-xp/foo"], ["win"]),
        (["LayoutTests/platform/win-wk2/foo"], ["win"]),
        (["LayoutTests/platform/win/foo"], ["win"]),
        (["Source/WebCore.exp.in", "Source/WebKit/mac/WebKit.exp"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
        (["Source/WebCore/mac/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
        (["Source/WebCore/win/foo"], ["win"]),
        (["Source/WebCore/platform/graphics/gpu/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
        (["Source/WebCore/platform/wx/wxcode/win/foo"], []),
        (["Source/WebCore/rendering/RenderThemeMac.mm", "Source/WebCore/rendering/RenderThemeMac.h"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
        (["Source/WebCore/rendering/RenderThemeWinCE.h"], []),
        (["Tools/BuildSlaveSupport/build.webkit.org-config/public_html/LeaksViewer/LeaksViewer.js"], []),
    ]

    def test_should_build(self):
        for files, platforms in self._should_build_tests:
            # FIXME: We should test more platforms here once
            # wkbuild._should_file_trigger_build is implemented for them.
            for platform in ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]:
                should_build = platform in platforms or "*" in platforms
                self.assertEqual(wkbuild.should_build(platform, files), should_build, "%s should%s have built but did%s (files: %s)" % (platform, "" if should_build else "n't", "n't" if should_build else "", str(files)))
# FIXME: We should run this file as part of test-rm .
# Unfortunately test-rm currently requires that unittests
# be located in a directory with a valid module name.
# 'build.webkit.org-config' is not a valid module name (due to '.' and '-')
# so for now this is a stand-alone test harness.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
thaumos/ansible | lib/ansible/modules/cloud/scaleway/scaleway_volume_facts.py | 48 | 2704 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: scaleway_volume_facts
short_description: Gather facts about the Scaleway volumes available.
description:
- Gather facts about the Scaleway volumes available.
version_added: "2.7"
author:
- "Yanis Guenane (@Spredzy)"
- "Remy Leone (@sieben)"
extends_documentation_fragment: scaleway
options:
region:
version_added: "2.8"
description:
- Scaleway region to use (for example par1).
required: true
choices:
- ams1
- EMEA-NL-EVS
- par1
- EMEA-FR-PAR1
'''
EXAMPLES = r'''
- name: Gather Scaleway volumes facts
scaleway_volume_facts:
region: par1
'''
RETURN = r'''
---
scaleway_volume_facts:
description: Response from Scaleway API
returned: success
type: complex
contains:
"scaleway_volume_facts": [
{
"creation_date": "2018-08-14T20:56:24.949660+00:00",
"export_uri": null,
"id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba",
"modification_date": "2018-08-14T20:56:24.949660+00:00",
"name": "test-volume",
"organization": "3f709602-5e6c-4619-b80c-e841c89734af",
"server": null,
"size": 50000000000,
"state": "available",
"volume_type": "l_ssd"
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.scaleway import (
Scaleway, ScalewayException, scaleway_argument_spec,
SCALEWAY_LOCATION)
class ScalewayVolumeFacts(Scaleway):
    """Facts gatherer that lists every volume in one Scaleway region."""

    def __init__(self, module):
        super(ScalewayVolumeFacts, self).__init__(module)
        # API resource name queried by the base class's get_resources().
        self.name = 'volumes'

        region = module.params["region"]
        # Route all requests to the endpoint of the selected region.
        self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
def main():
    """Ansible module entry point: gather and return Scaleway volume facts."""
    argument_spec = scaleway_argument_spec()
    argument_spec.update(dict(
        region=dict(required=True, choices=SCALEWAY_LOCATION.keys()),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        # Read-only module, so check mode is always safe.
        supports_check_mode=True,
    )

    try:
        module.exit_json(
            ansible_facts={'scaleway_volume_facts': ScalewayVolumeFacts(module).get_resources()}
        )
    except ScalewayException as exc:
        module.fail_json(msg=exc.message)


if __name__ == '__main__':
    main()
| gpl-3.0 |
lmprice/ansible | lib/ansible/modules/cloud/azure/azure_rm_containerregistry.py | 15 | 14879 | #!/usr/bin/python
#
# Copyright (c) 2017 Yawei Wang, <yaweiw@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_containerregistry
version_added: "2.5"
short_description: Manage an Azure Container Registry.
description:
- Create, update and delete an Azure Container Registry.
options:
resource_group:
description:
- Name of a resource group where the Container Registry exists or will be created.
required: true
name:
description:
- Name of the Container Registry.
required: true
state:
description:
- Assert the state of the container registry. Use 'present' to create or update an container registry and 'absent' to delete it.
default: present
choices:
- absent
- present
location:
description:
- Valid azure location. Defaults to location of the resource group.
default: resource_group location
admin_user_enabled:
description:
- If enabled, you can use the registry name as username and admin user access key as password to docker login to your container registry.
type: bool
default: no
sku:
description:
- Specifies the SKU to use. Currently can be either Basic, Standard or Premium.
default: Standard
choices:
- Basic
- Standard
- Premium
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Yawei Wang (@yaweiw)"
'''
EXAMPLES = '''
- name: Create an azure container registry
azure_rm_containerregistry:
name: testacr1
location: eastus
resource_group: testrg
state: present
admin_user_enabled: true
sku: Premium
tags:
Release: beta1
Environment: Production
- name: Remove an azure container registry
azure_rm_containerregistry:
name: testacr2
resource_group: testrg
state: absent
'''
RETURN = '''
id:
description:
- Resource ID
returned: always
type: str
sample: /subscriptions/00000000-0000-0000-0000-000000000/resourceGroups/myResourceGroup/providers/Microsoft.ContainerRegistry/registries/myRegistry
name:
description:
- Registry name
returned: always
type: str
sample: myregistry
location:
description:
- Resource location
returned: always
type: str
sample: westus
admin_user_enabled:
description:
- Is admin user enabled
returned: always
type: bool
sample: true
sku:
description:
- SKU
returned: always
type: str
sample: Standard
provisioning_state:
description:
- Provisioning state
returned: always
type: str
sample: Succeeded
login_server:
description:
- Registry login server
returned: always
type: str
sample: myregistry.azurecr.io
credentials:
description:
- Passwords defined for the registry
returned: always
type: complex
contains:
password:
description:
- password value
returned: when registry exists and C(admin_user_enabled) is set
type: str
sample: pass1value
password2:
description:
- password2 value
returned: when registry exists and C(admin_user_enabled) is set
type: str
sample: pass2value
tags:
description:
- Tags
returned: always
type: dict
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.containerregistry.models import (
Registry,
RegistryUpdateParameters,
StorageAccountProperties,
Sku,
SkuName,
SkuTier,
ProvisioningState,
PasswordName,
WebhookCreateParameters,
WebhookUpdateParameters,
WebhookAction,
WebhookStatus
)
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
except ImportError as exc:
# This is handled in azure_rm_common
pass
def create_containerregistry_dict(registry, credentials):
    '''
    Helper method to deserialize a ContainerRegistry to a dict

    :param: registry: return container registry object from Azure rest API call
    :param: credentials: return credential objects from Azure rest API call
    :return: dict of return container registry and it's credentials
    '''
    if registry is not None:
        results = dict(
            id=registry.id,
            name=registry.name,
            location=registry.location,
            admin_user_enabled=registry.admin_user_enabled,
            sku=registry.sku.name,
            provisioning_state=registry.provisioning_state,
            login_server=registry.login_server,
            credentials=dict(),
            tags=registry.tags
        )
    else:
        # No registry object: every field collapses to the empty string.
        results = dict(
            id="",
            name="",
            location="",
            admin_user_enabled="",
            sku="",
            provisioning_state="",
            login_server="",
            credentials=dict(),
            tags=""
        )
    if credentials:
        # The REST API returns exactly two admin passwords.
        first, second = credentials.passwords[0], credentials.passwords[1]
        results['credentials'] = dict(
            password=first.value,
            password2=second.value
        )
    return results
class Actions:
    """Enumeration of the reconcile actions the module can decide on."""
    NoAction = 0
    Create = 1
    Update = 2
class AzureRMContainerRegistry(AzureRMModuleBase):
    """Configuration class for an Azure RM container registry resource.

    Reconciles the requested state (present/absent) against the registry
    found in Azure, creating, updating or deleting it as needed.
    """

    def __init__(self):
        # Module parameters accepted from the playbook.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            state=dict(
                type='str',
                required=False,
                default='present',
                choices=['present', 'absent']
            ),
            location=dict(
                type='str',
                required=False
            ),
            admin_user_enabled=dict(
                type='bool',
                required=False,
                default=False
            ),
            sku=dict(
                type='str',
                required=False,
                default='Basic',
                choices=['Basic', 'Standard', 'Premium']
            )
        )

        # Populated from kwargs in exec_module().
        self.resource_group = None
        self.name = None
        self.location = None
        self.state = None
        self.sku = None
        self.tags = None

        # Lazily created SDK client (see containerregistry_mgmt_client).
        self._containerregistry_mgmt_client = None

        self.results = dict(changed=False, state=dict())

        super(AzureRMContainerRegistry, self).__init__(
            derived_arg_spec=self.module_arg_spec,
            supports_check_mode=True,
            supports_tags=True)

    def exec_module(self, **kwargs):
        """Main module execution method"""
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        response = None
        to_do = Actions.NoAction

        # Default the location to the resource group's location.
        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            self.location = resource_group.location

        # Check if the container registry instance already present in the RG
        if self.state == 'present':
            response = self.get_containerregistry()

            if not response:
                to_do = Actions.Create
            else:
                self.log('Results : {0}'.format(response))
                self.results.update(response)
                if response['provisioning_state'] == "Succeeded":
                    to_do = Actions.NoAction
                    if (self.location is not None) and self.location != response['location']:
                        to_do = Actions.Update
                    # Bug fix: the SKU drift check compared self.location to
                    # the returned SKU, so SKU changes were never detected
                    # correctly. Compare the requested SKU instead.
                    elif (self.sku is not None) and self.sku != response['sku']:
                        to_do = Actions.Update
                else:
                    # Registry exists but provisioning has not succeeded;
                    # leave it alone.
                    to_do = Actions.NoAction

            self.log("Create / Update the container registry instance")
            if self.check_mode:
                return self.results

            self.results.update(self.create_update_containerregistry(to_do))
            if to_do != Actions.NoAction:
                self.results['changed'] = True
            else:
                self.results['changed'] = False

            self.log("Container registry instance created or updated")
        elif self.state == 'absent':
            if self.check_mode:
                return self.results
            self.delete_containerregistry()
            self.log("Container registry instance deleted")

        return self.results

    def create_update_containerregistry(self, to_do):
        '''
        Creates or updates a container registry.

        :param to_do: one of the Actions constants deciding what to perform
        :return: deserialized container registry instance state dictionary
        '''
        self.log("Creating / Updating the container registry instance {0}".format(self.name))
        try:
            if to_do != Actions.NoAction:
                if to_do == Actions.Create:
                    # Validate the name with the service before creating.
                    name_status = self.containerregistry_mgmt_client.registries.check_name_availability(self.name)
                    if name_status.name_available:
                        poller = self.containerregistry_mgmt_client.registries.create(
                            resource_group_name=self.resource_group,
                            registry_name=self.name,
                            registry=Registry(
                                location=self.location,
                                sku=Sku(
                                    name=self.sku
                                ),
                                tags=self.tags,
                                admin_user_enabled=self.admin_user_enabled
                            )
                        )
                    else:
                        raise Exception("Invalid registry name. reason: " + name_status.reason + " message: " + name_status.message)
                else:
                    registry = self.containerregistry_mgmt_client.registries.get(self.resource_group, self.name)
                    if registry is not None:
                        poller = self.containerregistry_mgmt_client.registries.update(
                            resource_group_name=self.resource_group,
                            registry_name=self.name,
                            registry_update_parameters=RegistryUpdateParameters(
                                sku=Sku(
                                    name=self.sku
                                ),
                                tags=self.tags,
                                admin_user_enabled=self.admin_user_enabled
                            )
                        )
                    else:
                        raise Exception("Update registry failed as registry '" + self.name + "' doesn't exist.")
                response = self.get_poller_result(poller)
                if self.admin_user_enabled:
                    credentials = self.containerregistry_mgmt_client.registries.list_credentials(self.resource_group, self.name)
                else:
                    self.log('Cannot perform credential operations as admin user is disabled')
                    credentials = None
            else:
                response = None
                credentials = None
        # CloudError subclasses Exception, so the old (CloudError, Exception)
        # tuple was redundant.
        except Exception as exc:
            self.log('Error attempting to create / update the container registry instance.')
            self.fail("Error creating / updating the container registry instance: {0}".format(str(exc)))
        return create_containerregistry_dict(response, credentials)

    def delete_containerregistry(self):
        '''
        Deletes the specified container registry in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the container registry instance {0}".format(self.name))
        try:
            self.containerregistry_mgmt_client.registries.delete(self.resource_group, self.name).wait()
        except CloudError as e:
            self.log('Error attempting to delete the container registry instance.')
            self.fail("Error deleting the container registry instance: {0}".format(str(e)))
        return True

    def get_containerregistry(self):
        '''
        Gets the properties of the specified container registry.

        :return: deserialized container registry state dictionary, or None
                 when the registry does not exist
        '''
        self.log("Checking if the container registry instance {0} is present".format(self.name))
        found = False
        try:
            response = self.containerregistry_mgmt_client.registries.get(self.resource_group, self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("Container registry instance : {0} found".format(response.name))
        except CloudError as e:
            if e.error.error == 'ResourceNotFound':
                self.log('Did not find the container registry instance: {0}'.format(str(e)))
            else:
                self.fail('Error while trying to get container registry instance: {0}'.format(str(e)))
            response = None
        if found is True and self.admin_user_enabled is True:
            try:
                credentials = self.containerregistry_mgmt_client.registries.list_credentials(self.resource_group, self.name)
            except CloudError as e:
                self.fail('List registry credentials failed: {0}'.format(str(e)))
                # NOTE(review): self.fail() presumably aborts the module, so
                # this assignment looks unreachable -- confirm.
                credentials = None
        elif found is True and self.admin_user_enabled is False:
            credentials = None
        else:
            return None
        return create_containerregistry_dict(response, credentials)

    @property
    def containerregistry_mgmt_client(self):
        # Lazily construct (and cache) the SDK management client.
        self.log('Getting container registry mgmt client')
        if not self._containerregistry_mgmt_client:
            self._containerregistry_mgmt_client = self.get_mgmt_svc_client(
                ContainerRegistryManagementClient,
                base_url=self._cloud_environment.endpoints.resource_manager,
                api_version='2017-10-01'
            )
        return self._containerregistry_mgmt_client
def main():
    """Main execution"""
    # Instantiating the module class runs the whole reconcile cycle.
    AzureRMContainerRegistry()


if __name__ == '__main__':
    main()
| gpl-3.0 |
duniel/ido-kernel | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";

# Optional filter: a numeric argument is a pid, anything else a comm name.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]

# Nested autodict: syscalls[comm][pid][syscall_id] -> count.
syscalls = autodict()
def trace_begin():
    # Called once by perf before any events are delivered.
    # (Python 2 print statement: perf's embedded interpreter is Python 2.)
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called once by perf after the last event; emit the summary.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # Per-event handler invoked by perf for every sys_enter tracepoint.
    # Skip events that don't match the optional comm/pid filter.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return
    # autodict returns an empty dict leaf on first access, so += raises
    # TypeError the first time a (comm, pid, id) triple is seen.
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
    # Print per-comm/pid syscall counts, most frequent first.
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",

    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                            "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            # Sort by count, descending.
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                                  key = lambda(k, v): (v, k), reverse = True):
                print "  %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
podemos-info/odoo | addons/delivery/wizard/__init__.py | 10 | 1083 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import delivery_sale_order
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
js0701/chromium-crosswalk | chrome/test/chromedriver/server/server.py | 121 | 2131 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import os
import socket
import subprocess
import time
import urllib2
class Server(object):
  """A running ChromeDriver server."""

  def __init__(self, exe_path, log_path=None):
    """Starts the ChromeDriver server and waits for it to be ready.

    Args:
      exe_path: path to the ChromeDriver executable
      log_path: path to the log file
    Raises:
      RuntimeError if ChromeDriver fails to start
    """
    if not os.path.exists(exe_path):
      raise RuntimeError('ChromeDriver exe not found at: ' + exe_path)

    port = self._FindOpenPort()
    chromedriver_args = [exe_path, '--port=%d' % port]
    if log_path:
      chromedriver_args.extend(['--verbose', '--log-path=%s' % log_path])
    self._process = subprocess.Popen(chromedriver_args)
    self._url = 'http://127.0.0.1:%d' % port
    if self._process is None:
      raise RuntimeError('ChromeDriver server cannot be started')

    # Poll the /status endpoint for up to 10 seconds.
    max_time = time.time() + 10
    while not self.IsRunning():
      if time.time() > max_time:
        self._process.terminate()
        raise RuntimeError('ChromeDriver server did not start')
      time.sleep(0.1)
    # Ensure the server is shut down when the interpreter exits.
    atexit.register(self.Kill)

  def _FindOpenPort(self):
    # A port is considered free when connecting to it fails.
    for port in range(9500, 10000):
      try:
        socket.create_connection(('127.0.0.1', port), 0.2).close()
      except socket.error:
        return port
    raise RuntimeError('Cannot find open port to launch ChromeDriver')

  def GetUrl(self):
    return self._url

  def IsRunning(self):
    """Returns whether the server is up and running."""
    try:
      urllib2.urlopen(self.GetUrl() + '/status')
      return True
    except urllib2.URLError:
      return False

  def Kill(self):
    """Kills the ChromeDriver server, if it is running."""
    if self._process is None:
      return

    # Prefer a graceful shutdown; fall back to terminating the process.
    try:
      urllib2.urlopen(self.GetUrl() + '/shutdown', timeout=10).close()
    except:
      self._process.terminate()
    self._process.wait()
    self._process = None
| bsd-3-clause |
austromorph/cartogram3 | ui/cartogram_dialog.py | 1 | 1620 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
CartogramDialog
A QGIS plugin
Generate anamorphic maps
-------------------
begin : 2017-02-09
git sha : $Format:%H$
copyright : (C) 2017 by Christoph Fink
email : morph@austromorph.space
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from PyQt5 import uic
from PyQt5 import QtWidgets
# Compile the Qt Designer .ui file that defines the dialog layout; only the
# generated form class is needed, the returned base class is discarded.
FORM_CLASS, _ = uic.loadUiType(
    os.path.join(
        os.path.dirname(__file__),
        "cartogram_dialog.ui"
    ),
    from_imports=True
)
class CartogramDialog(QtWidgets.QDialog, FORM_CLASS):
    """Main dialog for the cartogram3 plugin."""

    def __init__(self, parent=None):
        """Initialise a CartogramDialog."""
        super(CartogramDialog, self).__init__(parent)
        # setupUi() comes from the generated FORM_CLASS mixin and builds
        # the widgets defined in cartogram_dialog.ui onto this dialog.
        self.setupUi(self)
| gpl-3.0 |
maellak/invenio | modules/webauthorlist/lib/authorlist_engine.py | 15 | 34338 | ## This file is part of Invenio.
## Copyright (C) 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Invenio Authorlist Data Conversion Engine. """
import time
try:
import json
except ImportError:
import simplejson as json
from xml.dom import minidom
try:
from xml.etree import ElementTree as ET
except ImportError:
import elementtree.ElementTree as ET
from invenio.webuser import page_not_authorized
from invenio.access_control_engine import acc_authorize_action
import invenio.authorlist_config as cfg
from invenio.search_engine import perform_request_search, record_exists
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibedit_utils import get_record
# from lxml import etree
from invenio.authorlist_dblayer import get_owner
from invenio.textutils import escape_latex
# Default name used when an author/organization has no affiliation name.
UNKNOWN_AFFILIATION = 'Unknown Affiliation'
# XML namespaces used in authors.xml files: 'cal' is the SLAC/SPIRES
# collaboration author-list schema, 'foaf' the Friend-of-a-Friend vocabulary.
NAMESPACES = {'cal': 'http://www.slac.stanford.edu/spires/hepnames/authors_xml/',
              'foaf': 'http://xmlns.com/foaf/0.1/',
              }
def retrieve_data_from_record(recid):
    """
    Extract data from a record id in order to import it to the Author list
    interface.

    Returns a dict with 'authors' (list of row lists), 'affiliations' and
    general paper metadata, or None when the record does not exist.
    Authors come from MARC fields 100/700; affiliations from subfield $u,
    identifiers from subfield $i.
    """
    if not record_exists(recid):
        # No such record: caller gets None (implicit).
        return
    output = {}
    DEFAULT_AFFILIATION_TYPE = cfg.OPTIONS.AUTHOR_AFFILIATION_TYPE[0]
    DEFAULT_IDENTIFIER = cfg.OPTIONS.IDENTIFIERS_LIST[0]
    IDENTIFIERS_MAPPING = cfg.OPTIONS.IDENTIFIERS_MAPPING
    bibrecord = get_record(recid)
    try:
        paper_title = get_fieldvalues(recid, '245__a')[0]
    except IndexError:
        paper_title = ""
    # NOTE(review): get_fieldvalues returns a list; the IndexError guards
    # below never fire for the two list assignments — presumably kept for
    # symmetry with the title lookup above.
    try:
        collaboration_name = get_fieldvalues(recid, '710__g')
    except IndexError:
        collaboration_name = ""
    try:
        experiment_number = get_fieldvalues(recid, '693__e')
    except IndexError:
        experiment_number = ""
    # First author (100) followed by additional authors (700).
    record_authors = bibrecord.get('100', [])
    record_authors.extend(bibrecord.get('700', []))
    author_list = []
    unique_affiliations = []
    for i, field_instance in enumerate(record_authors, 1):
        family_name = ""
        given_name = ""
        name_on_paper = ""
        status = ""
        affiliations = []
        identifiers = []
        field = field_instance[0]
        for subfield_code, subfield_value in field:
            if subfield_code == "a":
                # "$a" holds "Family, Given"; tolerate names without a comma.
                try:
                    family_name = subfield_value.split(',')[0]
                    given_name = subfield_value.split(',')[1].lstrip()
                except:
                    pass
                name_on_paper = subfield_value
            elif subfield_code == "u":
                affiliations.append([subfield_value, DEFAULT_AFFILIATION_TYPE])
                unique_affiliations.append(subfield_value)
            elif subfield_code == "i":
                # FIXME This will currently work only with INSPIRE IDs
                id_prefix = subfield_value.split("-")[0]
                if id_prefix in IDENTIFIERS_MAPPING:
                    identifiers.append([subfield_value, IDENTIFIERS_MAPPING[id_prefix]])
        # Guarantee at least one identifier / affiliation entry per author.
        if not identifiers:
            identifiers.append(['', DEFAULT_IDENTIFIER])
        if not affiliations:
            affiliations.append([UNKNOWN_AFFILIATION, DEFAULT_AFFILIATION_TYPE])
            unique_affiliations.append(UNKNOWN_AFFILIATION)
        author_list.append([
            i, # Row number
            '', # Place holder for the web interface
            family_name,
            given_name,
            name_on_paper,
            status,
            affiliations,
            identifiers
        ])
    unique_affiliations = list(set(unique_affiliations))
    output.update({'authors': author_list})
    # Generate all the affiliation related information
    affiliation_list = []
    for i, affiliation in enumerate(unique_affiliations, 1):
        # Resolve the acronym against the Institutions collection; only a
        # unique match is trusted for the full name.
        institution = perform_request_search(c="Institutions", p='110__u:"' + affiliation + '"')
        full_name = affiliation
        if len(institution) == 1:
            full_name_110_a = get_fieldvalues(institution[0], '110__a')
            if full_name_110_a:
                full_name = str(full_name_110_a[0])
            full_name_110_b = get_fieldvalues(institution[0], '110__b')
            if full_name_110_b:
                full_name += ', ' + str(full_name_110_b[0])
        affiliation = [i,
                       '',
                       affiliation,
                       '',
                       full_name,
                       '',
                       True,
                       '']
        affiliation_list.append(affiliation)
    output.update({'affiliations': affiliation_list})
    output.update({'paper_title': paper_title,
                   'collaboration': collaboration_name,
                   'experiment_number': experiment_number,
                   'last_modified': int(time.time()),
                   'reference_ids': [],
                   'paper_id': '1'})
    return output
def retrieve_data_from_xml(xml):
    """
    Extract data from an authors.xml string to import it to the Author list
    interface.

    Returns a dict with 'authors', 'affiliations' and general paper metadata
    in the same shape as retrieve_data_from_record().
    """
    def get_element_value_helper(element, tag):
        """
        Helper that takes an element and returns text from the first node
        of that element
        """
        text = ''
        elements_list = element.getElementsByTagName(tag)
        if elements_list:
            child = elements_list[0].firstChild
            if child:
                text = child.nodeValue
        return text
    output = {}
    # Default value for the "Affiliation" column is always the first value
    # from the configured affiliation-type list.
    type_of_affiliation = cfg.OPTIONS.AUTHOR_AFFILIATION_TYPE
    # Save the default identifier - first element from the list of identifiers
    default_identifier = cfg.OPTIONS.IDENTIFIERS_LIST[0]
    # Save identifiers mapping
    identifiers_mapping = cfg.OPTIONS.IDENTIFIERS_MAPPING
    parsed_xml = minidom.parseString(xml)
    # Extract collaboration name and experiment number
    collaboration_name = ''
    experiment_number = ''
    collaborations = parsed_xml.getElementsByTagName('cal:collaborations')
    if len(collaborations) == 1:
        collaboration_name = get_element_value_helper(collaborations[0], 'foaf:name')
        experiment_number = get_element_value_helper(collaborations[0], 'cal:experimentNumber')
    # Extract affiliations
    affiliation_list = []
    affiliation_id_name = {}
    affiliations = parsed_xml.getElementsByTagName('foaf:Organization')
    for i, affiliation in enumerate(affiliations):
        affiliation_id = affiliation.getAttribute('id') or ''
        affiliation_name = get_element_value_helper(affiliation, 'foaf:name')
        affiliation_acronym = get_element_value_helper(affiliation, 'cal:orgName')
        if not affiliation_acronym:
            # No acronym ? Use the name instead
            affiliation_acronym = affiliation_name
        affiliation_address = get_element_value_helper(affiliation, 'cal:orgAddress')
        if not affiliation_address:
            affiliation_address = affiliation_name
        affiliation_domain = get_element_value_helper(affiliation, 'cal:orgDomain')
        # saving {id:name}, it will be needed for authors affiliations
        if affiliation_id:
            # NOTE: itervalues() is Python 2 only, consistent with the rest
            # of this module (see long() below).
            if affiliation_acronym in affiliation_id_name.itervalues():
                # in case we have a duplicate of acronym, make it unique by
                # appending the iteration number
                affiliation_acronym += str(i+1)
            affiliation_id_name[affiliation_id] = affiliation_acronym
        affiliation_info = [long(i+1),
                            '',
                            affiliation_acronym,
                            '',
                            affiliation_address,
                            affiliation_domain,
                            True,
                            '']
        affiliation_list.append(affiliation_info)
    # Extract authors
    author_list = []
    authors = parsed_xml.getElementsByTagName('foaf:Person')
    for i, author in enumerate(authors):
        first_name = get_element_value_helper(author, 'foaf:givenName')
        # In case there was no given name under previous field, we search for initials in cal:authorNamePaperGiven
        if not first_name:
            first_name = get_element_value_helper(author, 'cal:authorNamePaperGiven')
        last_name = get_element_value_helper(author, 'foaf:familyName')
        full_name = get_element_value_helper(author, 'cal:authorNamePaper')
        status = get_element_value_helper(author, 'cal:authorStatus')
        # Extract author affiliations as a list of [name, type] pairs.
        author_affiliations = []
        if author.getElementsByTagName('cal:authorAffiliations'):
            for afil in author.getElementsByTagName('cal:authorAffiliations')[0].getElementsByTagName('cal:authorAffiliation'):
                a_id = afil.getAttribute('organizationid')
                if afil.getAttribute('connection') in type_of_affiliation:
                    affiliation_type = afil.getAttribute('connection')
                else:
                    affiliation_type = type_of_affiliation[0]
                author_affiliations.append([affiliation_id_name.get(a_id, UNKNOWN_AFFILIATION), affiliation_type])
        else:
            # BUGFIX: keep the list-of-pairs shape ([[name, type]]) used by
            # the branch above and by retrieve_data_from_record(); the
            # previous flat [name, type] value broke consumers iterating
            # over affiliation pairs.
            author_affiliations = [[UNKNOWN_AFFILIATION, type_of_affiliation[0]]]
        identifiers = []
        if author.getElementsByTagName('cal:authorids'):
            for author_id in author.getElementsByTagName('cal:authorids')[0].getElementsByTagName('cal:authorid'):
                if author_id.getAttribute('source') in identifiers_mapping and author_id.firstChild:
                    identifiers.append([
                        author_id.firstChild.nodeValue,
                        identifiers_mapping[author_id.getAttribute('source')]])
        if not identifiers:
            identifiers.append(['', default_identifier])
        author_info = [long(i+1),
                       '',
                       last_name,
                       first_name,
                       full_name,
                       status,
                       author_affiliations,
                       identifiers]
        author_list.append(author_info)
    output.update({'authors': author_list})
    output.update({'affiliations': affiliation_list})
    # Add generic information about the paper
    output.update({'collaboration': collaboration_name,
                   'experiment_number': experiment_number,
                   'last_modified': int(time.time()),
                   'reference_ids': [],
                   'paper_id': '1',
                   'paper_title': ''})
    return output
def user_authorization(req, ln):
    """Check that the request is allowed to run the author list pages.

    Returns None when the user holds the 'runauthorlist' right, otherwise
    the standard "not authorized" page.
    """
    code, message = acc_authorize_action(req, 'runauthorlist')
    if code == 0:
        return None
    return page_not_authorized(req=req, referer='/authorlist/',
                               text=message, navmenuid="authorlist")
def check_user_rights(user_id, paper_id):
    """Return True when *user_id* may modify the paper *paper_id*.

    An empty paper_id means the user is creating a brand-new record, which
    anyone may do; an existing paper may only be modified by its owner.
    """
    if not paper_id:
        return True
    return user_id == get_owner(paper_id)
class Converter(object):
    """Abstract base class for author-list output converters.

    Subclasses override CONTENT_TYPE / FILE_NAME and implement dump() and
    dumps(); the base class itself cannot be instantiated.
    """
    CONTENT_TYPE = 'text/plain'
    FILE_NAME = 'converted.txt'

    def __init__(self):
        # Instantiating the abstract base directly is a programming error.
        raise NotImplementedError

    def dump(self, data):
        """Convert *data* to the target representation (abstract)."""
        raise NotImplementedError

    def dumps(self, data):
        """Convert *data* to a string (abstract)."""
        raise NotImplementedError
class NA62Latex(Converter):
    """Stub converter for the NA62 LaTeX format (not implemented yet)."""
    FILE_NAME = 'la.tex'

    def __init__(self):
        # Concrete subclass: allow instantiation (the base __init__ raises).
        pass

    def dump(self, data):
        """Not implemented; returns None."""
        pass

    def dumps(self, data):
        """Not implemented; returns None."""
        pass
class ElsevierArticle(Converter):
    """Convert an author list to an Elsevier elsarticle LaTeX fragment.

    The input is first serialized to authors.xml (via the AuthorsXML
    converter), parsed into a nested dict structure, then rendered as a
    LaTeX document that clusters authors by their primary affiliation and
    footnotes secondary affiliations.

    NOTE(review): uses Python 2 idioms (``dict.keys()[0]``, ``.encode``
    returning str) — not Python 3 compatible as-is.
    """
    CONTENT_TYPE = 'text/plain'
    FILE_NAME = 'elsarticle.tex'
    # Clark-notation namespace prefixes used by ElementTree tags.
    cal = '{http://www.slac.stanford.edu/spires/hepnames/authors_xml/}'
    foaf = '{http://xmlns.com/foaf/0.1/}'
    def __init__(self):
        pass
    def dictionary_to_list(self, node):
        """Wrap the root *node* of an ElementTree as a nested dict/list."""
        res = {}
        res[node.tag] = []
        self.xmltodict(node, res[node.tag])
        reply = {}
        # NOTE(review): key is 'attribs' here but 'attributes' in
        # xmltodict() below — confirm consumers rely on this asymmetry.
        reply[node.tag] = {'value': res[node.tag], 'attribs': node.attrib, 'tail': node.tail}
        return reply
    def xmltodict(self, node, res):
        """Recursively append {tag: {value, attributes, tail}} dicts to *res*."""
        rep = {}
        if len(node):
            # Branch node: recurse into each child.
            for n in list(node):
                rep[node.tag] = []
                value = self.xmltodict(n, rep[node.tag])
                if len(n):
                    # Child has children of its own: wrap the recursion result.
                    value = {'value': rep[node.tag], 'attributes': n.attrib, 'tail': n.tail}
                    res.append({n.tag: value})
                else:
                    # Leaf child: the recursion already appended its dict.
                    res.append(rep[node.tag][0])
        else:
            # Leaf node: record its text content.
            value = {}
            value = {'value': node.text, 'attributes': node.attrib, 'tail': node.tail}
            res.append({node.tag: value})
        return
    def get_organizations(self, organizations):
        """Map organization id -> orgAddress text (UTF-8 encoded str)."""
        organization_dict = dict()
        for orgs_element in organizations:
            key = orgs_element.keys()[0]
            if key == self.foaf + 'Organization':
                for name_element in orgs_element[key]['value']:
                    value_key = name_element.keys()[0]
                    if value_key == self.cal + 'orgAddress':
                        if name_element[value_key]['value']:
                            organization_dict[orgs_element[key]['attributes']['id']] = name_element[value_key]['value'].encode('utf-8')
                        else:
                            organization_dict[orgs_element[key]['attributes']['id']] = ''
                        # Only the first orgAddress per organization is used.
                        break
        return organization_dict
    def get_authors(self, authors):
        """Return [[(given, family), (affiliation ids...)], ...] per Person."""
        author_list = []
        for auth_element in authors:
            key = auth_element.keys()[0]
            if key == self.foaf + 'Person':
                affiliation_list = []
                given_name = ''
                family_name = ''
                for name_element in auth_element[key]['value']:
                    value_key = name_element.keys()[0]
                    if value_key == self.foaf + 'familyName' and name_element[value_key]['value']:
                        family_name = name_element[value_key]['value'].encode('utf-8')
                    elif value_key == self.foaf + 'givenName' and name_element[value_key]['value']:
                        given_name = name_element[value_key]['value'].encode('utf-8')
                    elif value_key == self.cal + 'authorAffiliations':
                        for aff_element in name_element[value_key]['value']:
                            aff_key = aff_element.keys()[0]
                            if aff_key == self.cal + 'authorAffiliation':
                                # Only full memberships are rendered.
                                if aff_element[aff_key]['attributes']['connection'] == 'Affiliated with':
                                    affiliation_list.append(aff_element[aff_key]['attributes']['organizationid'])
                author_list.append([(given_name, family_name), tuple(affiliation_list)])
        return author_list
    def dump(self, data):
        """Render *data* (author-list JSON string) as an elsarticle document."""
        # Round-trip through the authors.xml representation first.
        AuthorsXMLConverter = Converters.get('authorsxml')
        AuthorsXML = dumps(data, AuthorsXMLConverter)
        root = ET.fromstring(AuthorsXML)
        tree = ET.ElementTree(root)
        res = self.dictionary_to_list(tree.getroot())
        collaboration_author_list_values = res['collaborationauthorlist']['value']
        organization_dict = dict()
        author_list = []
        for element in collaboration_author_list_values:
            key = element.keys()[0]
            # if the value of the key is empty, start next loop cycle
            if element[key]['value'] is None:
                continue
            if key == self.cal + 'organizations':
                organization_dict = self.get_organizations(element[key]['value'])
            elif key == self.cal + 'authors':
                author_list = self.get_authors(element[key]['value'])
        # Cluster authors by their first (primary) affiliation; any further
        # affiliations travel with the author and become footnotes.
        clusters = []
        organization_codes = []
        for element in author_list:
            if len(element[1]) >= 1:
                organization_code = element[1][0]
                other_affiliations = element[1][1:]
                author = [element[0]]
                if other_affiliations:
                    author.extend(other_affiliations)
                # if this organization already exists in the cluster
                if organization_code in organization_codes:
                    for cluster in clusters:
                        if cluster[0] == organization_code:
                            cluster.append(author)
                            break
                else:
                    organization_codes.append(organization_code)
                    clusters.append([organization_code, author])
        myout = ""
        myout += "\\documentclass[a4paper,12pt]{article}\r\n"
        myout += "\\usepackage[utf8]{inputenc}\r\n"
        myout += "\\begin{document}\r\n"
        myout += "\\begin{center}\r\n"
        myout += "{\\Large Collaboration}\\\\\r\n"
        myout += "\\vspace{2mm}\r\n%\r\n"
        primary_output_string = ""
        # Footnote marks for secondary affiliations are numbered in order of
        # first appearance and reused for repeats.
        secondary_affiliation_count = 1
        secondary_affiliations = ""
        secondary_affiliations_pos = {}
        for data in clusters:
            primary_output = []
            organization_code = data[0]
            for author in data[1:]:
                name = " " + str(escape_latex(author[0][0])) + '~' + str(escape_latex(author[0][1]))
                if len(author) > 1:
                    for sec_affiliation in author[1:]:
                        if sec_affiliation in organization_dict.keys():
                            if organization_dict[sec_affiliation] in secondary_affiliations_pos.keys():
                                name += "$\\,$\\footnotemark[" + str(secondary_affiliations_pos[organization_dict[sec_affiliation]]) + "]"
                            else:
                                name += "$\\,$\\footnotemark[" + str(secondary_affiliation_count) + "]"
                                secondary_affiliations += "%\r\n\\footnotetext[" + str(secondary_affiliation_count) + "]{" + str(escape_latex(organization_dict[sec_affiliation])) + "}\r\n"
                                secondary_affiliations_pos[organization_dict[sec_affiliation]] = secondary_affiliation_count
                                secondary_affiliation_count += 1
                primary_output.append(name)
            if organization_dict.get(data[0]):
                organization = organization_dict.get(data[0])
            else:
                organization = UNKNOWN_AFFILIATION
            primary_output_string += ',\r\n'.join(primary_output) + " \\\\\r\n{\\em \\small " + str(escape_latex(organization)) + "} \\\\[0.2cm]\r\n%\r\n"
        myout += primary_output_string
        myout += "\\end{center}\r\n"
        myout += "\\setcounter{footnote}{0}\r\n"
        myout += secondary_affiliations
        myout += "\\end{document}\r\n"
        return myout
    def dumps(self, data):
        """Same as dump(): the result is already a string."""
        return self.dump(data)
class APSpaper(Converter):
    """Convert an author list to APS/REVTeX \\author and \\affiliation lines.

    Works by serializing to authors.xml first, then walking the XML with
    ElementTree.  NOTE(review): Python 2 style (``.encode`` results used as
    str) — not Python 3 compatible as-is.
    """
    CONTENT_TYPE = 'text/plain'
    FILE_NAME = 'APSpaper.tex'
    def __init__(self):
        pass
    def dump(self, data):
        """Render *data* (author-list JSON string) as REVTeX markup."""
        AuthorsXMLConverter = Converters.get('authorsxml')
        AuthorsXML = dumps(data, AuthorsXMLConverter)
        organizations_list = []
        authors_list = []
        root = ET.fromstring(AuthorsXML)
        # Collect [id, name] for every organization.
        for organization in root.findall('{%s}organizations/{%s}Organization' % (NAMESPACES['cal'], NAMESPACES['foaf'])):
            org_id = organization.attrib['id']
            org_name = ''
            if organization.find('{%s}name' % NAMESPACES['foaf']) is not None:
                org_name = organization.find('{%s}name' % NAMESPACES['foaf']).text or ''
            organizations_list.append([org_id, org_name.encode('utf-8')])
        # Collect [paper name, [affiliation ids]] for every author.
        for author in root.findall('{%s}authors/{%s}Person' % (NAMESPACES['cal'], NAMESPACES['foaf'])):
            author_name = ''
            author_affiliations = []
            if author.find('{%s}authorNamePaper' % NAMESPACES['cal']) is not None:
                author_name = author.find('{%s}authorNamePaper' % NAMESPACES['cal']).text or ''
            for affil in author.findall('{%(cal)s}authorAffiliations/{%(cal)s}authorAffiliation' % {'cal': NAMESPACES['cal']}):
                author_affiliations.append(affil.attrib['organizationid'])
            authors_list.append([author_name.encode('utf-8'), author_affiliations])
        # Emit one \author line per author (superscripted affiliation ids)
        # followed by one \affiliation line per organization.
        myout = ''
        for author in authors_list:
            myout += '\\author{' + str(escape_latex(author[0])) + '$^{' + ','.join(author[1]) + '}$}\r\n'
        for org in organizations_list:
            myout += '\\affiliation{$^{' + str(org[0]) + '}$ ' + str(escape_latex(org[1])) + '}\r\n'
        return myout
    def dumps(self, data):
        """Same as dump(): the result is already a string."""
        return self.dump(data)
class AuthorsXML(Converter):
    """Convert the author-list JSON into the SLAC/SPIRES authors.xml format.

    dump() returns a minidom Document; dumps() returns the pretty-printed
    UTF-8 serialization (with a whitespace-fixed writexml, see below).
    Field names in the parsed JSON come from invenio.authorlist_config
    (the ``cfg.JSON.*`` constants).
    """
    CONTENT_TYPE = 'text/xml'
    FILE_NAME = 'authors.xml'
    def __init__(self):
        pass
    def create_affiliation(self, document, parsed, organization_ids):
        """Build one <cal:authorAffiliation> element from a parsed pair."""
        affiliation = document.createElement('cal:authorAffiliation')
        affiliation_acronym = parsed[cfg.JSON.AFFILIATION_ACRONYM]
        affiliation_status = parsed[cfg.JSON.AFFILIATION_STATUS]
        if affiliation_acronym not in organization_ids:
            # Unknown acronym: emit a self-describing error marker rather
            # than failing the whole export.
            affiliation.setAttribute('organizationid',
                                     'Error - there is no organization called ' +
                                     affiliation_acronym)
        else:
            affiliation.setAttribute('organizationid',
                                     organization_ids[affiliation_acronym])
        affiliation.setAttribute('connection', affiliation_status)
        return affiliation
    def create_identifier(self, document, parsed):
        """Build one <cal:authorid> element (source attribute + text value)."""
        identifier = document.createElement('cal:authorid')
        identifier_number = parsed[cfg.JSON.IDENTIFIER_NUMBER]
        identifier_name = parsed[cfg.JSON.IDENTIFIER_NAME]
        identifier.setAttribute('source', identifier_name)
        identifier_text = document.createTextNode(identifier_number)
        identifier.appendChild(identifier_text)
        return identifier
    def create_authors(self, document, root, parsed, organization_ids):
        """Append the <cal:authors> container with one entry per author."""
        parsed_authors = parsed[cfg.JSON.AUTHORS_KEY]
        authors = document.createElement('cal:authors')
        root.appendChild(authors)
        for parsed_author in parsed_authors:
            author = self.create_author(document, parsed_author, organization_ids)
            authors.appendChild(author)
    def create_author(self, document, parsed, organization_ids):
        """Build one <foaf:Person> element with names, status, affiliations
        and identifiers; optional fields are skipped when empty."""
        author = document.createElement('foaf:Person')
        # paper name
        paper_name = document.createElement('cal:authorNamePaper')
        paper_name_info = parsed[cfg.JSON.PAPER_NAME]
        paper_name_text = document.createTextNode(paper_name_info)
        paper_name.appendChild(paper_name_text)
        author.appendChild(paper_name)
        # given name (only when non-empty per the cfg.EMPTY regex)
        given_name_info = parsed[cfg.JSON.GIVEN_NAME]
        if (cfg.EMPTY.match(given_name_info) is None):
            given_name = document.createElement('foaf:givenName')
            given_name_text = document.createTextNode(given_name_info)
            given_name.appendChild(given_name_text)
            author.appendChild(given_name)
        # family name
        family_name_info = parsed[cfg.JSON.FAMILY_NAME]
        if (cfg.EMPTY.match(family_name_info) is None):
            family_name = document.createElement('foaf:familyName')
            family_name_text = document.createTextNode(family_name_info)
            family_name.appendChild(family_name_text)
            author.appendChild(family_name)
        # status
        author_status_info = parsed[cfg.JSON.STATUS]
        if (author_status_info):
            author_status = document.createElement('cal:authorStatus')
            author_status_text = document.createTextNode(author_status_info)
            author_status.appendChild(author_status_text)
            author.appendChild(author_status)
        # collaboration (always the single configured collaboration id)
        collaboration = document.createElement('cal:authorCollaboration')
        collaboration.setAttribute('collaborationid', cfg.AuthorsXML.COLLABORATION_ID)
        author.appendChild(collaboration)
        # affiliations
        affiliations = document.createElement('cal:authorAffiliations')
        author.appendChild(affiliations)
        for parsed_affiliation in parsed[cfg.JSON.AFFILIATIONS]:
            affiliation = self.create_affiliation(document, parsed_affiliation, organization_ids)
            affiliations.appendChild(affiliation)
        # identifiers
        identifiers = document.createElement('cal:authorids')
        author.appendChild(identifiers)
        for parsed_identifier in parsed[cfg.JSON.IDENTIFIERS]:
            identifier = self.create_identifier(document, parsed_identifier)
            identifiers.appendChild(identifier)
        return author
    def create_collaboration(self, document, root, parsed):
        """Append <cal:collaborations> with name and experiment number."""
        # collaborations
        collaborations = document.createElement('cal:collaborations')
        collaboration = document.createElement('cal:collaboration')
        collaboration.setAttribute('id', cfg.AuthorsXML.COLLABORATION_ID)
        collaborations.appendChild(collaboration)
        # name
        name = document.createElement('foaf:name')
        name_info = parsed[cfg.JSON.COLLABORATION]
        name_text = document.createTextNode(name_info)
        name.appendChild(name_text)
        collaboration.appendChild(name)
        # experiment number (optional)
        experiment_number_info = parsed[cfg.JSON.EXPERIMENT_NUMBER]
        if (cfg.EMPTY.match(experiment_number_info) is None):
            experiment_number = document.createElement('cal:experimentNumber')
            experiment_number_text = document.createTextNode(experiment_number_info)
            experiment_number.appendChild(experiment_number_text)
            collaboration.appendChild(experiment_number)
        root.appendChild(collaborations)
    def create_document(self):
        """Return a fresh (document, root) pair with the XML namespaces set."""
        dom = minidom.getDOMImplementation()
        document = dom.createDocument(None, 'collaborationauthorlist', None)
        root = document.documentElement
        root.setAttribute('xmlns:foaf', 'http://xmlns.com/foaf/0.1/')
        root.setAttribute('xmlns:cal', 'http://www.slac.stanford.edu/spires/hepnames/authors_xml/')
        return document, root
    def create_header(self, document, root, parsed):
        """Append creation date and publication-reference elements."""
        # creation date
        creation_date = document.createElement('cal:creationDate')
        creation_date_info = time.strftime(cfg.AuthorsXML.TIME_FORMAT)
        creation_date_text = document.createTextNode(creation_date_info)
        creation_date.appendChild(creation_date_text)
        root.appendChild(creation_date)
        # publication reference
        for reference_info in parsed[cfg.JSON.REFERENCE_IDS]:
            reference = document.createElement('cal:publicationReference')
            reference_text = document.createTextNode(reference_info)
            reference.appendChild(reference_text)
            root.appendChild(reference)
    def create_organizations(self, document, root, parsed, ids):
        """Append the <cal:organizations> container with each organization."""
        parsed_organizations = parsed[cfg.JSON.AFFILIATIONS_KEY]
        # organizations container
        organizations = document.createElement('cal:organizations')
        root.appendChild(organizations)
        # create individual organizations and append them
        for parsed_organization in parsed_organizations:
            organization = self.create_organization(document, parsed_organization, ids)
            organizations.appendChild(organization)
    def create_organization(self, document, parsed, ids):
        """Build one <foaf:Organization> element from a parsed affiliation."""
        acronym = parsed[cfg.JSON.ACRONYM]
        organization = document.createElement('foaf:Organization')
        organization.setAttribute('id', ids[acronym])
        # create the domain node if field is set
        domain_info = parsed[cfg.JSON.DOMAIN]
        if (cfg.EMPTY.match(domain_info) is None):
            domain = document.createElement('cal:orgDomain')
            domain_text = document.createTextNode(domain_info)
            domain.appendChild(domain_text)
            organization.appendChild(domain)
        # organization name, no presence check, already done on the client side
        name = document.createElement('foaf:name')
        name_info = parsed[cfg.JSON.NAME]
        name_text = document.createTextNode(name_info)
        name.appendChild(name_text)
        organization.appendChild(name)
        # organization acronym
        org_acronym = document.createElement('cal:orgName')
        org_acronym_text = document.createTextNode(acronym)
        org_acronym.appendChild(org_acronym_text)
        organization.appendChild(org_acronym)
        # organization identifier: SPIRES id when present, else fall back
        # to the plain name (used for the address below).
        org_name_info = parsed[cfg.JSON.SPIRES_ID]
        if (cfg.EMPTY.match(org_name_info) is None):
            org_name = document.createElement('cal:orgName')
            org_name.setAttribute('source', cfg.AuthorsXML.SPIRES)
            org_name_text = document.createTextNode(org_name_info)
            org_name.appendChild(org_name_text)
            organization.appendChild(org_name)
        else:
            org_name_info = parsed[cfg.JSON.NAME]
        org_address = document.createElement('cal:orgAddress')
        org_address_text = document.createTextNode(org_name_info)
        org_address.appendChild(org_address_text)
        organization.appendChild(org_address)
        # membership
        org_status_info = parsed[cfg.JSON.MEMBER]
        if (not org_status_info):
            org_status_info = cfg.AuthorsXML.NONMEMBER
        else:
            org_status_info = cfg.AuthorsXML.MEMBER
        org_status = document.createElement('cal:orgStatus')
        org_status_text = document.createTextNode(org_status_info)
        org_status.appendChild(org_status_text)
        organization.appendChild(org_status)
        # umbrella organization/group (only when it maps to a known id)
        group_info = parsed[cfg.JSON.UMBRELLA]
        if (cfg.EMPTY.match(group_info) is None):
            if group_info in ids.keys():
                group = document.createElement('cal:group')
                group.setAttribute('with', ids[group_info])
                organization.appendChild(group)
        return organization
    def dump(self, data):
        """Parse the JSON string *data* and return the minidom Document."""
        parsed = json.loads(data)
        document, root = self.create_document()
        affiliations = parsed[cfg.JSON.AFFILIATIONS_KEY]
        organization_ids = self.generate_organization_ids(affiliations)
        self.create_header(document, root, parsed)
        self.create_collaboration(document, root, parsed)
        self.create_organizations(document, root, parsed, organization_ids)
        self.create_authors(document, root, parsed, organization_ids)
        return document
    def dumps(self, data):
        """Return the pretty-printed UTF-8 bytes of the authors.xml document.

        NOTE(review): this monkeypatches minidom.Element.writexml globally
        (process-wide side effect) to avoid spurious whitespace inside
        text-only elements.
        """
        # FIX for toprettyxml function from website:
        # http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-and-silly-whitespace/
        def fixed_writexml(self, writer, indent="", addindent="", newl=""):
            # indent = current indentation
            # addindent = indentation to add to higher levels
            # newl = newline string
            writer.write(indent+"<" + self.tagName)
            attrs = self._get_attributes()
            a_names = attrs.keys()
            a_names.sort()
            for a_name in a_names:
                writer.write(" %s=\"" % a_name)
                minidom._write_data(writer, attrs[a_name].value)
                writer.write("\"")
            if self.childNodes:
                # A single text child is written inline, without the extra
                # indentation/newline the stock writexml would insert.
                if len(self.childNodes) == 1 and self.childNodes[0].nodeType == minidom.Node.TEXT_NODE:
                    writer.write(">")
                    self.childNodes[0].writexml(writer, "", "", "")
                    writer.write("</%s>%s" % (self.tagName, newl))
                    return
                writer.write(">%s" % (newl))
                for node in self.childNodes:
                    node.writexml(writer, indent + addindent, addindent, newl)
                writer.write("%s</%s>%s" % (indent, self.tagName, newl))
            else:
                writer.write("/>%s" % (newl))
        # replace minidom's function with ours
        minidom.Element.writexml = fixed_writexml
        # End of FIX
        return self.dump(data).toprettyxml(indent=' ', newl='\r\n', encoding='utf-8')
    def generate_organization_ids(self, organizations):
        """Map each organization acronym to an id of the kind 'o[index]'."""
        ids = {}
        # Map each organization acronym to an id of the kind 'o[index]'
        for index, organization in enumerate(organizations):
            acronym = organization[cfg.JSON.ACRONYM]
            ids[acronym] = cfg.AuthorsXML.ORGANIZATION_ID + str(index)
        return ids
class Converters:
    """Registry mapping format keys to their converter classes."""
    __converters__ = {'authorsxml': AuthorsXML, 'elsevier': ElsevierArticle, 'aps': APSpaper}

    @classmethod
    def get(cls, format):
        """Return the converter class registered for *format*, or None."""
        try:
            return cls.__converters__[format]
        except KeyError:
            return None
def dump(data, converter):
    """Instantiate the *converter* class and run its dump() over *data*."""
    instance = converter()
    return instance.dump(data)
def dumps(data, converter):
    """Instantiate the *converter* class and run its dumps() over *data*."""
    instance = converter()
    return instance.dumps(data)
| gpl-2.0 |
acasadoalonso/SWiface-PHP | soa2pil.py | 1 | 9949 | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
import sys
import json
import urllib.request, urllib.error, urllib.parse
import base64
import datetime
import time
import hmac
import hashlib
import base64
import OpenSSL
import uritemplate
import pycountry
import math
import os
import socket
import config
from simplehal import HalDocument, Resolver
from pprint import pprint
#-------------------------------------------------------------------------------------------------------------------#
##################################################################
def getapidata(url, auth):
    """GET *url* with the SoaringSpot HMAC *auth* header; return parsed JSON."""
    headers = {
        'Authorization': auth,
        'Accept': 'application/json',
        'Content-Type': 'application/hal+json',
    }
    request = urllib.request.Request(url, headers=headers)
    response = urllib.request.urlopen(request)
    return json.load(response)
###################################################################
# get the data from the soaring spot and return it as a HAL document
def gdata(url, key, prt='no'):
    """Fetch *url* from the SoaringSpot API and return the HAL data for *key*.

    Uses the module-level ``auth`` header and ``apiurl`` base.  When *prt*
    is the string 'yes', the raw JSON payload is pretty-printed first.
    """
    global auth  # auth and apiurl are globals
    global apiurl
    payload = getapidata(url, auth)
    if prt == 'yes':
        print(json.dumps(payload, indent=4))
    # Convert to a HAL document and pull out the relation's data.
    document = HalDocument.from_python(payload)
    return HalDocument.get_data(document, apiurl + 'rel/' + key)
def getemb(base, ctype, api=None):
    """Return the embedded resources of relation *ctype* from HAL dict *base*.

    :param base: a parsed HAL document (dict) containing an '_embedded' map.
    :param ctype: relation name, appended to the API base URL plus 'rel/'.
    :param api: optional API base URL; defaults to the module-level
        ``apiurl`` global, preserving the original behaviour.
    :raises KeyError: when the relation is not embedded in *base*.
    """
    if api is None:
        api = apiurl  # module-level global set during startup
    return base['_embedded'][api + 'rel/' + ctype]
def getlinks(base, ctype, api=None):
    """Return the link href of relation *ctype* from HAL dict *base*.

    :param base: a parsed HAL document (dict) containing a '_links' map.
    :param ctype: relation name, appended to the API base URL plus 'rel/'.
    :param api: optional API base URL; defaults to the module-level
        ``apiurl`` global, preserving the original behaviour.
    :raises KeyError: when the relation is not linked from *base*.
    """
    if api is None:
        api = apiurl  # module-level global set during startup
    return base['_links'][api + 'rel/' + ctype]['href']
###################################################################
# see if index day is requestedd
day = sys.argv[1:]
if day and day[0].isdigit():  # if provided and numeric
    idx = int(day[0])  # index day
else:
    idx = 0
clsreq = sys.argv[2:]  # if class is requested
if clsreq:
    classreq = clsreq[0]  # class requested
    print("TTT", classreq)
else:
    classreq = ' '  # none
# ---------------------------------------------------------------- #
print("\n\n")
print("Utility to get the api.soaringspot.com data and extract all the PILOT information needed for FlyTool V1.0")
print("==========================================================================================================\n\n")
print("Index day: ", idx, " Class requested: ", classreq)
print("Reading data from clientid/secretkey files")
# ===== SETUP parameters =======================#
# where to find the SQLITE3 database
SWdbpath = config.DBpath
initials = config.Initials  # initials of the files generated
cwd = os.getcwd()  # get the current working directory
# where to store the JSON files
cucpath = config.cucFileLocation
# where to find the clientid and secretkey files
secpath = cwd+"/SoaringSpot/"
apiurl = "http://api.soaringspot.com/"  # soaringspot API URL
rel = "v1"  # we use API version 1
taskType = "SailplaneRacing"  # race type
# ==============================================#
hostname = socket.gethostname()  # hostname as control
print("Hostname:", hostname)
start_time = time.time()  # get the time now
utc = datetime.datetime.utcnow()  # the UTC time
# print the time for information only
print("UTC Time is now:", utc)
date = utc.strftime("%Y-%m-%dT%H:%M:%SZ")  # timestamp used to sign the request
print(date)
local_time = datetime.datetime.now()  # the local time
print("Local Time is now:", local_time)  # print the time for information only
fl_date_time = local_time.strftime("%Y%m%d")  # get the local time
print("Config params. SECpath:", secpath)
# Random nonce for the HMAC signature (os.urandom replaces OpenSSL.rand)
nonce = base64.b64encode(os.urandom(36))
# Read the API credentials.  'with' closes the handles: the original
# open()/read() pairs leaked two file descriptors.
with open(secpath+"clientid") as f:
    # client id with trailing whitespace stripped
    client = f.read().rstrip()
with open(secpath+"secretkey") as f:
    # secret key, stripped and encoded for hmac
    secretkey = f.read().rstrip().encode(encoding='utf-8')
# Sign nonce + date + client with the secret key (HMAC-SHA256, base64)
message = nonce+date.encode(encoding='utf-8')+client.encode(encoding='utf-8')
digest = hmac.new(secretkey, msg=message, digestmod=hashlib.sha256).digest()
signature = str(base64.b64encode(digest).decode())  # build the digital signature
# the AUTHORIZATION header value is built now
auth = apiurl+rel+'/hmac/v1 ClientID="'+client+'",Signature="' + \
    signature+'",Nonce="'+nonce.decode(encoding='utf-8')+'",Created="'+date+'" '
#print ("URLiauth:", auth)
# get the initial base of the tree
url1 = apiurl+rel
# get the contest data, first instance
cd = gdata(url1, 'contests', prt='no')[0]
category = cd['category']  # get the main data from the contest
eventname = cd['name']
compid = cd['id']
country = cd['country']  # country code - 2 chars code
compcountry = country  # country used as default for pilots
# convert the 2 chars ID to the 3 chars ID
ccc = pycountry.countries.get(alpha_2=country)
country = ccc.alpha_3
endate = cd['end_date']
lc = getemb(cd, 'location')  # location data
lcname = lc['name']  # location name
print("\n\n= Contest ===============================")
print("Category:", category, "Comp name:", eventname, "Comp ID:", compid)
print("Loc Name:", lcname, "Country: ", country, country, "End date:", endate)
print("=========================================\n\n")
npil = 0  # init the number of pilots
nwarnings = 0  # number of warnings ...
warnings = []  # warnings glider
classes = []
pilots = []
# Build the tracks and turn points, exploring the contestants and task within each class
# go thru the different classes now within the day
print("Classes:\n========\n\n")
for cl in getemb(cd, 'classes'):
    #print "CLCLCL", cl
    classname = cl["type"]  # search for each class
    print("Class:", classname, "\n\n")  # search for each class
    # search for the contestants on each class
    url3 = getlinks(cl, "contestants")
    ctt = gdata(url3, "contestants")  # get the contestants data
    #print "CTTCTT",ctt
    pilots = []
    for contestants in ctt:
        #print "FT", ft, "\n\n"
        fname = getemb(contestants, 'pilot')[0]['first_name']
        lname = getemb(contestants, 'pilot')[0]['last_name']
        # convert it to utf8 in order to avoid problems
        pname = fname.encode('utf-8').decode('utf-8')+" "+lname.encode('utf-8').decode('utf-8')
        # Optional fields fall back to *_NOTYET placeholders.
        if 'club' in contestants:
            club = contestants['club'].encode('utf-8').decode('utf-8')
        else:
            club = "club_NOTYET"
        if 'aircraft_model' in contestants:
            ar = contestants['aircraft_model']
        else:
            ar = "am_NOTYET"
        if 'contestant_number' in contestants:
            cn = contestants['contestant_number']
        else:
            cn = "cn_NOTYET"
        # Pilot nationality defaults to the contest country, then to Spain.
        if 'nationality' in getemb(contestants, 'pilot')[0]:
            nation = getemb(contestants, 'pilot')[0]['nationality']
        else:
            if compcountry != '':
                nation = compcountry
            else:
                nation = "ES"  # by default is SPAIN
        # convert the 2 chars ID to the 3 chars ID
        ccc = pycountry.countries.get(alpha_2=nation)
        country3 = ccc.alpha_3
        if 'email' in getemb(contestants, 'pilot')[0]:
            email = getemb(contestants, 'pilot')[0]['email']
        else:
            email = "email_NOTYET"
        igcid = getemb(contestants, 'pilot')[0]['igc_id']
        print("Pilot:", pname, "Club:", club, "CompID:", cn, "Nation:", nation, "Country Code", country3, "Email:", email, "IGCID:", igcid)
        npil += 1
        pil = {"PilotName": pname, "Club": club, "CompID": cn, "Nation": nation, "CountryCode": country3,
               "Email": email, "IgcID": igcid, "PortraitUrl": "http://rankingdata.fai.org/PilotImages/"+str(igcid)+".jpg"}
        pilots.append(pil)
    cll = {"Class": classname, "Pilots": pilots}
    classes.append(cll)
    print("----------------------------------------------------------------\n\n")
# print the number of pilots as a reference and control
print("= Pilots ===========================", npil, "\n\n")
# Final document written to FlyTool.json and echoed to stdout.
FlyTool = {"Compname": eventname, "Category": category, "Country": country,
           "EndDate": endate, "Location": lcname, "Classes": classes}
jsonfile = open("FlyTool.json", 'w')
j = json.dumps(FlyTool, indent=4)
jsonfile.write(j)
jsonfile.close()
print(j)
| gpl-2.0 |
ChromiumWebApps/chromium | media/tools/layout_tests/test_expectations_history.py | 156 | 5156 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the history of the test expectation file."""
from datetime import datetime
from datetime import timedelta
import os
import re
import sys
import tempfile
import time
import pysvn
TEST_EXPECTATIONS_ROOT = 'http://src.chromium.org/blink/trunk/'
# A map from earliest revision to path.
# TODO(imasaki): support multiple test expectation files.
TEST_EXPECTATIONS_LOCATIONS = {
148348: 'LayoutTests/TestExpectations',
119317: 'LayoutTests/platform/chromium/TestExpectations',
0: 'LayoutTests/platform/chromium/test_expectations.txt'}
TEST_EXPECTATIONS_DEFAULT_PATH = (
TEST_EXPECTATIONS_ROOT + TEST_EXPECTATIONS_LOCATIONS[148348])
class TestExpectationsHistory(object):
"""A class to represent history of the test expectation file.
The history is obtained by calling PySVN.log()/diff() APIs.
TODO(imasaki): Add more functionalities here like getting some statistics
about the test expectation file.
"""
@staticmethod
def GetTestExpectationsPathForRevision(revision):
for i in sorted(TEST_EXPECTATIONS_LOCATIONS.keys(), reverse=True):
if revision >= i:
return TEST_EXPECTATIONS_ROOT + TEST_EXPECTATIONS_LOCATIONS[i]
@staticmethod
def GetDiffBetweenTimes(start, end, testname_list,
te_location=TEST_EXPECTATIONS_DEFAULT_PATH):
"""Get difference between time period for the specified test names.
Given the time period, this method first gets the revision number. Then,
it gets the diff for each revision. Finally, it keeps the diff relating to
the test names and returns them along with other information about
revision.
Args:
start: A timestamp specifying start of the time period to be
looked at.
end: A timestamp object specifying end of the time period to be
looked at.
testname_list: A list of strings representing test names of interest.
te_location: A location of the test expectation file.
Returns:
A list of tuples (old_rev, new_rev, author, date, message, lines). The
|lines| contains the diff of the tests of interest.
"""
temp_directory = tempfile.mkdtemp()
test_expectations_path = os.path.join(temp_directory, 'TestExpectations')
# Get directory name which is necesary to call PySVN.checkout().
te_location_dir = te_location[0:te_location.rindex('/')]
client = pysvn.Client()
client.checkout(te_location_dir, temp_directory, recurse=False)
# PySVN.log() (http://pysvn.tigris.org/docs/pysvn_prog_ref.html
# #pysvn_client_log) returns the log messages (including revision
# number in chronological order).
logs = client.log(test_expectations_path,
revision_start=pysvn.Revision(
pysvn.opt_revision_kind.date, start),
revision_end=pysvn.Revision(
pysvn.opt_revision_kind.date, end))
result_list = []
gobackdays = 1
while gobackdays < sys.maxint:
goback_start = time.mktime(
(datetime.fromtimestamp(start) - (
timedelta(days=gobackdays))).timetuple())
logs_before_time_period = (
client.log(test_expectations_path,
revision_start=pysvn.Revision(
pysvn.opt_revision_kind.date, goback_start),
revision_end=pysvn.Revision(
pysvn.opt_revision_kind.date, start)))
if logs_before_time_period:
# Prepend at the beginning of logs.
logs.insert(0, logs_before_time_period[len(logs_before_time_period)-1])
break
gobackdays *= 2
for i in xrange(len(logs) - 1):
old_rev = logs[i].revision.number
new_rev = logs[i + 1].revision.number
# Parsing the actual diff.
new_path = TestExpectationsHistory.GetTestExpectationsPathForRevision(
new_rev);
old_path = TestExpectationsHistory.GetTestExpectationsPathForRevision(
old_rev);
text = client.diff(temp_directory,
url_or_path=old_path,
revision1=pysvn.Revision(
pysvn.opt_revision_kind.number, old_rev),
url_or_path2=new_path,
revision2=pysvn.Revision(
pysvn.opt_revision_kind.number, new_rev))
lines = text.split('\n')
target_lines = []
for line in lines:
for testname in testname_list:
matches = re.findall(testname, line)
if matches:
if line[0] == '+' or line[0] == '-':
target_lines.append(line)
if target_lines:
# Needs to convert to normal date string for presentation.
result_list.append((
old_rev, new_rev, logs[i + 1].author,
datetime.fromtimestamp(
logs[i + 1].date).strftime('%Y-%m-%d %H:%M:%S'),
logs[i + 1].message, target_lines))
return result_list
| bsd-3-clause |
andrewleech/SickRage | lib/unidecode/x02c.py | 246 | 3596 | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'L', # 0x60
'l', # 0x61
'L', # 0x62
'P', # 0x63
'R', # 0x64
'a', # 0x65
't', # 0x66
'H', # 0x67
'h', # 0x68
'K', # 0x69
'k', # 0x6a
'Z', # 0x6b
'z', # 0x6c
'', # 0x6d
'M', # 0x6e
'A', # 0x6f
'', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'', # 0x74
'', # 0x75
'', # 0x76
'', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'', # 0x7d
'', # 0x7e
'', # 0x7f
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'', # 0xcd
'', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
)
| gpl-3.0 |
ivanhorvath/openshift-tools | openshift/installer/vendored/openshift-ansible-3.5.91/roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py | 82 | 13262 | '''
Unit tests for oc secret add
'''
import os
import six
import sys
import unittest
import mock
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error,wrong-import-position
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
from oc_serviceaccount_secret import OCServiceAccountSecret, locate_oc_binary # noqa: E402
try:
import ruamel.yaml as yaml # noqa: EF401
YAML_TYPE = 'ruamel'
except ImportError:
YAML_TYPE = 'pyyaml'
class OCServiceAccountSecretTest(unittest.TestCase):
'''
Test class for OCServiceAccountSecret
'''
@mock.patch('oc_serviceaccount_secret.locate_oc_binary')
@mock.patch('oc_serviceaccount_secret.Utils.create_tmpfile_copy')
@mock.patch('oc_serviceaccount_secret.Yedit._write')
@mock.patch('oc_serviceaccount_secret.OCServiceAccountSecret._run')
def test_adding_a_secret_to_a_serviceaccount(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_oc_binary):
''' Testing adding a secret to a service account '''
# Arrange
# run_ansible input parameters
params = {
'state': 'present',
'namespace': 'default',
'secret': 'newsecret',
'service_account': 'builder',
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False,
}
oc_get_sa_before = '''{
"apiVersion": "v1",
"imagePullSecrets": [
{
"name": "builder-dockercfg-rsrua"
}
],
"kind": "ServiceAccount",
"metadata": {
"name": "builder",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/serviceaccounts/builder",
"uid": "cf47bca7-ebc4-11e6-b041-0ed9df7abc38",
"resourceVersion": "302879",
"creationTimestamp": "2017-02-05T17:02:00Z"
},
"secrets": [
{
"name": "builder-dockercfg-rsrua"
},
{
"name": "builder-token-akqxi"
}
]
}
'''
oc_get_sa_after = '''{
"apiVersion": "v1",
"imagePullSecrets": [
{
"name": "builder-dockercfg-rsrua"
}
],
"kind": "ServiceAccount",
"metadata": {
"name": "builder",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/serviceaccounts/builder",
"uid": "cf47bca7-ebc4-11e6-b041-0ed9df7abc38",
"resourceVersion": "302879",
"creationTimestamp": "2017-02-05T17:02:00Z"
},
"secrets": [
{
"name": "builder-dockercfg-rsrua"
},
{
"name": "builder-token-akqxi"
},
{
"name": "newsecret"
}
]
}
'''
builder_ryaml_file = '''\
secrets:
- name: builder-dockercfg-rsrua
- name: builder-token-akqxi
- name: newsecret
kind: ServiceAccount
imagePullSecrets:
- name: builder-dockercfg-rsrua
apiVersion: v1
metadata:
name: builder
namespace: default
resourceVersion: '302879'
creationTimestamp: '2017-02-05T17:02:00Z'
selfLink: /api/v1/namespaces/default/serviceaccounts/builder
uid: cf47bca7-ebc4-11e6-b041-0ed9df7abc38
'''
builder_pyyaml_file = '''\
apiVersion: v1
imagePullSecrets:
- name: builder-dockercfg-rsrua
kind: ServiceAccount
metadata:
creationTimestamp: '2017-02-05T17:02:00Z'
name: builder
namespace: default
resourceVersion: '302879'
selfLink: /api/v1/namespaces/default/serviceaccounts/builder
uid: cf47bca7-ebc4-11e6-b041-0ed9df7abc38
secrets:
- name: builder-dockercfg-rsrua
- name: builder-token-akqxi
- name: newsecret
'''
# Return values of our mocked function call. These get returned once per call.
mock_cmd.side_effect = [
(0, oc_get_sa_before, ''), # First call to the mock
(0, oc_get_sa_before, ''), # Second call to the mock
(0, 'serviceaccount "builder" replaced', ''), # Third call to the mock
(0, oc_get_sa_after, ''), # Fourth call to the mock
]
mock_oc_binary.side_effect = [
'oc'
]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
# Act
results = OCServiceAccountSecret.run_ansible(params, False)
# Assert
self.assertTrue(results['changed'])
self.assertEqual(results['results']['returncode'], 0)
self.assertEqual(results['state'], 'present')
# Making sure our mocks were called as we expected
mock_cmd.assert_has_calls([
mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'replace', '-f', mock.ANY, '-n', 'default'], None),
mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None)
])
yaml_file = builder_pyyaml_file
if YAML_TYPE == 'ruamel':
yaml_file = builder_ryaml_file
mock_write.assert_has_calls([
mock.call(mock.ANY, yaml_file)
])
@mock.patch('oc_serviceaccount_secret.locate_oc_binary')
@mock.patch('oc_serviceaccount_secret.Utils.create_tmpfile_copy')
@mock.patch('oc_serviceaccount_secret.Yedit._write')
@mock.patch('oc_serviceaccount_secret.OCServiceAccountSecret._run')
def test_removing_a_secret_to_a_serviceaccount(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_oc_binary):
''' Testing removing a secret to a service account '''
# Arrange
# run_ansible input parameters
params = {
'state': 'absent',
'namespace': 'default',
'secret': 'newsecret',
'service_account': 'builder',
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False,
}
oc_get_sa_before = '''{
"apiVersion": "v1",
"imagePullSecrets": [
{
"name": "builder-dockercfg-rsrua"
}
],
"kind": "ServiceAccount",
"metadata": {
"name": "builder",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/serviceaccounts/builder",
"uid": "cf47bca7-ebc4-11e6-b041-0ed9df7abc38",
"resourceVersion": "302879",
"creationTimestamp": "2017-02-05T17:02:00Z"
},
"secrets": [
{
"name": "builder-dockercfg-rsrua"
},
{
"name": "builder-token-akqxi"
},
{
"name": "newsecret"
}
]
}
'''
builder_ryaml_file = '''\
secrets:
- name: builder-dockercfg-rsrua
- name: builder-token-akqxi
kind: ServiceAccount
imagePullSecrets:
- name: builder-dockercfg-rsrua
apiVersion: v1
metadata:
name: builder
namespace: default
resourceVersion: '302879'
creationTimestamp: '2017-02-05T17:02:00Z'
selfLink: /api/v1/namespaces/default/serviceaccounts/builder
uid: cf47bca7-ebc4-11e6-b041-0ed9df7abc38
'''
builder_pyyaml_file = '''\
apiVersion: v1
imagePullSecrets:
- name: builder-dockercfg-rsrua
kind: ServiceAccount
metadata:
creationTimestamp: '2017-02-05T17:02:00Z'
name: builder
namespace: default
resourceVersion: '302879'
selfLink: /api/v1/namespaces/default/serviceaccounts/builder
uid: cf47bca7-ebc4-11e6-b041-0ed9df7abc38
secrets:
- name: builder-dockercfg-rsrua
- name: builder-token-akqxi
'''
# Return values of our mocked function call. These get returned once per call.
mock_cmd.side_effect = [
(0, oc_get_sa_before, ''), # First call to the mock
(0, oc_get_sa_before, ''), # Second call to the mock
(0, 'serviceaccount "builder" replaced', ''), # Third call to the mock
]
mock_oc_binary.side_effect = [
'oc'
]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
# Act
results = OCServiceAccountSecret.run_ansible(params, False)
# Assert
self.assertTrue(results['changed'])
self.assertEqual(results['results']['returncode'], 0)
self.assertEqual(results['state'], 'absent')
# Making sure our mocks were called as we expected
mock_cmd.assert_has_calls([
mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'get', 'sa', 'builder', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'replace', '-f', mock.ANY, '-n', 'default'], None),
])
yaml_file = builder_pyyaml_file
if YAML_TYPE == 'ruamel':
yaml_file = builder_ryaml_file
mock_write.assert_has_calls([
mock.call(mock.ANY, yaml_file)
])
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
''' Testing binary lookup fallback '''
mock_env_get.side_effect = lambda _v, _d: ''
mock_path_exists.side_effect = lambda _: False
self.assertEqual(locate_oc_binary(), 'oc')
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in path '''
oc_bin = '/usr/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in /usr/local/bin '''
oc_bin = '/usr/local/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in ~/bin '''
oc_bin = os.path.expanduser('~/bin/oc')
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup fallback '''
mock_env_get.side_effect = lambda _v, _d: ''
mock_shutil_which.side_effect = lambda _f, path=None: None
self.assertEqual(locate_oc_binary(), 'oc')
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in path '''
oc_bin = '/usr/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in /usr/local/bin '''
oc_bin = '/usr/local/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in ~/bin '''
oc_bin = os.path.expanduser('~/bin/oc')
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
| apache-2.0 |
cancan101/tensorflow | tensorflow/contrib/bayesflow/__init__.py | 57 | 1871 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for representing Bayesian computation.
## This package provides classes for Bayesian computation with TensorFlow.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long
from tensorflow.contrib.bayesflow.python.ops import entropy
from tensorflow.contrib.bayesflow.python.ops import monte_carlo
from tensorflow.contrib.bayesflow.python.ops import stochastic_gradient_estimators
from tensorflow.contrib.bayesflow.python.ops import stochastic_graph
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor
from tensorflow.contrib.bayesflow.python.ops import stochastic_variables
from tensorflow.contrib.bayesflow.python.ops import variational_inference
# pylint: enable=unused-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['entropy', 'monte_carlo',
'special_math', 'stochastic_gradient_estimators',
'stochastic_graph', 'stochastic_tensor',
'stochastic_variables', 'variational_inference']
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
mancoast/CPythonPyc_test | fail/301_test_richcmp.py | 5 | 11121 | # Tests for rich comparisons
import unittest
from test import support
import operator
class Number:
def __init__(self, x):
self.x = x
def __lt__(self, other):
return self.x < other
def __le__(self, other):
return self.x <= other
def __eq__(self, other):
return self.x == other
def __ne__(self, other):
return self.x != other
def __gt__(self, other):
return self.x > other
def __ge__(self, other):
return self.x >= other
def __cmp__(self, other):
raise support.TestFailed("Number.__cmp__() should not be called")
def __repr__(self):
return "Number(%r)" % (self.x, )
class Vector:
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, i):
return self.data[i]
def __setitem__(self, i, v):
self.data[i] = v
__hash__ = None # Vectors cannot be hashed
def __bool__(self):
raise TypeError("Vectors cannot be used in Boolean contexts")
def __cmp__(self, other):
raise support.TestFailed("Vector.__cmp__() should not be called")
def __repr__(self):
return "Vector(%r)" % (self.data, )
def __lt__(self, other):
return Vector([a < b for a, b in zip(self.data, self.__cast(other))])
def __le__(self, other):
return Vector([a <= b for a, b in zip(self.data, self.__cast(other))])
def __eq__(self, other):
return Vector([a == b for a, b in zip(self.data, self.__cast(other))])
def __ne__(self, other):
return Vector([a != b for a, b in zip(self.data, self.__cast(other))])
def __gt__(self, other):
return Vector([a > b for a, b in zip(self.data, self.__cast(other))])
def __ge__(self, other):
return Vector([a >= b for a, b in zip(self.data, self.__cast(other))])
def __cast(self, other):
if isinstance(other, Vector):
other = other.data
if len(self.data) != len(other):
raise ValueError("Cannot compare vectors of different length")
return other
opmap = {
"lt": (lambda a,b: a< b, operator.lt, operator.__lt__),
"le": (lambda a,b: a<=b, operator.le, operator.__le__),
"eq": (lambda a,b: a==b, operator.eq, operator.__eq__),
"ne": (lambda a,b: a!=b, operator.ne, operator.__ne__),
"gt": (lambda a,b: a> b, operator.gt, operator.__gt__),
"ge": (lambda a,b: a>=b, operator.ge, operator.__ge__)
}
class VectorTest(unittest.TestCase):
def checkfail(self, error, opname, *args):
for op in opmap[opname]:
self.assertRaises(error, op, *args)
def checkequal(self, opname, a, b, expres):
for op in opmap[opname]:
realres = op(a, b)
# can't use assertEqual(realres, expres) here
self.assertEqual(len(realres), len(expres))
for i in range(len(realres)):
# results are bool, so we can use "is" here
self.assert_(realres[i] is expres[i])
def test_mixed(self):
# check that comparisons involving Vector objects
# which return rich results (i.e. Vectors with itemwise
# comparison results) work
a = Vector(range(2))
b = Vector(range(3))
# all comparisons should fail for different length
for opname in opmap:
self.checkfail(ValueError, opname, a, b)
a = list(range(5))
b = 5 * [2]
# try mixed arguments (but not (a, b) as that won't return a bool vector)
args = [(a, Vector(b)), (Vector(a), b), (Vector(a), Vector(b))]
for (a, b) in args:
self.checkequal("lt", a, b, [True, True, False, False, False])
self.checkequal("le", a, b, [True, True, True, False, False])
self.checkequal("eq", a, b, [False, False, True, False, False])
self.checkequal("ne", a, b, [True, True, False, True, True ])
self.checkequal("gt", a, b, [False, False, False, True, True ])
self.checkequal("ge", a, b, [False, False, True, True, True ])
for ops in opmap.values():
for op in ops:
# calls __bool__, which should fail
self.assertRaises(TypeError, bool, op(a, b))
class NumberTest(unittest.TestCase):
def test_basic(self):
# Check that comparisons involving Number objects
# give the same results give as comparing the
# corresponding ints
for a in range(3):
for b in range(3):
for typea in (int, Number):
for typeb in (int, Number):
if typea==typeb==int:
continue # the combination int, int is useless
ta = typea(a)
tb = typeb(b)
for ops in opmap.values():
for op in ops:
realoutcome = op(a, b)
testoutcome = op(ta, tb)
self.assertEqual(realoutcome, testoutcome)
def checkvalue(self, opname, a, b, expres):
for typea in (int, Number):
for typeb in (int, Number):
ta = typea(a)
tb = typeb(b)
for op in opmap[opname]:
realres = op(ta, tb)
realres = getattr(realres, "x", realres)
self.assert_(realres is expres)
def test_values(self):
# check all operators and all comparison results
self.checkvalue("lt", 0, 0, False)
self.checkvalue("le", 0, 0, True )
self.checkvalue("eq", 0, 0, True )
self.checkvalue("ne", 0, 0, False)
self.checkvalue("gt", 0, 0, False)
self.checkvalue("ge", 0, 0, True )
self.checkvalue("lt", 0, 1, True )
self.checkvalue("le", 0, 1, True )
self.checkvalue("eq", 0, 1, False)
self.checkvalue("ne", 0, 1, True )
self.checkvalue("gt", 0, 1, False)
self.checkvalue("ge", 0, 1, False)
self.checkvalue("lt", 1, 0, False)
self.checkvalue("le", 1, 0, False)
self.checkvalue("eq", 1, 0, False)
self.checkvalue("ne", 1, 0, True )
self.checkvalue("gt", 1, 0, True )
self.checkvalue("ge", 1, 0, True )
class MiscTest(unittest.TestCase):
def test_misbehavin(self):
class Misb:
def __lt__(self, other): return 0
def __gt__(self, other): return 0
def __eq__(self, other): return 0
def __le__(self, other): raise TestFailed("This shouldn't happen")
def __ge__(self, other): raise TestFailed("This shouldn't happen")
def __ne__(self, other): raise TestFailed("This shouldn't happen")
a = Misb()
b = Misb()
self.assertEqual(a<b, 0)
self.assertEqual(a==b, 0)
self.assertEqual(a>b, 0)
def test_not(self):
# Check that exceptions in __bool__ are properly
# propagated by the not operator
import operator
class Exc(Exception):
pass
class Bad:
def __bool__(self):
raise Exc
def do(bad):
not bad
for func in (do, operator.not_):
self.assertRaises(Exc, func, Bad())
def test_recursion(self):
# Check that comparison for recursive objects fails gracefully
from collections import UserList
a = UserList()
b = UserList()
a.append(b)
b.append(a)
self.assertRaises(RuntimeError, operator.eq, a, b)
self.assertRaises(RuntimeError, operator.ne, a, b)
self.assertRaises(RuntimeError, operator.lt, a, b)
self.assertRaises(RuntimeError, operator.le, a, b)
self.assertRaises(RuntimeError, operator.gt, a, b)
self.assertRaises(RuntimeError, operator.ge, a, b)
b.append(17)
# Even recursive lists of different lengths are different,
# but they cannot be ordered
self.assert_(not (a == b))
self.assert_(a != b)
self.assertRaises(RuntimeError, operator.lt, a, b)
self.assertRaises(RuntimeError, operator.le, a, b)
self.assertRaises(RuntimeError, operator.gt, a, b)
self.assertRaises(RuntimeError, operator.ge, a, b)
a.append(17)
self.assertRaises(RuntimeError, operator.eq, a, b)
self.assertRaises(RuntimeError, operator.ne, a, b)
a.insert(0, 11)
b.insert(0, 12)
self.assert_(not (a == b))
self.assert_(a != b)
self.assert_(a < b)
class DictTest(unittest.TestCase):
def test_dicts(self):
# Verify that __eq__ and __ne__ work for dicts even if the keys and
# values don't support anything other than __eq__ and __ne__ (and
# __hash__). Complex numbers are a fine example of that.
import random
imag1a = {}
for i in range(50):
imag1a[random.randrange(100)*1j] = random.randrange(100)*1j
items = list(imag1a.items())
random.shuffle(items)
imag1b = {}
for k, v in items:
imag1b[k] = v
imag2 = imag1b.copy()
imag2[k] = v + 1.0
self.assertEqual(imag1a, imag1a)
self.assertEqual(imag1a, imag1b)
self.assertEqual(imag2, imag2)
self.assert_(imag1a != imag2)
for opname in ("lt", "le", "gt", "ge"):
for op in opmap[opname]:
self.assertRaises(TypeError, op, imag1a, imag2)
class ListTest(unittest.TestCase):
def assertIs(self, a, b):
self.assert_(a is b)
def test_coverage(self):
# exercise all comparisons for lists
x = [42]
self.assertIs(x<x, False)
self.assertIs(x<=x, True)
self.assertIs(x==x, True)
self.assertIs(x!=x, False)
self.assertIs(x>x, False)
self.assertIs(x>=x, True)
y = [42, 42]
self.assertIs(x<y, True)
self.assertIs(x<=y, True)
self.assertIs(x==y, False)
self.assertIs(x!=y, True)
self.assertIs(x>y, False)
self.assertIs(x>=y, False)
def test_badentry(self):
# make sure that exceptions for item comparison are properly
# propagated in list comparisons
class Exc(Exception):
pass
class Bad:
def __eq__(self, other):
raise Exc
x = [Bad()]
y = [Bad()]
for op in opmap["eq"]:
self.assertRaises(Exc, op, x, y)
def test_goodentry(self):
# This test exercises the final call to PyObject_RichCompare()
# in Objects/listobject.c::list_richcompare()
class Good:
def __lt__(self, other):
return True
x = [Good()]
y = [Good()]
for op in opmap["lt"]:
self.assertIs(op(x, y), True)
def test_main():
support.run_unittest(VectorTest, NumberTest, MiscTest, DictTest, ListTest)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
patdoyle1/FastMath | appengine-try-python-flask/lib/werkzeug/contrib/lint.py | 318 | 12282 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.lint
~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module provides a middleware that performs sanity checks of the WSGI
application. It checks that :pep:`333` is properly implemented and warns
on some common HTTP errors such as non-empty responses for 304 status
codes.
This module provides a middleware, the :class:`LintMiddleware`. Wrap your
application with it and it will warn about common problems with WSGI and
HTTP while your application is running.
It's strongly recommended to use it during development.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from urlparse import urlparse
from warnings import warn
from werkzeug.datastructures import Headers
from werkzeug.http import is_entity_header
from werkzeug.wsgi import FileWrapper
from werkzeug._compat import string_types
class WSGIWarning(Warning):
"""Warning class for WSGI warnings."""
class HTTPWarning(Warning):
"""Warning class for HTTP warnings."""
def check_string(context, obj, stacklevel=3):
if type(obj) is not str:
warn(WSGIWarning('%s requires bytestrings, got %s' %
(context, obj.__class__.__name__)))
class InputStream(object):
def __init__(self, stream):
self._stream = stream
def read(self, *args):
if len(args) == 0:
warn(WSGIWarning('wsgi does not guarantee an EOF marker on the '
'input stream, thus making calls to '
'wsgi.input.read() unsafe. Conforming servers '
'may never return from this call.'),
stacklevel=2)
elif len(args) != 1:
warn(WSGIWarning('too many parameters passed to wsgi.input.read()'),
stacklevel=2)
return self._stream.read(*args)
def readline(self, *args):
if len(args) == 0:
warn(WSGIWarning('Calls to wsgi.input.readline() without arguments'
' are unsafe. Use wsgi.input.read() instead.'),
stacklevel=2)
elif len(args) == 1:
warn(WSGIWarning('wsgi.input.readline() was called with a size hint. '
'WSGI does not support this, although it\'s available '
'on all major servers.'),
stacklevel=2)
else:
raise TypeError('too many arguments passed to wsgi.input.readline()')
return self._stream.readline(*args)
def __iter__(self):
try:
return iter(self._stream)
except TypeError:
warn(WSGIWarning('wsgi.input is not iterable.'), stacklevel=2)
return iter(())
def close(self):
warn(WSGIWarning('application closed the input stream!'),
stacklevel=2)
self._stream.close()
class ErrorStream(object):
    """Proxy around ``wsgi.errors`` that validates what gets written.

    Ensures only native strings are written and warns if the
    application closes the stream (the server owns it).
    """
    def __init__(self, stream):
        self._stream = stream
    def write(self, s):
        check_string('wsgi.error.write()', s)
        self._stream.write(s)
    def flush(self):
        self._stream.flush()
    def writelines(self, seq):
        # Bugfix: write each individual line; the old code passed the
        # whole sequence object to write() once per line.
        for line in seq:
            self.write(line)
    def close(self):
        # The error stream is owned by the server, not the application
        warn(WSGIWarning('application closed the error stream!'),
             stacklevel=2)
        self._stream.close()
class GuardedWrite(object):
    """Wrapper for the ``write`` callable returned by ``start_response``.

    Checks that only bytestrings are written and records the length of
    every chunk so the total response size can be verified against the
    ``Content-Length`` header later.
    """
    def __init__(self, write, chunks):
        self._write = write
        self._chunks = chunks
    def __call__(self, s):
        check_string('write()', s)
        # Bugfix: ``_write`` is the callable handed back by
        # ``start_response``; call it directly.  The previous
        # ``self._write.write(s)`` raised AttributeError at runtime.
        self._write(s)
        self._chunks.append(len(s))
class GuardedIterator(object):
    """Wrapper for the application iterator that detects protocol
    violations: iteration after close, output before ``start_response``,
    bodies on 304/1xx/204 responses and ``Content-Length`` mismatches.
    """
    def __init__(self, iterator, headers_set, chunks):
        self._iterator = iterator
        self._next = iter(iterator).next
        self.closed = False
        self.headers_set = headers_set
        self.chunks = chunks
    def __iter__(self):
        return self
    def next(self):
        if self.closed:
            warn(WSGIWarning('iterated over closed app_iter'),
                 stacklevel=2)
        rv = self._next()
        if not self.headers_set:
            warn(WSGIWarning('Application returned before it '
                             'started the response'), stacklevel=2)
        check_string('application iterator items', rv)
        self.chunks.append(len(rv))
        return rv
    def close(self):
        self.closed = True
        if hasattr(self._iterator, 'close'):
            self._iterator.close()
        if self.headers_set:
            status_code, headers = self.headers_set
            bytes_sent = sum(self.chunks)
            content_length = headers.get('content-length', type=int)
            if status_code == 304:
                # 304 must not carry entity headers (except these two)
                # nor a body
                for key, value in headers:
                    key = key.lower()
                    if key not in ('expires', 'content-location') and \
                       is_entity_header(key):
                        warn(HTTPWarning('entity header %r found in 304 '
                                         'response' % key))
                if bytes_sent:
                    warn(HTTPWarning('304 responses must not have a body'))
            elif 100 <= status_code < 200 or status_code == 204:
                if content_length != 0:
                    # Bugfix: the ``%`` formatting used to be applied to
                    # the HTTPWarning instance instead of the message
                    # string, raising a TypeError when this path ran.
                    warn(HTTPWarning('%r responses must have an empty '
                                     'content length' % status_code))
                if bytes_sent:
                    warn(HTTPWarning('%r responses must not have a body' %
                                     status_code))
            elif content_length is not None and content_length != bytes_sent:
                warn(WSGIWarning('Content-Length and the number of bytes '
                                 'sent to the client do not match.'))
    def __del__(self):
        if not self.closed:
            try:
                warn(WSGIWarning('Iterator was garbage collected before '
                                 'it was closed.'))
            except Exception:
                pass
class LintMiddleware(object):
    """This middleware wraps an application and warns on common errors.
    Among other thing it currently checks for the following problems:
    - invalid status codes
    - non-bytestrings sent to the WSGI server
    - strings returned from the WSGI application
    - non-empty conditional responses
    - unquoted etags
    - relative URLs in the Location header
    - unsafe calls to wsgi.input
    - unclosed iterators
    Detected errors are emitted using the standard Python :mod:`warnings`
    system and usually end up on :data:`stderr`.
    ::
        from werkzeug.contrib.lint import LintMiddleware
        app = LintMiddleware(app)
    :param app: the application to wrap
    """
    def __init__(self, app):
        self.app = app
    def check_environ(self, environ):
        """Warn about missing or malformed keys in the WSGI environ."""
        if type(environ) is not dict:
            warn(WSGIWarning('WSGI environment is not a standard python dict.'),
                 stacklevel=4)
        for key in ('REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
                    'wsgi.version', 'wsgi.input', 'wsgi.errors',
                    'wsgi.multithread', 'wsgi.multiprocess',
                    'wsgi.run_once'):
            if key not in environ:
                warn(WSGIWarning('required environment key %r not found'
                     % key), stacklevel=3)
        if environ['wsgi.version'] != (1, 0):
            warn(WSGIWarning('environ is not a WSGI 1.0 environ'),
                 stacklevel=3)
        script_name = environ.get('SCRIPT_NAME', '')
        if script_name and script_name[:1] != '/':
            warn(WSGIWarning('SCRIPT_NAME does not start with a slash: %r'
                             % script_name), stacklevel=3)
        path_info = environ.get('PATH_INFO', '')
        if path_info[:1] != '/':
            warn(WSGIWarning('PATH_INFO does not start with a slash: %r'
                             % path_info), stacklevel=3)
    def check_start_response(self, status, headers, exc_info):
        """Validate the arguments of a ``start_response`` call and
        return the parsed ``(status_code, Headers)`` pair."""
        check_string('status', status)
        status_code = status.split(None, 1)[0]
        if len(status_code) != 3 or not status_code.isdigit():
            warn(WSGIWarning('Status code must be three digits'), stacklevel=3)
        if len(status) < 4 or status[3] != ' ':
            warn(WSGIWarning('Invalid value for status %r. Valid '
                             'status strings are three digits, a space '
                             'and a status explanation'), stacklevel=3)
        status_code = int(status_code)
        if status_code < 100:
            warn(WSGIWarning('status code < 100 detected'), stacklevel=3)
        if type(headers) is not list:
            warn(WSGIWarning('header list is not a list'), stacklevel=3)
        for item in headers:
            if type(item) is not tuple or len(item) != 2:
                # Bugfix: repaired garbled warning text ("must tuple").
                warn(WSGIWarning('Headers must be 2-item tuples'),
                     stacklevel=3)
            name, value = item
            if type(name) is not str or type(value) is not str:
                warn(WSGIWarning('header items must be strings'),
                     stacklevel=3)
            if name.lower() == 'status':
                warn(WSGIWarning('The status header is not supported due to '
                                 'conflicts with the CGI spec.'),
                     stacklevel=3)
        if exc_info is not None and not isinstance(exc_info, tuple):
            warn(WSGIWarning('invalid value for exc_info'), stacklevel=3)
        headers = Headers(headers)
        self.check_headers(headers)
        return status_code, headers
    def check_headers(self, headers):
        """Warn about unquoted etags and relative Location headers."""
        etag = headers.get('etag')
        if etag is not None:
            if etag.startswith('w/'):
                etag = etag[2:]
            if not (etag[:1] == etag[-1:] == '"'):
                warn(HTTPWarning('unquoted etag emitted.'), stacklevel=4)
        location = headers.get('location')
        if location is not None:
            if not urlparse(location).netloc:
                warn(HTTPWarning('absolute URLs required for location header'),
                     stacklevel=4)
    def check_iterator(self, app_iter):
        """Warn if the application returned a bare string as body."""
        if isinstance(app_iter, string_types):
            warn(WSGIWarning('application returned string. Response will '
                             'send character for character to the client '
                             'which will kill the performance. Return a '
                             'list or iterable instead.'), stacklevel=3)
    def __call__(self, *args, **kwargs):
        if len(args) != 2:
            warn(WSGIWarning('Two arguments to WSGI app required'), stacklevel=2)
        if kwargs:
            warn(WSGIWarning('No keyword arguments to WSGI app allowed'),
                 stacklevel=2)
        environ, start_response = args
        self.check_environ(environ)
        environ['wsgi.input'] = InputStream(environ['wsgi.input'])
        environ['wsgi.errors'] = ErrorStream(environ['wsgi.errors'])
        # hook our own file wrapper in so that applications will always
        # iterate to the end and we can check the content length
        environ['wsgi.file_wrapper'] = FileWrapper
        headers_set = []
        chunks = []
        def checking_start_response(*args, **kwargs):
            if len(args) not in (2, 3):
                # Bugfix: ``stacklevel`` is an argument of ``warn()``;
                # it used to be passed to the WSGIWarning constructor.
                warn(WSGIWarning('Invalid number of arguments: %s, expected '
                                 '2 or 3' % len(args)), stacklevel=2)
            if kwargs:
                warn(WSGIWarning('no keyword arguments allowed.'))
            status, headers = args[:2]
            if len(args) == 3:
                exc_info = args[2]
            else:
                exc_info = None
            headers_set[:] = self.check_start_response(status, headers,
                                                       exc_info)
            return GuardedWrite(start_response(status, headers, exc_info),
                                chunks)
        app_iter = self.app(environ, checking_start_response)
        self.check_iterator(app_iter)
        return GuardedIterator(app_iter, headers_set, chunks)
| gpl-2.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/operation_status_response.py | 1 | 1836 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OperationStatusResponse(Model):
    """Operation status response.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar name: Operation ID
    :vartype name: str
    :ivar status: Operation status
    :vartype status: str
    :ivar start_time: Start time of the operation
    :vartype start_time: datetime
    :ivar end_time: End time of the operation
    :vartype end_time: datetime
    :ivar error: Api error
    :vartype error: ~azure.mgmt.compute.v2016_04_30_preview.models.ApiError
    """
    # All fields are read-only: the service fills them in responses and
    # they must not be sent in requests.
    _validation = {
        'name': {'readonly': True},
        'status': {'readonly': True},
        'start_time': {'readonly': True},
        'end_time': {'readonly': True},
        'error': {'readonly': True},
    }
    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
        'error': {'key': 'error', 'type': 'ApiError'},
    }
    def __init__(self, **kwargs):
        """Initialize all server-populated fields to ``None``."""
        super(OperationStatusResponse, self).__init__(**kwargs)
        self.name = None
        self.status = None
        self.start_time = None
        self.end_time = None
        self.error = None
| mit |
caiocsalvador/whats_the_craic | lib/python3.4/site-packages/django/core/checks/registry.py | 162 | 3098 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from itertools import chain
from django.utils.itercompat import is_iterable
class Tags(object):
    """
    Built-in tags for internal checks.
    """
    # Each constant names a category that check functions can be
    # registered under and later filtered by (see CheckRegistry).
    admin = 'admin'
    caches = 'caches'
    compatibility = 'compatibility'
    models = 'models'
    security = 'security'
    signals = 'signals'
    templates = 'templates'
    urls = 'urls'
class CheckRegistry(object):
    """Registry of check functions, split into regular checks and
    deployment-only checks."""

    def __init__(self):
        self.registered_checks = []
        self.deployment_checks = []

    def register(self, check=None, *tags, **kwargs):
        """
        Can be used as a function or a decorator. Register given function
        `f` labeled with given `tags`. The function should receive **kwargs
        and return list of Errors and Warnings.
        Example::
            registry = CheckRegistry()
            @registry.register('mytag', 'anothertag')
            def my_check(apps, **kwargs):
                # ... perform checks and collect `errors` ...
                return errors
            # or
            registry.register(my_check, 'mytag', 'anothertag')
        """
        kwargs.setdefault('deploy', False)

        def inner(func):
            func.tags = tags
            # Deployment checks and regular checks live in separate lists.
            target = (self.deployment_checks if kwargs['deploy']
                      else self.registered_checks)
            if func not in target:
                target.append(func)
            return func

        if callable(check):
            # Called directly with a function: register it immediately.
            return inner(check)
        if check:
            # Called as @register('tag1', ...): first positional is a tag.
            tags += (check, )
        return inner

    def run_checks(self, app_configs=None, tags=None, include_deployment_checks=False):
        """
        Run all registered checks and return list of Errors and Warnings.
        """
        errors = []
        checks = self.get_checks(include_deployment_checks)
        if tags is not None:
            wanted = set(tags)
            checks = [check for check in checks
                      if hasattr(check, 'tags') and set(check.tags) & wanted]
        for check in checks:
            new_errors = check(app_configs=app_configs)
            assert is_iterable(new_errors), (
                "The function %r did not return a list. All functions registered "
                "with the checks registry must return a list." % check)
            errors.extend(new_errors)
        return errors

    def tag_exists(self, tag, include_deployment_checks=False):
        return tag in self.tags_available(include_deployment_checks)

    def tags_available(self, deployment_checks=False):
        available = set()
        for check in self.get_checks(deployment_checks):
            if hasattr(check, 'tags'):
                available.update(check.tags)
        return available

    def get_checks(self, include_deployment_checks=False):
        if include_deployment_checks:
            return self.registered_checks + self.deployment_checks
        return list(self.registered_checks)
# Default global registry plus module-level convenience aliases that
# form the public API of this module.
registry = CheckRegistry()
register = registry.register
run_checks = registry.run_checks
tag_exists = registry.tag_exists
| mit |
rasky/nfstest | packet/transport/tcp.py | 1 | 15755 | #===============================================================================
# Copyright 2012 NetApp, Inc. All Rights Reserved,
# contribution by Jorge Mora <mora@netapp.com>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#===============================================================================
"""
TCP module
Decode TCP layer.
RFC 793 TRANSMISSION CONTROL PROTOCOL
RFC 2018 TCP Selective Acknowledgment Options
RFC 7323 TCP Extensions for High Performance
"""
import nfstest_config as c
from baseobj import BaseObj
from packet.unpack import Unpack
from packet.application.dns import DNS
from packet.application.rpc import RPC
from packet.application.krb5 import KRB5
from packet.utils import OptionFlags, ShortHex
# Module constants
__author__ = "Jorge Mora (%s)" % c.NFSTEST_AUTHOR_EMAIL
__copyright__ = "Copyright (C) 2012 NetApp, Inc."
__license__ = "GPL v2"
__version__ = "1.5"
# Bit position -> name for the TCP header control bits
# (bit 0 is the least significant bit of the flags field)
TCPflags = {
    0: "FIN",
    1: "SYN",
    2: "RST",
    3: "PSH",
    4: "ACK",
    5: "URG",
    6: "ECE",
    7: "CWR",
    8: "NS",
}
class Stream(BaseObj):
    """TCP stream buffer object

       Reassembles the payload of one TCP direction, tracking relative
       sequence numbers, wrap-around and missing (out of order) segments.
    """
    # Printing of this object is used for debugging only so don't display buffer
    _attrlist = ("last_seq", "next_seq", "seq_wrap", "seq_base", "frag_off")
    def __init__(self, seqno):
        self.buffer = "" # Keep track of RPC packets spanning multiple TCP packets
        self.frag_off = 0 # Keep track of multiple RPC packets within a single TCP packet
        self.last_seq = 0 # Last sequence number processed
        self.next_seq = 0 # Next sequence number expected
        self.seq_wrap = 0 # Keep track when sequence number has wrapped around
        self.seq_base = seqno # Base sequence number to convert to relative sequence numbers
        self.segments = [] # Array of missing fragments, item: [start seq, end seq]
    def add_fragment(self, data, seq):
        """Add fragment data to stream buffer"""
        if len(data) == 0:
            return
        if seq == self.next_seq or len(self.buffer) == 0:
            # Append fragment data to stream buffer
            self.buffer += data
            self.segments = []
        elif seq > self.next_seq:
            # Previous fragment is missing so fill previous fragment with zeros
            size = seq - self.next_seq
            self.segments.append([self.next_seq, seq])
            self.buffer += "\x00" * size
            self.buffer += data
        else:
            # Fragment is out of order -- found previous missing fragment
            off = len(self.buffer) - self.next_seq + seq
            datalen = len(data)
            size = datalen + off
            # Insert fragment where it belongs
            self.buffer = self.buffer[:off] + data + self.buffer[size:]
            # Remove fragment from segments list
            index = 0
            for frag in self.segments:
                if seq >= frag[0] and seq < frag[1]:
                    if seq == frag[0] and seq+datalen == frag[1]:
                        # Full segment matched, so just remove it
                        self.segments.pop(index)
                    elif seq == frag[0]:
                        # Start of segment matched, set new missing start
                        frag[0] = seq+datalen
                    elif seq+datalen == frag[1]:
                        # End of segment matched, set new missing end
                        frag[1] = seq
                    else:
                        # Full segment is within missing segment,
                        # set new missing end and create a new segment
                        newfrag = [seq+datalen, frag[1]]
                        frag[1] = seq
                        self.segments.insert(index+1, newfrag)
                    break
                index += 1
    def missing_fragment(self, seq):
        """Check if given sequence number is within a missing fragment"""
        for frag in self.segments:
            if seq >= frag[0] and seq < frag[1]:
                return True
        return False
class Flags(OptionFlags):
    """TCP flags object (header control bits)"""
    _rawfunc = ShortHex  # display the raw flag word in hex
    _bitnames = TCPflags  # bit-position -> flag-name mapping
    __str__ = OptionFlags.str_flags  # e.g. "FIN,ACK"
class Option(BaseObj):
    """TCP option object

       Decodes a single TCP option from the given unpack object.
       Parsing stops silently on truncated data, keeping whatever
       attributes were decoded so far.
    """
    def __init__(self, unpack):
        """Constructor which takes an unpack object as input"""
        self.kind = None
        try:
            self.kind = unpack.unpack_uchar()
            if self.kind not in (0,1):
                # Every option other than EOL(0) and NOP(1) has a length byte
                length = unpack.unpack_uchar()
                if length > 2:
                    if self.kind == 2:
                        # Maximum Segment Size (MSS)
                        self.mss = unpack.unpack_ushort()
                        self._attrlist = ("kind", "mss")
                    elif self.kind == 3:
                        # Window Scale option (WSopt)
                        self.wsopt = unpack.unpack_uchar()
                        self._attrlist = ("kind", "wsopt")
                    elif self.kind == 5:
                        # Sack Option Format (RFC 2018): list of
                        # [left edge, right edge] 32-bit pairs
                        # Bugfix: use floor division so the loop count is an
                        # integer under Python 3 as well (identical on Python 2)
                        self.blocks = []
                        for i in range((length-2)//8):
                            left_edge = unpack.unpack_uint()
                            right_edge = unpack.unpack_uint()
                            self.blocks.append([left_edge, right_edge])
                        self._attrlist = ("kind", "blocks")
                    elif self.kind == 8:
                        # Timestamps option (TSopt)
                        self.tsval = unpack.unpack_uint()
                        self.tsecr = unpack.unpack_uint()
                        self._attrlist = ("kind", "tsval", "tsecr")
                    else:
                        # Unknown option: keep the raw payload bytes
                        self.data = unpack.read(length-2)
                        self._attrlist = ("kind", "data")
        except Exception:
            # Truncated/malformed option data: keep the partial decode.
            # Narrowed from a bare except so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            pass
class TCP(BaseObj):
    """TCP object

       Usage:
           from packet.transport.tcp import TCP
           x = TCP(pktt)

       Object definition:

       TCP(
           src_port = int, # Source port
           dst_port = int, # Destination port
           seq_number = int, # Sequence number
           ack_number = int, # Acknowledgment number
           hl = int, # Data offset or header length (32bit words)
           header_size = int, # Data offset or header length in bytes
           flags = Flags( # TCP flags:
               rawflags = int,# Raw flags
               FIN = int, # No more data from sender
               SYN = int, # Synchronize sequence numbers
               RST = int, # Synchronize sequence numbers
               PSH = int, # Push function. Asks to push the buffered
                          # data to the receiving application
               ACK = int, # Acknowledgment field is significant
               URG = int, # Urgent pointer field is significant
               ECE = int, # ECN-Echo has a dual role:
                          # SYN=1, the TCP peer is ECN capable.
                          # SYN=0, packet with Congestion Experienced
                          # flag in IP header set is received during
                          # normal transmission
               CWR = int, # Congestion Window Reduced
               NS = int, # ECN-nonce concealment protection
           ),
           window_size = int, # Window size
           checksum = int, # Checksum
           urgent_ptr = int, # Urgent pointer
           seq = int, # Relative sequence number
           options = list, # List of TCP options
           data = string, # Raw data of payload if unable to decode
       )
    """
    # Class attributes
    _attrlist = ("src_port", "dst_port", "seq_number", "ack_number", "hl",
                 "header_size", "flags", "window_size", "checksum",
                 "urgent_ptr", "options", "length", "data")
    def __init__(self, pktt):
        """Constructor

           Initialize object's private data.

           pktt:
               Packet trace object (packet.pktt.Pktt) so this layer has
               access to the parent layers.
        """
        # Decode the TCP layer header
        unpack = pktt.unpack
        ulist = unpack.unpack(20, "!HHIIHHHH")
        self.src_port = ulist[0]
        self.dst_port = ulist[1]
        self.seq_number = ulist[2]
        self.ack_number = ulist[3]
        self.hl = ulist[4] >> 12
        self.header_size = 4*self.hl
        self.flags = Flags(ulist[4] & 0x1FF)
        self.window_size = ulist[5]
        self.checksum = ShortHex(ulist[6])
        self.urgent_ptr = ulist[7]
        pktt.pkt.tcp = self
        # Stream identifier: one Stream reassembly buffer per direction
        # of each (src ip:port, dst ip:port) connection
        ip = pktt.pkt.ip
        streamid = "%s:%d-%s:%d" % (ip.src, self.src_port, ip.dst, self.dst_port)
        if streamid not in pktt._tcp_stream_map:
            # Create TCP stream object
            pktt._tcp_stream_map[streamid] = Stream(self.seq_number)
        # De-reference stream map
        stream = pktt._tcp_stream_map[streamid]
        if self.flags.SYN:
            # Reset seq_base on SYN
            stream.seq_base = self.seq_number
            stream.last_seq = stream.seq_wrap
        # Convert sequence numbers to relative numbers
        seq = self.seq_number - stream.seq_base + stream.seq_wrap
        if seq < stream.seq_wrap:
            # Sequence number has reached the maximum and wrapped around
            stream.seq_wrap += 4294967296
            seq += 4294967296
        self.seq = seq
        if self.header_size > 20:
            # Header is longer than the fixed 20 bytes: decode TCP options
            self.options = []
            osize = self.header_size - 20
            optunpack = Unpack(unpack.read(osize))
            while optunpack.size():
                optobj = Option(optunpack)
                if optobj.kind == 0:
                    # End of option list
                    break
                elif optobj.kind > 0:
                    # Valid option
                    self.options.append(optobj)
        # Save length of TCP segment
        self.length = unpack.size()
        if seq < stream.last_seq and not stream.missing_fragment(seq):
            # This is a re-transmission, do not process
            return
        self._decode_payload(pktt, stream)
        if self.length > 0:
            stream.last_seq = seq
            stream.next_seq = seq + self.length
    def __str__(self):
        """String representation of object

           The representation depends on the verbose level set by debug_repr().
           If set to 0 the generic object representation is returned.
           If set to 1 the representation of the object is condensed:
               'TCP 708 -> 2049, seq: 3294175829, ack: 3395739041, ACK,FIN'
           If set to 2 the representation of the object also includes the
           length of payload and a little bit more verbose:
               'src port 708 -> dst port 2049, seq: 3294175829, ack: 3395739041, len: 0, flags: FIN,ACK'
        """
        rdebug = self.debug_repr()
        if rdebug == 1:
            out = "TCP %d -> %d, seq: %d, ack: %d, %s" % \
                  (self.src_port, self.dst_port, self.seq_number, self.ack_number, self.flags)
        elif rdebug == 2:
            out = "src port %d -> dst port %d, seq: %d, ack: %d, len: %d, flags: %s" % \
                  (self.src_port, self.dst_port, self.seq_number, self.ack_number, self.length, self.flags)
        else:
            out = BaseObj.__str__(self)
        return out
    def _decode_payload(self, pktt, stream):
        """Decode TCP payload.

           Dispatches on well-known ports (53 DNS, 88 KRB5) and otherwise
           tries to decode RPC, reassembling RPC messages that span
           multiple TCP segments via the given stream buffer.
        """
        rpc = None
        pkt = pktt.pkt
        unpack = pktt.unpack
        if 53 in [self.src_port, self.dst_port]:
            # DNS on port 53
            dns = DNS(pktt, proto=6)
            if dns:
                pkt.dns = dns
            return
        elif 88 in [self.src_port, self.dst_port]:
            # KRB5 on port 88
            krb = KRB5(pktt, proto=6)
            if krb:
                pkt.krb = krb
            return
        if stream.frag_off > 0 and len(stream.buffer) == 0:
            # This RPC packet lies within previous TCP packet,
            # Re-position the offset of the data
            unpack.seek(unpack.tell() + stream.frag_off)
        # Get the total size
        sid = unpack.save_state()
        size = unpack.size()
        # Try decoding the RPC header before using the stream buffer data
        # to re-sync the stream
        if len(stream.buffer) > 0:
            rpc = RPC(pktt, proto=6)
            if not rpc:
                unpack.restore_state(sid)
                sid = unpack.save_state()
        if rpc or (size == 0 and len(stream.buffer) > 0 and self.flags.rawflags != 0x10):
            # There has been some data lost in the capture,
            # to continue decoding next packets, reset stream
            # except if this packet is just a TCP ACK (flags = 0x10)
            stream.buffer = ""
            stream.frag_off = 0
        if not rpc:
            if len(stream.buffer):
                # Concatenate previous fragment
                unpack.insert(stream.buffer)
            ldata = unpack.size() - 4
            # Get RPC header
            rpc = RPC(pktt, proto=6)
        else:
            ldata = size - 4
        if not rpc:
            return
        rpcsize = rpc.fragment_hdr.size
        truncbytes = pkt.record.length_orig - pkt.record.length_inc
        if truncbytes == 0 and ldata < rpcsize:
            # An RPC fragment is missing to decode RPC payload
            unpack.restore_state(sid)
            stream.add_fragment(unpack.getbytes(), self.seq)
        else:
            if len(stream.buffer) > 0 or ldata == rpcsize:
                stream.frag_off = 0
            stream.buffer = ""
            # Save RPC layer on packet object
            pkt.rpc = rpc
            if rpc.type:
                # Remove packet call from the xid map since reply has
                # already been decoded
                pktt._rpc_xid_map.pop(rpc.xid, None)
            # Decode NFS layer
            rpcload = rpc.decode_payload()
            rpcbytes = ldata - unpack.size()
            if not rpcload and rpcbytes != rpcsize:
                pass
            elif unpack.size():
                # Save the offset of next RPC packet within this TCP packet
                # Data offset is cumulative
                stream.frag_off += size - unpack.size()
                sid = unpack.save_state()
                ldata = unpack.size() - 4
                try:
                    rpc_header = RPC(pktt, proto=6, state=False)
                except Exception:
                    rpc_header = None
                if not rpc_header or ldata < rpc_header.fragment_hdr.size:
                    # Part of next RPC packet is within this TCP packet
                    # Save the multi-span fragment data
                    unpack.restore_state(sid)
                    stream.add_fragment(unpack.getbytes(), self.seq)
                else:
                    # Next RPC packet is entirely within this TCP packet
                    # Re-position the file pointer to the current offset
                    pktt.seek(pktt.boffset)
            else:
                stream.frag_off = 0
| gpl-2.0 |
liorvh/infernal-twin | build/reportlab/src/reportlab/graphics/barcode/eanbc.py | 33 | 11637 | __all__=(
'Ean13BarcodeWidget','isEanString',
)
from reportlab.graphics.shapes import Group, String, Rect
from reportlab.lib import colors
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.lib.validators import isNumber, isColor, isString, Validator, isBoolean
from reportlab.lib.attrmap import *
from reportlab.graphics.charts.areas import PlotArea
from reportlab.lib.units import mm
from reportlab.lib.utils import asNative
#work out a list of manufacturer codes....
_eanNumberSystems = [
('00-13', 'USA & Canada'),
('20-29', 'In-Store Functions'),
('30-37', 'France'),
('40-44', 'Germany'),
('45', 'Japan (also 49)'),
('46', 'Russian Federation'),
('471', 'Taiwan'),
('474', 'Estonia'),
('475', 'Latvia'),
('477', 'Lithuania'),
('479', 'Sri Lanka'),
('480', 'Philippines'),
('482', 'Ukraine'),
('484', 'Moldova'),
('485', 'Armenia'),
('486', 'Georgia'),
('487', 'Kazakhstan'),
('489', 'Hong Kong'),
('49', 'Japan (JAN-13)'),
('50', 'United Kingdom'),
('520', 'Greece'),
('528', 'Lebanon'),
('529', 'Cyprus'),
('531', 'Macedonia'),
('535', 'Malta'),
('539', 'Ireland'),
('54', 'Belgium & Luxembourg'),
('560', 'Portugal'),
('569', 'Iceland'),
('57', 'Denmark'),
('590', 'Poland'),
('594', 'Romania'),
('599', 'Hungary'),
('600-601', 'South Africa'),
('609', 'Mauritius'),
('611', 'Morocco'),
('613', 'Algeria'),
('619', 'Tunisia'),
('622', 'Egypt'),
('625', 'Jordan'),
('626', 'Iran'),
('64', 'Finland'),
('690-692', 'China'),
('70', 'Norway'),
('729', 'Israel'),
('73', 'Sweden'),
('740', 'Guatemala'),
('741', 'El Salvador'),
('742', 'Honduras'),
('743', 'Nicaragua'),
('744', 'Costa Rica'),
('746', 'Dominican Republic'),
('750', 'Mexico'),
('759', 'Venezuela'),
('76', 'Switzerland'),
('770', 'Colombia'),
('773', 'Uruguay'),
('775', 'Peru'),
('777', 'Bolivia'),
('779', 'Argentina'),
('780', 'Chile'),
('784', 'Paraguay'),
('785', 'Peru'),
('786', 'Ecuador'),
('789', 'Brazil'),
('80-83', 'Italy'),
('84', 'Spain'),
('850', 'Cuba'),
('858', 'Slovakia'),
('859', 'Czech Republic'),
('860', 'Yugloslavia'),
('869', 'Turkey'),
('87', 'Netherlands'),
('880', 'South Korea'),
('885', 'Thailand'),
('888', 'Singapore'),
('890', 'India'),
('893', 'Vietnam'),
('899', 'Indonesia'),
('90-91', 'Austria'),
('93', 'Australia'),
('94', 'New Zealand'),
('955', 'Malaysia'),
('977', 'International Standard Serial Number for Periodicals (ISSN)'),
('978', 'International Standard Book Numbering (ISBN)'),
('979', 'International Standard Music Number (ISMN)'),
('980', 'Refund receipts'),
('981-982', 'Common Currency Coupons'),
('99', 'Coupons')
]
manufacturerCodes = {}
# Expand the human readable ranges above (e.g. '40-44') into a flat
# {int prefix: country/use} lookup table.
for (k, v) in _eanNumberSystems:
    words = k.split('-')
    if len(words)==2:
        fromCode = int(words[0])
        toCode = int(words[1])
        for code in range(fromCode, toCode+1):
            manufacturerCodes[code] = v
    else:
        manufacturerCodes[int(k)] = v
def nDigits(n):
    """Return a validator instance accepting strings of exactly *n* digits.

    The value must be a string at most ``n`` characters long containing
    exactly ``n`` characters from ``0123456789``.
    """
    class _ndigits(Validator):
        def test(self, x):
            if type(x) is not str or len(x) > n:
                return False
            return sum(1 for c in x if c in "0123456789") == n
    return _ndigits()
class Ean13BarcodeWidget(PlotArea):
    """Drawable widget rendering an EAN-13 barcode (12 payload digits
    plus a computed check digit)."""
    codeName = "EAN13"
    _attrMap = AttrMap(BASE=PlotArea,
        value = AttrMapValue(nDigits(12), desc='the number'),
        fontName = AttrMapValue(isString, desc='fontName'),
        fontSize = AttrMapValue(isNumber, desc='font size'),
        x = AttrMapValue(isNumber, desc='x-coord'),
        y = AttrMapValue(isNumber, desc='y-coord'),
        barFillColor = AttrMapValue(isColor, desc='bar color'),
        barHeight = AttrMapValue(isNumber, desc='Height of bars.'),
        barWidth = AttrMapValue(isNumber, desc='Width of bars.'),
        barStrokeWidth = AttrMapValue(isNumber, desc='Width of bar borders.'),
        barStrokeColor = AttrMapValue(isColor, desc='Color of bar borders.'),
        textColor = AttrMapValue(isColor, desc='human readable text color'),
        humanReadable = AttrMapValue(isBoolean, desc='if human readable'),
        quiet = AttrMapValue(isBoolean, desc='if quiet zone to be used'),
        lquiet = AttrMapValue(isBoolean, desc='left quiet zone length'),
        rquiet = AttrMapValue(isBoolean, desc='right quiet zone length'),
        )
    _digits=12
    _start_right = 7    #for ean-13 left = [0:7] right=[7:13]
    _nbars = 113
    barHeight = 25.93*mm    #millimeters
    barWidth = (37.29/_nbars)*mm
    humanReadable = 1
    # Checksum weights for even/odd digit positions
    _0csw = 1
    _1csw = 3
    #Left Hand Digits.
    _left = (   ("0001101", "0011001", "0010011", "0111101",
                "0100011", "0110001", "0101111", "0111011",
                "0110111", "0001011",
                ),  #odd left hand digits
                ("0100111", "0110011", "0011011", "0100001",
                "0011101", "0111001", "0000101", "0010001",
                "0001001", "0010111"),  #even left hand digits
            )
    _right = ("1110010", "1100110", "1101100", "1000010",
            "1011100", "1001110", "1010000", "1000100",
            "1001000", "1110100")
    quiet = 1
    rquiet = lquiet = None
    # Guard patterns: _tail at both ends, _sep in the middle
    _tail = "101"
    _sep = "01010"
    # First digit selects the odd/even parity pattern of the left half
    _lhconvert={
        "0": (0,0,0,0,0,0),
        "1": (0,0,1,0,1,1),
        "2": (0,0,1,1,0,1),
        "3": (0,0,1,1,1,0),
        "4": (0,1,0,0,1,1),
        "5": (0,1,1,0,0,1),
        "6": (0,1,1,1,0,0),
        "7": (0,1,0,1,0,1),
        "8": (0,1,0,1,1,0),
        "9": (0,1,1,0,1,0)
        }
    fontSize = 8        #millimeters
    fontName = 'Helvetica'
    textColor = barFillColor = colors.black
    barStrokeColor = None
    barStrokeWidth = 0
    x = 0
    y = 0
    def __init__(self,value='123456789012',**kw):
        # Zero-pad short values on the left; truncate long ones
        value = str(value) if isinstance(value,int) else asNative(value)
        self.value=max(self._digits-len(value),0)*'0'+value[:self._digits]
        for k, v in kw.items():
            setattr(self, k, v)
    width = property(lambda self: self.barWidth*(self._nbars-18+self._calc_quiet(self.lquiet)+self._calc_quiet(self.rquiet)))
    def wrap(self,aW,aH):
        return self.width,self.barHeight
    def _encode_left(self,s,a):
        """Append left-half patterns to *a* using parity from digit 0."""
        cp = self._lhconvert[s[0]]      #convert the left hand numbers
        _left = self._left
        z = ord('0')
        for i,c in enumerate(s[1:self._start_right]):
            a(_left[cp[i]][ord(c)-z])
    def _short_bar(self,i):
        """True if bar *i* is shortened to make room for the text line."""
        i += 9 - self._lquiet
        return self.humanReadable and ((12<i<55) or (57<i<101))
    def _calc_quiet(self,v):
        """Return the quiet zone width in whole bar-widths."""
        if self.quiet:
            if v is None:
                v = 9
            else:
                x = float(max(v,0))/self.barWidth
                v = int(x)
                if v-x>0: v += 1
        else:
            v = 0
        return v
    def draw(self):
        """Build and return the Group of Rects (bars) and Strings."""
        g = Group()
        gAdd = g.add
        barWidth = self.barWidth
        width = self.width
        barHeight = self.barHeight
        x = self.x
        y = self.y
        gAdd(Rect(x,y,width,barHeight,fillColor=None,strokeColor=None,strokeWidth=0))
        s = self.value+self._checkdigit(self.value)
        self._lquiet = lquiet = self._calc_quiet(self.lquiet)
        rquiet = self._calc_quiet(self.rquiet)
        b = [lquiet*'0',self._tail] #the signal string
        a = b.append
        self._encode_left(s,a)
        a(self._sep)
        z = ord('0')
        _right = self._right
        for c in s[self._start_right:]:
            a(_right[ord(c)-z])
        a(self._tail)
        a(rquiet*'0')
        fontSize = self.fontSize
        barFillColor = self.barFillColor
        barStrokeWidth = self.barStrokeWidth
        barStrokeColor = self.barStrokeColor
        fth = fontSize*1.2
        b = ''.join(b)
        lrect = None
        # Merge adjacent '1' bits of equal height into single rects
        for i,c in enumerate(b):
            if c=="1":
                dh = self._short_bar(i) and fth or 0
                yh = y+dh
                if lrect and lrect.y==yh:
                    lrect.width += barWidth
                else:
                    lrect = Rect(x,yh,barWidth,barHeight-dh,fillColor=barFillColor,strokeWidth=barStrokeWidth,strokeColor=barStrokeColor)
                    gAdd(lrect)
            else:
                lrect = None
            x += barWidth
        if self.humanReadable: self._add_human_readable(s,gAdd)
        return g
    def _add_human_readable(self,s,gAdd):
        """Draw the digits below the bars: 1 + 6 + 6 layout."""
        barWidth = self.barWidth
        fontSize = self.fontSize
        textColor = self.textColor
        fontName = self.fontName
        fth = fontSize*1.2
        # draw the num below the line.
        c = s[0]
        w = stringWidth(c,fontName,fontSize)
        x = self.x+barWidth*(self._lquiet-8)
        y = self.y + 0.2*fth
        gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor))
        x = self.x + (33-9+self._lquiet)*barWidth
        c = s[1:7]
        gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
        x += 47*barWidth
        c = s[7:]
        gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
    def _checkdigit(cls,num):
        """Compute the EAN check digit character for *num*."""
        z = ord('0')
        iSum = cls._0csw*sum([(ord(x)-z) for x in num[::2]]) \
                 + cls._1csw*sum([(ord(x)-z) for x in num[1::2]])
        return chr(z+((10-(iSum%10))%10))
    _checkdigit=classmethod(_checkdigit)
class Ean8BarcodeWidget(Ean13BarcodeWidget):
    """EAN-8 barcode: 7 payload digits plus a computed check digit.

    Inherits the drawing machinery from Ean13BarcodeWidget and only
    overrides the geometry/encoding constants plus the human readable
    text layout (two 4-digit groups).
    """
    codeName = "EAN8"
    _attrMap = AttrMap(BASE=Ean13BarcodeWidget,
        value = AttrMapValue(nDigits(7), desc='the number'),
        )
    _start_right = 4    #for ean-8 left = [0:4] right=[4:8]
    _nbars = 85
    _digits=7
    _0csw = 3
    _1csw = 1
    def _encode_left(self,s,a):
        """Append left-half patterns; EAN-8 always uses the odd parity table."""
        cp = self._lhconvert[s[0]]      #convert the left hand numbers
        _left = self._left[0]
        z = ord('0')
        for i,c in enumerate(s[0:self._start_right]):
            a(_left[ord(c)-z])
    def _short_bar(self,i):
        """True if bar *i* is shortened to make room for the text line."""
        i += 9 - self._lquiet
        return self.humanReadable and ((12<i<41) or (43<i<73))
    def _add_human_readable(self,s,gAdd):
        """Draw the two 4-digit groups below the bars."""
        barWidth = self.barWidth
        fontSize = self.fontSize
        textColor = self.textColor
        fontName = self.fontName
        fth = fontSize*1.2
        # draw the num below the line.
        y = self.y + 0.2*fth
        # Bugfix: offset by self.x so the text follows the widget position,
        # matching the EAN-13 base class (previously the widget x was
        # ignored and the text always started at the canvas origin).
        x = self.x + (26.5-9+self._lquiet)*barWidth
        c = s[0:4]
        gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
        x = self.x + (59.5-9+self._lquiet)*barWidth
        c = s[4:]
        gAdd(String(x,y,c,fontName=fontName,fontSize=fontSize,fillColor=textColor,textAnchor='middle'))
class UPCA(Ean13BarcodeWidget):
    """UPC-A barcode widget: an 11 digit value plus computed check digit,
    rendered with the EAN-13 drawing machinery."""
    codeName = "UPCA"
    _attrMap = AttrMap(BASE=Ean13BarcodeWidget,
        value = AttrMapValue(nDigits(11), desc='the number'),
        )
    # Right-hand half of the symbol starts at digit index 6.
    _start_right = 6
    _digits = 11
    # Check digit weights: even string indices x3, odd indices x1
    # (see Ean13BarcodeWidget._checkdigit).
    _0csw = 3
    _1csw = 1
    # NOTE(review): 7 modules per value digit (7*11) plus guards (2*3 + 5)
    # plus 1 -- presumably the extra term accounts for the UPC-A layout
    # used by the drawing code; confirm against the draw method.
    _nbars = 1+7*11+2*3+5
| gpl-3.0 |
arjunbm13/youtube-dl | youtube_dl/extractor/ign.py | 107 | 5415 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class IGNIE(InfoExtractor):
    """
    Extractor for some of the IGN sites, like www.ign.com, es.ign.com de.ign.com.
    Some videos of it.ign.com are also supported
    """
    _VALID_URL = r'https?://.+?\.ign\.com/(?P<type>videos|show_videos|articles|(?:[^/]*/feature))(/.+)?/(?P<name_or_id>.+)'
    IE_NAME = 'ign.com'
    # %s is replaced by the video id; the config endpoint returns JSON
    # describing the playlist/media for that id.
    _CONFIG_URL_TEMPLATE = 'http://www.ign.com/videos/configs/id/%s.config'
    # Tried in order by _html_search_regex; the first pattern that
    # matches supplies the description.
    _DESCRIPTION_RE = [
        r'<span class="page-object-description">(.+?)</span>',
        r'id="my_show_video">.*?<p>(.*?)</p>',
        r'<meta name="description" content="(.*?)"',
    ]
    _TESTS = [
        {
            'url': 'http://www.ign.com/videos/2013/06/05/the-last-of-us-review',
            'md5': 'eac8bdc1890980122c3b66f14bdd02e9',
            'info_dict': {
                'id': '8f862beef863986b2785559b9e1aa599',
                'ext': 'mp4',
                'title': 'The Last of Us Review',
                'description': 'md5:c8946d4260a4d43a00d5ae8ed998870c',
            }
        },
        {
            'url': 'http://me.ign.com/en/feature/15775/100-little-things-in-gta-5-that-will-blow-your-mind',
            'info_dict': {
                'id': '100-little-things-in-gta-5-that-will-blow-your-mind',
            },
            'playlist': [
                {
                    'info_dict': {
                        'id': '5ebbd138523268b93c9141af17bec937',
                        'ext': 'mp4',
                        'title': 'GTA 5 Video Review',
                        'description': 'Rockstar drops the mic on this generation of games. Watch our review of the masterly Grand Theft Auto V.',
                    },
                },
                {
                    'info_dict': {
                        'id': '638672ee848ae4ff108df2a296418ee2',
                        'ext': 'mp4',
                        'title': '26 Twisted Moments from GTA 5 in Slow Motion',
                        'description': 'The twisted beauty of GTA 5 in stunning slow motion.',
                    },
                },
            ],
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.ign.com/articles/2014/08/15/rewind-theater-wild-trailer-gamescom-2014?watch',
            'md5': '618fedb9c901fd086f6f093564ef8558',
            'info_dict': {
                'id': '078fdd005f6d3c02f63d795faa1b984f',
                'ext': 'mp4',
                'title': 'Rewind Theater - Wild Trailer Gamescom 2014',
                'description': (
                    'Giant skeletons, bloody hunts, and captivating'
                    ' natural beauty take our breath away.'
                ),
            },
        },
    ]
    def _find_video_id(self, webpage):
        """Return the id of the video embedded in *webpage*.

        The patterns are tried in order by _search_regex; they cover the
        several page layouts IGN has used over time.
        """
        res_id = [
            r'"video_id"\s*:\s*"(.*?)"',
            r'class="hero-poster[^"]*?"[^>]*id="(.+?)"',
            r'data-video-id="(.+?)"',
            r'<object id="vid_(.+?)"',
            r'<meta name="og:image" content=".*/(.+?)-(.+?)/.+.jpg"',
        ]
        return self._search_regex(res_id, webpage, 'video id')
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name_or_id = mobj.group('name_or_id')
        page_type = mobj.group('type')
        webpage = self._download_webpage(url, name_or_id)
        # page_type is 'video' only for subclasses such as OneUPIE below;
        # native ign.com pages ('videos', 'articles', feature pages, ...)
        # may embed several players, so look for a playlist first.
        if page_type != 'video':
            multiple_urls = re.findall(
                '<param name="flashvars"[^>]*value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
                webpage)
            if multiple_urls:
                entries = [self.url_result(u, ie='IGN') for u in multiple_urls]
                return {
                    '_type': 'playlist',
                    'id': name_or_id,
                    'entries': entries,
                }
        # Single-video page: resolve the embedded id and fetch its config.
        video_id = self._find_video_id(webpage)
        result = self._get_video_info(video_id)
        description = self._html_search_regex(self._DESCRIPTION_RE,
                                              webpage, 'video description', flags=re.DOTALL)
        result['description'] = description
        return result
    def _get_video_info(self, video_id):
        """Download the JSON config for *video_id* and map it to an info dict."""
        config_url = self._CONFIG_URL_TEMPLATE % video_id
        config = self._download_json(config_url, video_id)
        media = config['playlist']['media']
        return {
            'id': media['metadata']['videoId'],
            'url': media['url'],
            'title': media['metadata']['title'],
            # '{size}' is a server-side placeholder in the poster URL.
            'thumbnail': media['poster'][0]['url'].replace('{size}', 'grande'),
        }
class OneUPIE(IGNIE):
    # 1up.com pages embed IGN players; reuse the IGN extraction logic but
    # report the numeric id from the 1up URL instead of the IGN video id.
    _VALID_URL = r'https?://gamevideos\.1up\.com/(?P<type>video)/id/(?P<name_or_id>.+)\.html'
    IE_NAME = '1up.com'
    _DESCRIPTION_RE = r'<div id="vid_summary">(.+?)</div>'
    _TESTS = [{
        'url': 'http://gamevideos.1up.com/video/id/34976.html',
        'md5': '68a54ce4ebc772e4b71e3123d413163d',
        'info_dict': {
            'id': '34976',
            'ext': 'mp4',
            'title': 'Sniper Elite V2 - Trailer',
            'description': 'md5:5d289b722f5a6d940ca3136e9dae89cf',
        }
    }]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # Delegate the actual extraction to IGNIE (the 'video' type group
        # makes the parent skip its multi-video playlist search), then
        # override the id with the one from the 1up URL.
        result = super(OneUPIE, self)._real_extract(url)
        result['id'] = mobj.group('name_or_id')
        return result
| unlicense |
drpaneas/linuxed.gr | lib/python2.7/site-packages/Crypto/SelfTest/Random/Fortuna/test_FortunaAccumulator.py | 116 | 8612 | # -*- coding: utf-8 -*-
#
# SelfTest/Random/Fortuna/test_FortunaAccumulator.py: Self-test for the FortunaAccumulator module
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-tests for Crypto.Random.Fortuna.FortunaAccumulator"""
__revision__ = "$Id$"
import sys
# Python 2.1 lacks several builtins used by these tests; pull in the
# compatibility shims on that version only.
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
    from Crypto.Util.py21compat import *
# b() / bytes helpers shared between Python 2 and 3.
from Crypto.Util.py3compat import *
import unittest
from binascii import b2a_hex
class FortunaAccumulatorTests(unittest.TestCase):
    """Unit tests for the Fortuna entropy pools and accumulator.

    The expected values are pre-computed SHAd256 digests / AES-256-CTR
    outputs; see the inline derivations in test_accumulator.
    """
    def setUp(self):
        # Imported lazily (and published as a global) so that an import
        # failure surfaces as a test error rather than breaking module load.
        global FortunaAccumulator
        from Crypto.Random.Fortuna import FortunaAccumulator
    def test_FortunaPool(self):
        """FortunaAccumulator.FortunaPool"""
        pool = FortunaAccumulator.FortunaPool()
        self.assertEqual(0, pool.length)
        self.assertEqual("5df6e0e2761359d30a8275058e299fcc0381534545f55cf43e41983f5d4c9456", pool.hexdigest())
        pool.append(b('abc'))
        self.assertEqual(3, pool.length)
        self.assertEqual("4f8b42c22dd3729b519ba6f68d2da7cc5b2d606d05daed5ad5128cc03e6c6358", pool.hexdigest())
        pool.append(b("dbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"))
        self.assertEqual(56, pool.length)
        self.assertEqual(b('0cffe17f68954dac3a84fb1458bd5ec99209449749b2b308b7cb55812f9563af'), b2a_hex(pool.digest()))
        # reset() must clear both the running digest and the byte count.
        pool.reset()
        self.assertEqual(0, pool.length)
        pool.append(b('a') * 10**6)
        self.assertEqual(10**6, pool.length)
        self.assertEqual(b('80d1189477563e1b5206b2749f1afe4807e5705e8bd77887a60187a712156688'), b2a_hex(pool.digest()))
    def test_which_pools(self):
        """FortunaAccumulator.which_pools"""
        # which_pools(0) should fail
        self.assertRaises(AssertionError, FortunaAccumulator.which_pools, 0)
        # Pool i participates in every 2**i-th reseed, so powers of two
        # pull in progressively more pools while odd counters use pool 0.
        self.assertEqual(FortunaAccumulator.which_pools(1), [0])
        self.assertEqual(FortunaAccumulator.which_pools(2), [0, 1])
        self.assertEqual(FortunaAccumulator.which_pools(3), [0])
        self.assertEqual(FortunaAccumulator.which_pools(4), [0, 1, 2])
        self.assertEqual(FortunaAccumulator.which_pools(5), [0])
        self.assertEqual(FortunaAccumulator.which_pools(6), [0, 1])
        self.assertEqual(FortunaAccumulator.which_pools(7), [0])
        self.assertEqual(FortunaAccumulator.which_pools(8), [0, 1, 2, 3])
        for i in range(1, 32):
            self.assertEqual(FortunaAccumulator.which_pools(2L**i-1), [0])
            self.assertEqual(FortunaAccumulator.which_pools(2L**i), range(i+1))
            self.assertEqual(FortunaAccumulator.which_pools(2L**i+1), [0])
        # There are only 32 pools; larger counters use all of them.
        self.assertEqual(FortunaAccumulator.which_pools(2L**31), range(32))
        self.assertEqual(FortunaAccumulator.which_pools(2L**32), range(32))
        self.assertEqual(FortunaAccumulator.which_pools(2L**33), range(32))
        self.assertEqual(FortunaAccumulator.which_pools(2L**34), range(32))
        self.assertEqual(FortunaAccumulator.which_pools(2L**35), range(32))
        self.assertEqual(FortunaAccumulator.which_pools(2L**36), range(32))
        self.assertEqual(FortunaAccumulator.which_pools(2L**64), range(32))
        self.assertEqual(FortunaAccumulator.which_pools(2L**128), range(32))
    def test_accumulator(self):
        """FortunaAccumulator.FortunaAccumulator"""
        fa = FortunaAccumulator.FortunaAccumulator()
        # This should fail, because we haven't seeded the PRNG yet
        self.assertRaises(AssertionError, fa.random_data, 1)
        # Spread some test data across the pools (source number 42)
        # This would be horribly insecure in a real system.
        for p in range(32):
            fa.add_random_event(42, p, b("X") * 32)
            self.assertEqual(32+2, fa.pools[p].length)
        # This should still fail, because we haven't seeded the PRNG with 64 bytes yet
        self.assertRaises(AssertionError, fa.random_data, 1)
        # Add more data
        for p in range(32):
            fa.add_random_event(42, p, b("X") * 32)
            self.assertEqual((32+2)*2, fa.pools[p].length)
        # The underlying RandomGenerator should get seeded with Pool 0
        #   s = SHAd256(chr(42) + chr(32) + "X"*32 + chr(42) + chr(32) + "X"*32)
        #     = SHA256(h'edd546f057b389155a31c32e3975e736c1dec030ddebb137014ecbfb32ed8c6f')
        #     = h'aef42a5dcbddab67e8efa118e1b47fde5d697f89beb971b99e6e8e5e89fbf064'
        # The counter and the key before reseeding is:
        #   C_0 = 0
        #   K_0 = "\x00" * 32
        # The counter after reseeding is 1, and the new key after reseeding is
        #   C_1 = 1
        #   K_1 = SHAd256(K_0 || s)
        #       = SHA256(h'0eae3e401389fab86640327ac919ecfcb067359d95469e18995ca889abc119a6')
        #       = h'aafe9d0409fbaaafeb0a1f2ef2014a20953349d3c1c6e6e3b962953bea6184dd'
        # The first block of random data, therefore, is
        #   r_1 = AES-256(K_1, 1)
        #       = AES-256(K_1, h'01000000000000000000000000000000')
        #       = h'b7b86bd9a27d96d7bb4add1b6b10d157'
        # The second block of random data is
        #   r_2 = AES-256(K_1, 2)
        #       = AES-256(K_1, h'02000000000000000000000000000000')
        #       = h'2350b1c61253db2f8da233be726dc15f'
        # The third and fourth blocks of random data (which become the new key) are
        #   r_3 = AES-256(K_1, 3)
        #       = AES-256(K_1, h'03000000000000000000000000000000')
        #       = h'f23ad749f33066ff53d307914fbf5b21'
        #   r_4 = AES-256(K_1, 4)
        #       = AES-256(K_1, h'04000000000000000000000000000000')
        #       = h'da9667c7e86ba247655c9490e9d94a7c'
        #   K_2 = r_3 || r_4
        #       = h'f23ad749f33066ff53d307914fbf5b21da9667c7e86ba247655c9490e9d94a7c'
        # The final counter value is 5.
        self.assertEqual("aef42a5dcbddab67e8efa118e1b47fde5d697f89beb971b99e6e8e5e89fbf064",
                         fa.pools[0].hexdigest())
        self.assertEqual(None, fa.generator.key)
        self.assertEqual(0, fa.generator.counter.next_value())
        result = fa.random_data(32)
        self.assertEqual(b("b7b86bd9a27d96d7bb4add1b6b10d157" "2350b1c61253db2f8da233be726dc15f"), b2a_hex(result))
        self.assertEqual(b("f23ad749f33066ff53d307914fbf5b21da9667c7e86ba247655c9490e9d94a7c"), b2a_hex(fa.generator.key))
        self.assertEqual(5, fa.generator.counter.next_value())
    def test_accumulator_pool_length(self):
        """FortunaAccumulator.FortunaAccumulator minimum pool length"""
        fa = FortunaAccumulator.FortunaAccumulator()
        # This test case is hard-coded to assume that FortunaAccumulator.min_pool_size is 64.
        self.assertEqual(fa.min_pool_size, 64)
        # The PRNG should not allow us to get random data from it yet
        self.assertRaises(AssertionError, fa.random_data, 1)
        # Add 60 bytes, 4 at a time (2 header + 2 payload) to each of the 32 pools
        for i in range(15):
            for p in range(32):
                # Add the bytes to the pool
                fa.add_random_event(2, p, b("XX"))
        # The PRNG should not allow us to get random data from it yet
        self.assertRaises(AssertionError, fa.random_data, 1)
        # Add 4 more bytes to pool 0
        fa.add_random_event(2, 0, b("XX"))
        # We should now be able to get data from the accumulator
        fa.random_data(1)
def get_tests(config={}):
    """Return this module's test cases as a list of TestCase instances.

    The *config* mapping is accepted for interface compatibility with the
    other Crypto.SelfTest modules; it is not used here (and is never
    mutated, so the mutable default is harmless).
    """
    from Crypto.SelfTest.st_common import list_test_cases
    return list_test_cases(FortunaAccumulatorTests)
if __name__ == '__main__':
    # A real function instead of a name bound to a lambda (PEP 8 E731);
    # unittest resolves defaultTest='suite' by name in this module.
    def suite():
        return unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| mit |
menardorama/ReadyNAS-Add-ons | headphones-1.0.0/debian/headphones/apps/headphones/lib/requests/packages/urllib3/contrib/pyopenssl.py | 153 | 9905 | '''SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 0.13)
* ndg-httpsclient (tested with 0.3.2)
* pyasn1 (tested with 0.1.6)
You can install them with the following command:
pip install pyopenssl ndg-httpsclient pyasn1
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
Module Variables
----------------
:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
Default: ``ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:
ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS``
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
'''
try:
    from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
    from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
except SyntaxError as e:
    # Some ndg-httpsclient releases fail to *parse* on unsupported Python
    # versions; surface that as the ImportError callers of this optional
    # module already handle.
    raise ImportError(e)
import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.type import univ, constraint
from socket import _fileobject, timeout
import ssl
import select
from .. import connection
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI only *really* works if we can read the subjectAltName of certificates.
HAS_SNI = SUBJ_ALT_NAME_SUPPORT
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
    ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
    ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
# SSLv3 support may be compiled out of the local ssl module; register the
# method only when the constant exists.
try:
    _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
except AttributeError:
    pass
_openssl_verify = {
    ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
    ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
    ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
    + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM over any AES-CBC for better performance and security,
# - use 3DES as fallback which is secure but slow,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
DEFAULT_SSL_CIPHER_LIST = "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:" + \
    "ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:" + \
    "!aNULL:!MD5:!DSS"
# Stock implementations saved at import time so extract_from_urllib3()
# can undo the monkey-patching performed by inject_into_urllib3().
orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
def inject_into_urllib3():
    """Monkey-patch urllib3 so HTTPS connections use the PyOpenSSL-backed
    :func:`ssl_wrap_socket` and SNI support is advertised when available.
    """
    util.HAS_SNI = HAS_SNI
    connection.ssl_wrap_socket = ssl_wrap_socket
def extract_from_urllib3():
    """Restore the stock urllib3 SSL implementation, undoing the effect
    of :func:`inject_into_urllib3`.
    """
    util.HAS_SNI = orig_util_HAS_SNI
    connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
class SubjectAltName(BaseSubjectAltName):
    '''ASN.1 implementation for subjectAltNames support'''
    # There is no limit to how many SAN certificates a certificate may have,
    # however this needs to have some limit so we'll set an arbitrarily high
    # limit.
    # NOTE(review): 1024 entries is far beyond anything seen in practice,
    # so legitimate certificates are never rejected by this constraint.
    sizeSpec = univ.SequenceOf.sizeSpec + \
        constraint.ValueSizeConstraint(1, 1024)
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
    """Return the list of dNSName entries from *peer_cert*'s
    subjectAltName extension (empty when SAN parsing is unsupported).
    """
    names = []
    if not SUBJ_ALT_NAME_SUPPORT:
        return names
    alt_name_spec = SubjectAltName()
    for idx in range(peer_cert.get_extension_count()):
        extension = peer_cert.get_extension(idx)
        if extension.get_short_name() != 'subjectAltName':
            continue
        # PyOpenSSL hands back the raw ASN.1 (DER) payload of the extension.
        decoded = der_decoder.decode(extension.get_data(),
                                     asn1Spec=alt_name_spec)
        for part in decoded:
            if not isinstance(part, SubjectAltName):
                continue
            for pos in range(len(part)):
                component = part.getComponentByPosition(pos)
                if component.getName() == 'dNSName':
                    names.append(str(component.getComponent()))
    return names
class WrappedSocket(object):
    '''API-compatibility wrapper for Python OpenSSL's Connection-class.

    Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
    collector of pypy.
    '''
    def __init__(self, connection, socket, suppress_ragged_eofs=True):
        # connection: the OpenSSL.SSL.Connection; socket: the underlying
        # plain socket (used for fileno/select/timeout operations).
        self.connection = connection
        self.socket = socket
        self.suppress_ragged_eofs = suppress_ragged_eofs
        self._makefile_refs = 0
    def fileno(self):
        return self.socket.fileno()
    def makefile(self, mode, bufsize=-1):
        # Each file object holds a reference; close() only shuts the
        # connection down once the last one is gone (see close()).
        self._makefile_refs += 1
        return _fileobject(self, mode, bufsize, close=True)
    def recv(self, *args, **kwargs):
        try:
            data = self.connection.recv(*args, **kwargs)
        except OpenSSL.SSL.SysCallError as e:
            # A peer that drops the link without close_notify surfaces as
            # this SysCallError; treat it as EOF when requested.
            if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                return b''
            else:
                raise
        except OpenSSL.SSL.WantReadError:
            # No TLS record ready yet: wait for the socket to become
            # readable (respecting its timeout) and retry.
            rd, wd, ed = select.select(
                [self.socket], [], [], self.socket.gettimeout())
            if not rd:
                raise timeout('The read operation timed out')
            else:
                return self.recv(*args, **kwargs)
        else:
            return data
    def settimeout(self, timeout):
        return self.socket.settimeout(timeout)
    def _send_until_done(self, data):
        # Retry until OpenSSL accepts at least part of the payload,
        # blocking (with timeout) whenever the transport is not writable.
        while True:
            try:
                return self.connection.send(data)
            except OpenSSL.SSL.WantWriteError:
                _, wlist, _ = select.select([], [self.socket], [],
                                            self.socket.gettimeout())
                if not wlist:
                    raise timeout()
                continue
    def sendall(self, data):
        # send() may accept only part of the buffer; loop over the rest.
        while len(data):
            sent = self._send_until_done(data)
            data = data[sent:]
    def close(self):
        # Only shut down the TLS connection once no makefile() file
        # objects still reference this socket.
        if self._makefile_refs < 1:
            return self.connection.shutdown()
        else:
            self._makefile_refs -= 1
    def getpeercert(self, binary_form=False):
        """Mimic ssl.SSLSocket.getpeercert for the fields urllib3 needs
        (commonName and DNS subjectAltNames)."""
        x509 = self.connection.get_peer_certificate()
        if not x509:
            return x509
        if binary_form:
            return OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_ASN1,
                x509)
        return {
            'subject': (
                (('commonName', x509.get_subject().CN),),
            ),
            'subjectAltName': [
                ('DNS', value)
                for value in get_subj_alt_name(x509)
            ]
        }
    def _reuse(self):
        # pypy GC hooks: mirror makefile()'s reference counting.
        self._makefile_refs += 1
    def _drop(self):
        if self._makefile_refs < 1:
            self.close()
        else:
            self._makefile_refs -= 1
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
    """OpenSSL verification hook: accept the certificate exactly when no
    verification error was reported for it."""
    return not err_no
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                    ca_certs=None, server_hostname=None,
                    ssl_version=None):
    """PyOpenSSL-backed replacement for urllib3's ssl_wrap_socket.

    Builds an SSL context from the ssl-module style parameters, performs
    the client handshake on *sock* (sending *server_hostname* via SNI)
    and returns a WrappedSocket.  Raises ssl.SSLError on CA-loading or
    handshake failures so callers see the stdlib exception type.
    """
    ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
    if certfile:
        keyfile = keyfile or certfile # Match behaviour of the normal python ssl library
        ctx.use_certificate_file(certfile)
    if keyfile:
        ctx.use_privatekey_file(keyfile)
    if cert_reqs != ssl.CERT_NONE:
        ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
    if ca_certs:
        try:
            ctx.load_verify_locations(ca_certs, None)
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
    else:
        ctx.set_default_verify_paths()
    # Disable TLS compression to mitigate CRIME attack (issue #309)
    OP_NO_COMPRESSION = 0x20000
    ctx.set_options(OP_NO_COMPRESSION)
    # Set list of supported ciphersuites.
    ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)
    cnx = OpenSSL.SSL.Connection(ctx, sock)
    # NOTE(review): assumes server_hostname is always provided; a None
    # value would be passed straight to set_tlsext_host_name -- confirm
    # callers never omit it.
    cnx.set_tlsext_host_name(server_hostname)
    cnx.set_connect_state()
    # Drive the handshake by hand, waiting for readability whenever
    # OpenSSL needs more data from the peer.
    while True:
        try:
            cnx.do_handshake()
        except OpenSSL.SSL.WantReadError:
            select.select([sock], [], [])
            continue
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError('bad handshake', e)
        break
    return WrappedSocket(cnx, sock)
| gpl-2.0 |
fzalkow/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
    """Generate a grid of points based on the ``percentiles`` of ``X``.

    The grid is generated by placing ``grid_resolution`` equally
    spaced points between the ``percentiles`` of each column
    of ``X``.

    Parameters
    ----------
    X : ndarray
        The data

    percentiles : tuple of floats
        The percentiles which are used to construct the extreme
        values of the grid axes.

    grid_resolution : int
        The number of equally spaced points that are placed
        on the grid.

    Returns
    -------
    grid : ndarray
        All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0] == grid_resolution * X.shape[1]``.

    axes : seq of ndarray
        The axes with which the grid has been created.
    """
    if len(percentiles) != 2:
        raise ValueError('percentile must be tuple of len 2')
    if not all(0. <= x <= 1. for x in percentiles):
        raise ValueError('percentile values must be in [0, 1]')

    # The empirical percentiles are computed over all columns at once and
    # do not depend on the loop variable, so compute them a single time
    # instead of once per column as the original loop did.
    emp_percentiles = mquantiles(X, prob=percentiles, axis=0)

    axes = []
    for col in range(X.shape[1]):
        uniques = np.unique(X[:, col])
        if uniques.shape[0] < grid_resolution:
            # feature has low resolution: use its unique values directly
            axis = uniques
        else:
            # create axis based on percentiles and grid resolution
            axis = np.linspace(emp_percentiles[0, col],
                               emp_percentiles[1, col],
                               num=grid_resolution, endpoint=True)
        axes.append(axis)

    return cartesian(axes), axes
def partial_dependence(gbrt, target_variables, grid=None, X=None,
                       percentiles=(0.05, 0.95), grid_resolution=100):
    """Partial dependence of ``target_variables``.

    Partial dependence plots show the dependence between the joint values
    of the ``target_variables`` and the function represented
    by the ``gbrt``.

    Read more in the :ref:`User Guide <partial_dependence>`.

    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.
    target_variables : array-like, dtype=int
        The target features for which the partial dependecy should be
        computed (size should be smaller than 3 for visual renderings).
    grid : array-like, shape=(n_points, len(target_variables))
        The grid of ``target_variables`` values for which the
        partial dependecy should be evaluated (either ``grid`` or ``X``
        must be specified).
    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained. It is used to generate
        a ``grid`` for the ``target_variables``. The ``grid`` comprises
        ``grid_resolution`` equally spaced points between the two
        ``percentiles``.
    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used create the extreme values
        for the ``grid``. Only if ``X`` is not None.
    grid_resolution : int, default=100
        The number of equally spaced points on the ``grid``.

    Returns
    -------
    pdp : array, shape=(n_classes, n_points)
        The partial dependence function evaluated on the ``grid``.
        For regression and binary classification ``n_classes==1``.
    axes : seq of ndarray or None
        The axes with which the grid has been created or None if
        the grid has been given.

    Examples
    --------
    >>> samples = [[0, 0, 2], [1, 0, 0]]
    >>> labels = [0, 1]
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
    >>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
    >>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
    (array([[-4.52...,  4.52...]]), [array([ 0.,  1.])])
    """
    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    if gbrt.estimators_.shape[0] == 0:
        raise ValueError('Call %s.fit before partial_dependence' %
                         gbrt.__class__.__name__)
    # Exactly one of grid / X must be supplied.
    if (grid is None and X is None) or (grid is not None and X is not None):
        raise ValueError('Either grid or X must be specified')
    target_variables = np.asarray(target_variables, dtype=np.int32,
                                  order='C').ravel()
    if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
        raise ValueError('target_variables must be in [0, %d]'
                         % (gbrt.n_features - 1))
    if X is not None:
        # Build the evaluation grid from the training data percentiles.
        X = check_array(X, dtype=DTYPE, order='C')
        grid, axes = _grid_from_X(X[:, target_variables], percentiles,
                                  grid_resolution)
    else:
        assert grid is not None
        # dont return axes if grid is given
        axes = None
        # grid must be 2d
        if grid.ndim == 1:
            grid = grid[:, np.newaxis]
        if grid.ndim != 2:
            raise ValueError('grid must be 2d but is %dd' % grid.ndim)
    grid = np.asarray(grid, dtype=DTYPE, order='C')
    assert grid.shape[1] == target_variables.shape[0]
    # Accumulate the tree-wise partial dependence over all boosting
    # stages (one tree per class and stage).
    n_trees_per_stage = gbrt.estimators_.shape[1]
    n_estimators = gbrt.estimators_.shape[0]
    pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
                   order='C')
    for stage in range(n_estimators):
        for k in range(n_trees_per_stage):
            tree = gbrt.estimators_[stage, k].tree_
            _partial_dependence_tree(tree, grid, target_variables,
                                     gbrt.learning_rate, pdp[k])
    return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
                            label=None, n_cols=3, grid_resolution=100,
                            percentiles=(0.05, 0.95), n_jobs=1,
                            verbose=0, ax=None, line_kw=None,
                            contour_kw=None, **fig_kw):
    """Partial dependence plots for ``features``.

    The ``len(features)`` plots are arranged in a grid with ``n_cols``
    columns. Two-way partial dependence plots are plotted as contour
    plots.

    Read more in the :ref:`User Guide <partial_dependence>`.

    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.
    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained.
    features : seq of tuples or ints
        If seq[i] is an int or a tuple with one int value, a one-way
        PDP is created; if seq[i] is a tuple of two ints, a two-way
        PDP is created.
    feature_names : seq of str
        Name of each feature; feature_names[i] holds
        the name of the feature with index i.
    label : object
        The class label for which the PDPs should be computed.
        Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
    n_cols : int
        The number of columns in the grid plot (default: 3).
    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
        for the PDP axes.
    grid_resolution : int, default=100
        The number of equally spaced points on the axes.
    n_jobs : int
        The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
        Defaults to 1.
    verbose : int
        Verbose output during PD computations. Defaults to 0.
    ax : Matplotlib axis object, default None
        An axis object onto which the plots will be drawn.
    line_kw : dict
        Dict with keywords passed to the ``pylab.plot`` call.
        For one-way partial dependence plots.
    contour_kw : dict
        Dict with keywords passed to the ``pylab.plot`` call.
        For two-way partial dependence plots.
    fig_kw : dict
        Dict with keywords passed to the figure() call.
        Note that all keywords not recognized above will be automatically
        included here.

    Returns
    -------
    fig : figure
        The Matplotlib Figure object.
    axs : seq of Axis objects
        A seq of Axis objects, one for each subplot.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.ensemble import GradientBoostingRegressor
    >>> X, y = make_friedman1()
    >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
    >>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
    ...
    """
    # matplotlib is imported lazily so the module can be used without it.
    import matplotlib.pyplot as plt
    from matplotlib import transforms
    from matplotlib.ticker import MaxNLocator
    from matplotlib.ticker import ScalarFormatter
    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    if gbrt.estimators_.shape[0] == 0:
        raise ValueError('Call %s.fit before partial_dependence' %
                         gbrt.__class__.__name__)
    # set label_idx for multi-class GBRT
    if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
        if label is None:
            raise ValueError('label is not given for multi-class PDP')
        label_idx = np.searchsorted(gbrt.classes_, label)
        if gbrt.classes_[label_idx] != label:
            raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
    else:
        # regression and binary classification
        label_idx = 0
    X = check_array(X, dtype=DTYPE, order='C')
    if gbrt.n_features != X.shape[1]:
        raise ValueError('X.shape[1] does not match gbrt.n_features')
    if line_kw is None:
        line_kw = {'color': 'green'}
    if contour_kw is None:
        contour_kw = {}
    # convert feature_names to list
    if feature_names is None:
        # if not feature_names use fx indices as name
        feature_names = [str(i) for i in range(gbrt.n_features)]
    elif isinstance(feature_names, np.ndarray):
        feature_names = feature_names.tolist()
    def convert_feature(fx):
        # Accept a feature either by index or by its name.
        if isinstance(fx, six.string_types):
            try:
                fx = feature_names.index(fx)
            except ValueError:
                raise ValueError('Feature %s not in feature_names' % fx)
        return fx
    # convert features into a seq of int tuples
    tmp_features = []
    for fxs in features:
        if isinstance(fxs, (numbers.Integral,) + six.string_types):
            fxs = (fxs,)
        try:
            fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
        except TypeError:
            raise ValueError('features must be either int, str, or tuple '
                             'of int/str')
        if not (1 <= np.size(fxs) <= 2):
            raise ValueError('target features must be either one or two')
        tmp_features.append(fxs)
    features = tmp_features
    names = []
    try:
        for fxs in features:
            l = []
            # explicit loop so "i" is bound for exception below
            for i in fxs:
                l.append(feature_names[i])
            names.append(l)
    except IndexError:
        raise ValueError('features[i] must be in [0, n_features) '
                         'but was %d' % i)
    # compute PD functions
    pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(partial_dependence)(gbrt, fxs, X=X,
                                    grid_resolution=grid_resolution,
                                    percentiles=percentiles)
        for fxs in features)
    # get global min and max values of PD grouped by plot type
    # (so all one-way plots share a y range and all two-way plots share
    # a contour range)
    pdp_lim = {}
    for pdp, axes in pd_result:
        min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
        n_fx = len(axes)
        old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
        min_pd = min(min_pd, old_min_pd)
        max_pd = max(max_pd, old_max_pd)
        pdp_lim[n_fx] = (min_pd, max_pd)
    # create contour levels for two-way plots
    if 2 in pdp_lim:
        Z_level = np.linspace(*pdp_lim[2], num=8)
    if ax is None:
        fig = plt.figure(**fig_kw)
    else:
        fig = ax.get_figure()
        fig.clear()
    n_cols = min(n_cols, len(features))
    n_rows = int(np.ceil(len(features) / float(n_cols)))
    axs = []
    for i, fx, name, (pdp, axes) in zip(count(), features, names,
                                        pd_result):
        ax = fig.add_subplot(n_rows, n_cols, i + 1)
        if len(axes) == 1:
            # one-way PDP: simple line plot
            ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
        else:
            # make contour plot
            assert len(axes) == 2
            XX, YY = np.meshgrid(axes[0], axes[1])
            Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
            CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
                            colors='k')
            ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
                        vmin=Z_level[0], alpha=0.75, **contour_kw)
            ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
        # plot data deciles + axes labels
        deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
        trans = transforms.blended_transform_factory(ax.transData,
                                                     ax.transAxes)
        ylim = ax.get_ylim()
        ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
        ax.set_xlabel(name[0])
        ax.set_ylim(ylim)
        # prevent x-axis ticks from overlapping
        ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
        tick_formatter = ScalarFormatter()
        tick_formatter.set_powerlimits((-3, 4))
        ax.xaxis.set_major_formatter(tick_formatter)
        if len(axes) > 1:
            # two-way PDP - y-axis deciles + labels
            deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
            trans = transforms.blended_transform_factory(ax.transAxes,
                                                         ax.transData)
            xlim = ax.get_xlim()
            ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
            ax.set_ylabel(name[1])
            # hline erases xlim
            ax.set_xlim(xlim)
        else:
            ax.set_ylabel('Partial dependence')
        if len(axes) == 1:
            ax.set_ylim(pdp_lim[1])
        axs.append(ax)
    fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
                        hspace=0.3)
    return fig, axs
| bsd-3-clause |
kerr-huang/SL4A | python/src/Tools/pybench/Imports.py | 45 | 2947 | from pybench import Test
# First imports:
import os
import package.submodule
class SecondImport(Test):
    """Benchmark re-importing an already-imported stdlib module.

    After the first import, each ``import os`` only performs a
    sys.modules cache lookup plus a local name binding, so this
    measures the import machinery's cached path.
    """
    version = 2.0
    # 25 imports per loop iteration (kept as literal statements on
    # purpose: wrapping them in a loop would change what is timed).
    operations = 5 * 5
    rounds = 40000
    def test(self):
        # Timed body: 25 repeated module imports per round.
        for i in xrange(self.rounds):
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
            import os
    def calibrate(self):
        # Empty loop measuring the per-round iteration overhead,
        # subtracted from test() by the pybench framework.
        for i in xrange(self.rounds):
            pass
class SecondPackageImport(Test):
    """Benchmark re-importing an already-imported package.

    Same structure as SecondImport but imports a package (directory
    with __init__), exercising the cached package-import path.
    """
    version = 2.0
    # 25 imports per loop iteration, written out literally so the
    # timed work is exactly 25 import statements.
    operations = 5 * 5
    rounds = 40000
    def test(self):
        # Timed body: 25 repeated package imports per round.
        for i in xrange(self.rounds):
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
            import package
    def calibrate(self):
        # Empty loop measuring per-round overhead for subtraction.
        for i in xrange(self.rounds):
            pass
class SecondSubmoduleImport(Test):
    """Benchmark re-importing an already-imported package submodule.

    Dotted imports bind the top-level package name and resolve the
    submodule through sys.modules; this times that cached path.
    """
    version = 2.0
    # 25 imports per loop iteration, kept as literal statements.
    operations = 5 * 5
    rounds = 40000
    def test(self):
        # Timed body: 25 repeated dotted submodule imports per round.
        for i in xrange(self.rounds):
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
            import package.submodule
    def calibrate(self):
        # Empty loop measuring per-round overhead for subtraction.
        for i in xrange(self.rounds):
            pass
| apache-2.0 |
alama/PSO2Proxy | proxy/ShipProxy.py | 2 | 7707 | import calendar
from config import noisy as verbose
from data import blocks
from data import clients
import datetime
import packets
import plugins.plugins as plugin_manager
import struct
from twisted.internet import protocol
from twisted.internet import reactor
class ShipProxy(protocol.Protocol):
    """One endpoint of a proxied PSO2 client<->block-server connection.

    Each TCP link is handled by a pair of ShipProxy instances: a
    client-facing side (``psoClient`` True) and a server-facing side,
    each holding the other in ``peer``.  Data received on one side is
    decrypted, run through the packet/plugin pipeline, re-encrypted
    and written to the peer's transport.
    """
    def __init__(self):
        pass
    # Class-level defaults; rebound per instance on first assignment.
    peer = None              # the opposite ShipProxy endpoint
    psoClient = False        # True when this side talks to the game client
    bufPacket = None
    loaded = False           # True once client data is populated in `clients`
    changingBlocks = False   # True while the player is hopping between blocks
    connTimestamp = None     # UTC epoch seconds when the proxied link came up
    playerId = None
    myUsername = None
    packetCount = 0          # packets processed, mirrored onto the peer
    readBuffer = ''          # accumulated not-yet-parsed stream data
    c4crypto = None          # per-direction cipher; None until the handshake
    # NOTE(review): class-level mutable default — all instances that never
    # reassign extendedData share this one dict; confirm that is intended.
    extendedData = {}
    def set_peer(self, peer):
        # Link this endpoint to its opposite side.
        self.peer = peer
    def set_is_client(self, is_client):
        self.psoClient = is_client
    def connectionLost(self, reason=protocol.connectionDone):
        """Tear down the peer link and notify plugins/registries."""
        if self.peer is not None:
            # Closing one side closes the other half of the proxy too.
            self.peer.transport.loseConnection()
            self.peer = None
        if self.playerId is not None and self.psoClient:
            for f in plugin_manager.onConnectionLoss:
                f(self)
        if self.playerId is not None and not self.changingBlocks:
            # A block change is not a real logout: keep the client entry.
            for f in plugin_manager.onClientRemove:
                f(self)
            clients.remove_client(self)
        if self.psoClient and self.myUsername is not None:
            if self.changingBlocks:
                print("[ShipProxy] %s is changing blocks." % self.myUsername)
            else:
                print("[ShipProxy] %s logged out." % self.myUsername)
        elif self.psoClient and self.myUsername is None:
            print("[ShipProxy] Client at %s lost connection." % self.transport.getPeer().host)
    def send_crypto_packet(self, data):
        """Encrypt a locally-constructed packet with the peer's cipher and send it."""
        if self.c4crypto is not None and self.peer is not None:
            if verbose:
                print("[ShipProxy] Sending %s a constructed packet..." % self.transport.getPeer().host)
            # The packet must be encrypted with the *peer's* key stream so
            # the receiving side can decrypt it in sequence.
            data = self.peer.c4crypto.encrypt(data)
            self.transport.write(data)
    def dataReceived(self, data):
        """Decrypt, reassemble, process and forward packets to the peer.

        Packets are length-prefixed: a 4-byte little-endian size followed
        by a 2-byte type id; the buffer may hold partial or multiple
        packets at once.
        """
        if verbose:
            print("[ShipProxy] [%i] Received data from %s!" % (self.packetCount, self.transport.getPeer().host,))
        encryption_enabled = (self.c4crypto is not None)
        if encryption_enabled:
            data = self.c4crypto.decrypt(data)
        self.readBuffer += data
        # 8 bytes = minimum framing (size + type); loop once per packet.
        while len(self.readBuffer) >= 8:
            packet_size = struct.unpack_from('i', self.readBuffer)[0]
            packet_type = struct.unpack_from('BB', self.readBuffer, 4)
            # If the packets reported size is less than a normal packet header, wrap it up to 8 as it should NEVER be that way.
            # This prevents an infinite loop.
            if packet_size < 8:
                print("[ShipProxy] Warning! Got invalid packet size %i. Resetting to 8 to prevent infinite loop..." % packet_size)
                packet_size = 8
            if verbose:
                print("[ShipProxy] [%i] Received packet with size %i, id %x:%x" % (
                    self.packetCount, packet_size, packet_type[0], packet_type[1]))
            if len(self.readBuffer) < packet_size:
                # Incomplete packet: wait for the next dataReceived call.
                if verbose:
                    print("[ShipProxy] [%i] Buffer only contains %i, waiting for more data." % (
                        self.packetCount, len(self.readBuffer)))
                break
            packet = self.readBuffer[:packet_size]
            self.readBuffer = self.readBuffer[packet_size:]
            # Pipeline stage 1: raw packet plugins (may rewrite the bytes).
            if packet is not None:
                for f in plugin_manager.rawPacketFunctions:
                    packet = f(self, packet, packet_type[0], packet_type[1])
            # Pipeline stage 2: built-in handler for this packet type, if any.
            try:
                packet_handler = packets.packetList[packet_type]
                packet = packet_handler(self, packet)
            except KeyError:
                if verbose:
                    print("[ShipProxy] No packet function for id %x:%x, using default functionality..." % (
                        packet_type[0], packet_type[1]))
            # Pipeline stage 3: per-type plugin hooks (may drop the packet).
            if (packet_type[0], packet_type[1]) in plugin_manager.packetFunctions:
                for f in plugin_manager.packetFunctions[(packet_type[0], packet_type[1])]:
                    if packet is not None:
                        packet = f(self, packet)
            if packet is None:
                # A handler consumed the packet: stop processing this batch.
                return
            if self.playerId is not None:
                if self.playerId not in clients.connectedClients:  # Initial add
                    clients.add_client(self)
                    self.loaded = True
                    for f in plugin_manager.onInitialConnection:
                        f(self)
                elif not self.loaded:
                    clients.populate_data(self)
                    for f in plugin_manager.onConnection:
                        f(self)
            if encryption_enabled:
                packet = self.c4crypto.encrypt(packet)
            else:
                # check if encryption was newly enabled while parsing this packet
                # if it was, then decrypt any packets that may be waiting in the buffer
                if self.c4crypto is not None:
                    encryption_enabled = True
                    self.readBuffer = self.c4crypto.decrypt(self.readBuffer)
            self.peer.transport.write(packet)
            self.packetCount += 1
            # Keep both directions' counters in sync for logging.
            self.peer.packetCount = self.packetCount
class ProxyClient(ShipProxy):
    """Server-facing half of the proxy (outgoing connection to a block server)."""
    def connectionMade(self):
        # Complete the bidirectional peer link started by ProxyServer.
        self.peer.set_peer(self)
        print("[ShipProxy] Connected to block server!")
        # Stamp both sides with the same connection time (UTC epoch seconds).
        utctime = calendar.timegm(datetime.datetime.utcnow().utctimetuple())
        self.connTimestamp = utctime
        self.peer.connTimestamp = utctime
        # The client side was paused until the upstream link existed;
        # start consuming its data now.
        self.peer.transport.resumeProducing()
        self.transport.setTcpNoDelay(True)
class ProxyClientFactory(protocol.ClientFactory):
    """Builds the outgoing ProxyClient connection for one ProxyServer."""
    noisy = False
    def __init__(self):
        # Mirror the global verbosity flag onto twisted's logging.
        self.noisy = verbose
        self.protocol = ProxyClient
        self.server = None
    def set_server(self, server):
        # The ProxyServer instance this outgoing connection belongs to.
        self.server = server
    def buildProtocol(self, *args, **kw):
        # Pre-wire the new ProxyClient to its client-facing peer.
        the_protocol = protocol.ClientFactory.buildProtocol(self, *args, **kw)
        the_protocol.set_peer(self.server)
        return the_protocol
    def clientConnectionFailed(self, connector, reason):
        # Upstream connect failed: drop the waiting game client too.
        print("[ShipProxy] Connection to server failed... %s" % (reason, ))
        self.server.transport.loseConnection()
class ProxyServer(ShipProxy):
    """Client-facing half of the proxy (accepts the game client's connection)."""
    reactor = None
    def connectionMade(self):
        """Look up the real block server for this port and dial it."""
        # Don't read anything from the connecting client until we have
        # somewhere to send it to.
        self.transport.pauseProducing()
        print("[ShipProxy] New client connected!")
        port = self.transport.getHost().port
        print("[ShipProxy] Client is looking for block on port %i..." % port)
        if port not in blocks.blockList:
            # Unknown port: fall back to a hard-coded default block.
            print("[ShipProxy] Could not find a block for port %i in the cache! Defaulting to block 5..." % port)
            port = 12205
            address = "210.189.208.21"
        else:
            print("[ShipProxy] Found address %s for port %i, named %s" % (
                blocks.blockList[port][0], port, blocks.blockList[port][1]))
            address = blocks.blockList[port][0]
        self.set_is_client(True)
        client = ProxyClientFactory()
        client.set_server(self)
        self.reactor = reactor
        # Ports >= 13000 are mapped back down by 1000 to reach the real
        # server port behind the proxy's offset listening scheme.
        self.reactor.connectTCP(address, port if port < 13000 else port - 1000, client, 60)
        self.transport.setTcpNoDelay(True)
class ProxyFactory(protocol.Factory):
    """Factory for port forwarder.

    Listens for incoming game-client connections and creates one
    ProxyServer per connection.
    """
    # Fix: the docstring used to sit *after* `noisy = False`, making it a
    # no-op string statement instead of the class __doc__; it is now first.
    noisy = False
    def __init__(self):
        # Mirror the global verbosity flag onto twisted's logging.
        self.noisy = verbose
        self.protocol = ProxyServer
    def buildProtocol(self, address):
        # Instantiates ProxyServer directly, matching self.protocol.
        return ProxyServer()
| agpl-3.0 |
lbjay/cds-invenio | modules/websearch/lib/websearch_external_collections_parser.py | 4 | 20332 | # -*- coding: utf-8 -*-
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This is a collection of parsers for external search engines.
Each parser try to extract results from a web page returned by an external search
engine.
"""
__revision__ = "$Id$"
import re
#from invenio.websearch_external_collections_config import CFG_EXTERNAL_COLLECTION_MAXRESULTS
from invenio.config import CFG_WEBSEARCH_EXTERNAL_COLLECTION_SEARCH_MAXRESULTS
CFG_EXTERNAL_COLLECTION_MAXRESULTS = CFG_WEBSEARCH_EXTERNAL_COLLECTION_SEARCH_MAXRESULTS
from invenio.bibformat import format_record
from invenio.websearch_external_collections_getter import fetch_url_content
import cgi
# Regexes capturing the URL inside anchor href and image src attributes;
# group(1) is the (possibly unquoted) URL value.
re_href = re.compile(r'<a[^>]*href="?([^">]*)"?[^>]*>', re.IGNORECASE)
re_img = re.compile(r'<img[^>]*src="?([^">]*)"?[^>]*>', re.IGNORECASE)
def correct_url(htmlcode, host, path):
    """Absolutize every relative link and image URL found in html code.

    >>> correct_url('<a href="hello.html">', 'www.google.com', 'search/')
    '<a href="http://www.google.com/search/hello.html">'
    """
    # Apply the same rewriting pass once for hrefs and once for img srcs.
    for url_pattern in (re_href, re_img):
        htmlcode = correct_url_with_regex(htmlcode, host, path, url_pattern)
    return htmlcode
def correct_url_with_regex(htmlcode, host, path, regex):
    """Prefix relative URLs matched by *regex* with http://host[/path].

    URLs already starting with 'http://' are left untouched.  A URL
    beginning with '/' is rooted at the host; any other relative URL is
    resolved under *path*.
    """
    # Record where each relative URL begins inside the html string.
    insert_points = [match.start(1)
                     for match in regex.finditer(htmlcode)
                     if not match.group(1).startswith('http://')]
    # Splice prefixes in from the end so earlier offsets stay valid.
    for pos in reversed(insert_points):
        if htmlcode[pos] == '/':
            prefix = "http://" + host
        else:
            prefix = "http://" + host + "/" + path
        htmlcode = htmlcode[:pos] + prefix + htmlcode[pos:]
    return htmlcode
class ExternalCollectionHit:
    """A single formatted result (HTML snippet) from an external engine."""
    def __init__(self, html=None):
        # Raw HTML for this hit; None until one is assigned.
        self.html = html
class ExternalCollectionResultsParser(object):
    """Mother class for parsers.

    Subclasses override ``parse`` (and usually the class-level regexes)
    to extract individual hits from an external engine's result page
    into ``self.results``.
    """
    # Regex extracting the "N results found" count from a result page.
    num_results_regex = None
    # Regex and URL used to determine the collection's total record count.
    nbrecs_regex = None
    nbrecs_url = None
    def __init__(self, host='', path=''):
        # host/path are used to absolutize relative URLs in result HTML.
        self.buffer = ""
        self.results = []
        self.host = host
        self.path = path
        self.clean()
    def clean(self):
        """Clean buffer and results to be able to parse a new web page."""
        self.buffer = ""
        self.results = []
    def feed(self, data):
        """Feed buffer with data that will be parsed later."""
        self.buffer += data
    def parse(self, of=None, req=None, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS):
        """Parse the buffer. Set an optional output format.

        No-op here; every concrete subclass provides its own parser.
        """
        pass
    def add_html_result(self, html, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS):
        """Add a new html code as result. The urls in the html code will be corrected."""
        if not html:
            return
        if len(self.results) >= limit:
            # Silently drop hits beyond the requested maximum.
            return
        html = correct_url(html, self.host, self.path) + '\n'
        result = ExternalCollectionHit(html)
        self.results.append(result)
    def parse_num_results(self):
        """Parse the buffer with the num_results_regex to extract the number
        of records found, returned as an int (or None when unavailable)."""
        if self.num_results_regex is None:
            return None
        list_matchs = self.num_results_regex.finditer(self.buffer)
        for match in list_matchs:
            # Only the first match is meaningful; thousands separators
            # are stripped before conversion.
            return int(match.group(1).replace(',', ''))
        return None
    def parse_nbrecs(self, timeout):
        """Fetch and parse the contents of the nbrecs url with the nbrecs_regex
        to extract the total number of records (int; -1/-2 signal errors)."""
        if self.nbrecs_regex is None:
            return None
        html = fetch_url_content([self.nbrecs_url], timeout)
        try:
            if len(html) == 1:
                matches = self.nbrecs_regex.search(html[0])
                return int(matches.group(1).replace(',', ''))
            else: return None
            # This last else should never occur. It means the list html has more (or less) than 1 elements,
            # which is impossible since the fetch_url_content(url) function always returns a list with as many
            # elements as the list's it was fed with
        except AttributeError:
            # This means that the pattern did not match anything, therefore the matches.group(1) raised the exception
            return -1
        except TypeError:
            # This means that the pattern was ran on None instead of string or buffer, therefore the
            # self.nbrecs_regex.search(html[0]) raised the exception, as html = [None]
            return -2
    def parse_and_get_results(self, data, of=None, req=None, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS, feedonly=False, parseonly=False):
        """Parse given data and return results."""
        # parseonly = True just in case we only want to parse the data and return the results
        # ex. the buffer has already been fed
        if not parseonly:
            self.clean()
            self.feed(data)
        # feedonly = True just in case we just want to feed the buffer with the new data
        # ex. the data will be used only to calculate the number of results
        if not feedonly:
            self.parse(of, req, limit)
        return self.results
    def buffer_decode_from(self, charset):
        """Convert the buffer to UTF-8 from the specified charset. Ignore errors."""
        try:
            self.buffer = self.buffer.decode(charset, 'ignore').encode('utf-8', 'ignore')
        except:
            pass
# NOTE(review): class name misspells "Results" as "Resuts"; kept as-is
# because external callers reference this public name.
class CDSIndicoCollectionResutsParser(ExternalCollectionResultsParser):
    """Parser for CDS Indico"""
    num_results_regex = re.compile(r'<strong>([0-9]+?)</strong> records found')
    # Captures the result's ordinal (group 1) and its HTML body (group 2)
    # out of each checkbox-bearing result row.
    result_regex = re.compile(r'<tr><td valign="top" align="right" style="white-space: nowrap;">\s*<input name="recid" type="checkbox" value="[0-9]+" \/>\s*([0-9]+\.)\s*</td><td valign="top">(.*?)<div class="moreinfo">.*?</div></td></tr>', re.MULTILINE + re.DOTALL)
    def __init__(self, host="", path=""):
        super(CDSIndicoCollectionResutsParser, self).__init__(host, path)
    def parse(self, of=None, req=None, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS):
        """Parse buffer to extract records."""
        results = self.result_regex.finditer(self.buffer)
        for result in results:
            num = result.group(1)
            html = result.group(2)
            # Re-emit the result as "N. <body><br />".
            self.add_html_result(num + ' ' + html + '<br />', limit)
class KISSExternalCollectionResultsParser(ExternalCollectionResultsParser):
    """Parser for Kiss (KEK Information Service System preprints)."""
    num_results_regex = re.compile(r'<pre><b> ([0-9]+?) records matched</b></pre>')
    def __init__(self, host="www-lib.kek.jp", path="cgi-bin/"):
        super(KISSExternalCollectionResultsParser, self).__init__(host, path)
    def parse(self, of=None, req=None, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS):
        """Parse buffer to extract records.

        The (Shift_JIS encoded) page is split on '<DL>'; each fragment up
        to its closing '</DL>' tag becomes one result.
        """
        self.buffer_decode_from('Shift_JIS')
        elements = self.buffer.split("<DL>")
        if len(elements) <= 1:
            # No result markup found on the page.
            return
        for element in elements[1:]:
            # Fix: stop once the *requested* limit is reached; previously
            # this compared against the global maximum, ignoring a smaller
            # limit argument (add_html_result capped output anyway, so
            # the old code just did useless extra work).
            if len(self.results) >= limit:
                return
            end_index = element.find('</DL>')
            if end_index != -1:
                # Fix: keep the complete '</DL>' tag (5 chars); the old
                # '+ 4' cut the trailing '>' and emitted malformed HTML.
                element = element[:end_index + len('</DL>')]
            self.add_html_result(element + '<br /><br />', limit)
class KISSBooksExternalCollectionResultsParser(ExternalCollectionResultsParser):
    """Parser for Kiss books/journals result pages."""
    # One table row per line of the (newline-flattened) page.
    line = re.compile(r'<TR>(.*?)</TR>')
    # Title rows: ordinal (1), target URL (2) and link text (3).
    title = re.compile(r'<TR>[ ]+<TD valign="top">([0-9]+)\)</TD>[ ]+<TD><A HREF="?(.*)"?>[ ]*(.*?)[ ]*</A></TD>[ ]+</TR>')
    # Continuation rows carrying extra info for the current title.
    info_line = re.compile(r'[ ]*<TR>[ ]*<TD></TD>[ ]*<TD>(.*?)</TD>.*</TR>')
    num_results_regex = re.compile(r'<B> (?:Books|Journals) ([0-9]+?) </B>')
    def __init__(self, host="www-lib.kek.jp", path="cgi-bin/"):
        super(KISSBooksExternalCollectionResultsParser, self).__init__(host, path)
    def parse(self, of=None, req=None, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS):
        """Parse buffer to extract records.

        A result is accumulated across rows: a title row starts a new
        hit (flushing the previous one) and info rows append to it; the
        last pending hit is flushed after the loop.
        """
        self.buffer_decode_from('Shift_JIS')
        self.buffer = self.buffer.replace('\n', ' ')
        html = ""
        results_to_parse = self.line.finditer(self.buffer)
        for result in results_to_parse:
            # Fix: honour the `limit` argument; previously this compared
            # against the global maximum, so a smaller explicit limit was
            # ignored by the early-exit (output was still capped later by
            # add_html_result, making the old check only wasted work).
            if len(self.results) >= limit:
                return
            data = result.group()
            title_match = self.title.match(data)
            if title_match:
                # A new title starts: flush the previously accumulated hit.
                self.add_html_result(html, limit)
                num = title_match.group(1)
                url = title_match.group(2)
                title = title_match.group(3)
                html = num + ') <a href=http://' + self.host + url + ">" + title + "</a><br />"
            else:
                info_line_match = self.info_line.match(data)
                if info_line_match:
                    info = info_line_match.group(1)
                    html += info + '<br />'
        # Flush the last accumulated hit.
        self.add_html_result(html, limit)
class GoogleExternalCollectionResultsParser(ExternalCollectionResultsParser):
    """Parser for Google"""
    num_results_regex = re.compile(r'of about <b>([0-9,]+?)</b>')
    def __init__(self, host = "www.google.com", path=""):
        super(GoogleExternalCollectionResultsParser, self).__init__(host, path)
    def parse(self, of=None, req=None, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS):
        """Parse buffer to extract records."""
        # Each hit starts with a '<div class=g>' marker.
        elements = self.buffer.split("<div class=g>")
        if len(elements) <= 1:
            return
        for element in elements[1:]:
            end_index = element.find('</table>')
            if end_index != -1:
                # Keep the markup up to and including '</table>' (8 chars).
                element = element[:end_index + 8]
            self.add_html_result(element, limit)
class GoogleScholarExternalCollectionResultsParser(GoogleExternalCollectionResultsParser):
    """Parser for Google Scholar."""
    def __init__(self, host = "scholar.google.com", path=""):
        super(GoogleScholarExternalCollectionResultsParser, self).__init__(host, path)
    def parse(self, of=None, req=None, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS):
        """Parse buffer to extract records."""
        # Scholar hits start with '<p class=g>'; the final fragment is the
        # page footer, hence the [1:-1] slice.
        elements = self.buffer.split("<p class=g>")
        if len(elements) <= 1:
            return
        for element in elements[1:-1]:
            end_index = element.find('</table>')
            if end_index != -1:
                # Keep the markup up to and including '</table>' (8 chars).
                element = element[:end_index + 8]
            self.add_html_result(element + '<br />', limit)
class GoogleBooksExternalCollectionResultsParser(GoogleExternalCollectionResultsParser):
    """Parser for Google Books."""
    num_results_regex = re.compile(r' with <b>([0-9]+?)</b> pages on ')
    def __init__(self, host = "books.google.com", path=""):
        super(GoogleBooksExternalCollectionResultsParser, self).__init__(host, path)
    def parse(self, of=None, req=None, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS):
        """Parse buffer to extract records."""
        # Each hit starts with this cover-cell table marker; the final
        # fragment is the page footer, hence the [1:-1] slice.
        elements = self.buffer.split('<table class=rsi><tr><td class="covertd">')
        if len(elements) <= 1:
            return
        for element in elements[1:-1]:
            self.add_html_result(element, limit)
class SPIRESExternalCollectionResultsParser(ExternalCollectionResultsParser):
    """Parser for SPIRES."""
    num_results_regex = re.compile(r'Paper <b>[0-9]+</b> to <b>[0-9]+</b> of <b>([0-9]+)</b>')
    def __init__(self, host="www.slac.stanford.edu", path="spires/find/hep/"):
        super(SPIRESExternalCollectionResultsParser, self).__init__(host, path)
    def parse(self, of=None, req=None, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS):
        """Parse buffer to extract records."""
        # Results are '<p>'-separated; the first and last fragments are
        # page chrome, hence the [1:-1] slice.
        elements = self.buffer.split('<p>')
        if len(elements) <= 2:
            return
        for element in elements[1:-1]:
            self.add_html_result(element, limit)
class SCIRUSExternalCollectionResultsParser(ExternalCollectionResultsParser):
    """Parser for SCIRUS."""
    num_results_regex = re.compile(r'<b>([0-9,]+) total</b> ')
    # Isolates each result cell from the surrounding table markup.
    result_separator = re.compile(r'<td width="100%" valign="top" colspan="2">[ ]*(.*?)</td>[ ]*</tr>[ ]*</table>')
    # Splits a result cell into link (1), date (2), comments (3), similar (4).
    result_decode = re.compile('[ ]*(.*?)[ ]*<font class="filesize">.*?<br />[ ]*(.*?)[ ]*<br />[ ]*(.*?)[ ]*</td>.*?<br />[ ]*(.*)[ ]*')
    # Fallback: strip structural tags when a cell does not match result_decode.
    cleaning = re.compile('(<img .*?>|</td>|</tr>|<td .*?>|<tr.*?>)')
    def __init__(self, host='www.scirus.com', path='srsapp/'):
        super(SCIRUSExternalCollectionResultsParser, self).__init__(host, path)
    def parse(self, of=None, req=None, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS):
        """Parse buffer to extract records."""
        data = self.buffer.replace('\n', ' ')
        for element in self.result_separator.finditer(data):
            data = element.group(1)
            parsed_line = self.result_decode.match(data)
            if parsed_line is not None:
                link = parsed_line.group(1)
                date = parsed_line.group(2)
                comments = parsed_line.group(3)
                similar = parsed_line.group(4)
                html = "%(link)s - %(date)s <br /> %(comments)s <br /> %(similar)s <br />" % {'link' : link,
                    'date' : date, 'comments' : comments, 'similar' : similar}
            else:
                # Cell layout unrecognized: emit it with structural tags removed.
                html = self.cleaning.sub("", data) + '<br />'
            self.add_html_result(html, limit)
class CiteSeerExternalCollectionResultsParser(ExternalCollectionResultsParser):
    """Parser for CiteSeer."""
    num_results_regex = re.compile(r'<br />(?:More than |)([0-9]+)(?: documents found.| results)')
    # CiteSeer brackets each result between RIS start/end marker comments.
    result_separator = re.compile(r'<!--RIS-->.*?<!--RIE-->', re.DOTALL)
    def __init__(self, host='', path=''):
        super(CiteSeerExternalCollectionResultsParser, self).__init__(host, path)
    def parse(self, of=None, req=None, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS):
        """Parse buffer to extract records."""
        for element in self.result_separator.finditer(self.buffer):
            self.add_html_result(element.group() + '<br />', limit)
class CDSInvenioHTMLExternalCollectionResultsParser(ExternalCollectionResultsParser):
    """HTML brief (hb) Parser for Invenio"""
    def __init__(self, params):
        """Configure the parser from a dict of attribute name/value pairs.

        Recognized keys include num_results_regex_str and nbrecs_regex_str,
        which are compiled into the corresponding regex attributes.
        """
        self.buffer = ""
        self.results = []
        self.clean()
        self.num_results_regex_str = None
        self.nbrecs_regex_str = None
        # Every entry of `params` becomes an instance attribute.
        for (name, value) in params.iteritems():
            setattr(self, name, value)
        if self.num_results_regex_str:
            self.num_results_regex = re.compile(self.num_results_regex_str)
        if self.nbrecs_regex_str:
            self.nbrecs_regex = re.compile(self.nbrecs_regex_str, re.IGNORECASE)
    def parse(self, of=None, req=None, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS):
        """Parse buffer to extract records."""
        # the patterns :
        # level_a : select only the results
        level_a_pat = re.compile(r'<form[^>]*basket[^>]*?>.*?<table>(.*?)</table>.*?</form>', re.DOTALL + re.MULTILINE + re.IGNORECASE)
        # level_b : purge html from the basket input fields
        level_b_pat = re.compile(r'<input[^>]*?/>', re.DOTALL + re.MULTILINE + re.IGNORECASE)
        # level_c : separate the results from one another
        level_c_pat = re.compile(r'(<tr>.*?</tr>)', re.DOTALL + re.MULTILINE + re.IGNORECASE)
        # the long way :
        #level_a_res = level_a_pat.search(self.buffer)
        #level_ab_res = level_a_res.group(1)
        #level_b_res = level_b_pat.sub('', level_ab_res)
        #level_c_res = level_c_pat.finditer(level_b_res)
        # the short way :
        try:
            results = level_c_pat.finditer(level_b_pat.sub('', level_a_pat.search(self.buffer).group(1)))
            for result in results:
                # each result is placed in its own table since it already has its rows and cells defined
                self.add_html_result('<table>' + result.group(1) + '</table>', limit)
        except AttributeError:
            # in case there were no results found an Attribute error is raised
            pass
class CDSInvenioXMLExternalCollectionResultsParser(ExternalCollectionResultsParser):
    """XML (xm) parser for Invenio"""
    def __init__(self, params):
        """Configure the parser from a dict of attribute name/value pairs.

        Recognized keys include num_results_regex_str and nbrecs_regex_str,
        which are compiled into the corresponding regex attributes.
        """
        self.buffer = ""
        self.results = []
        self.clean()
        self.num_results_regex_str = None
        self.nbrecs_regex_str = None
        # Every entry of `params` becomes an instance attribute.
        for (name, value) in params.iteritems():
            setattr(self, name, value)
        if self.num_results_regex_str:
            self.num_results_regex = re.compile(self.num_results_regex_str)
        if self.nbrecs_regex_str:
            self.nbrecs_regex = re.compile(self.nbrecs_regex_str, re.IGNORECASE)
    def parse(self, of='hb', req=None, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS):
        """Parse buffer to extract records. Format the records using the selected output format."""
        (recids, records) = self.parse_and_extract_records(of)
        # Continue numbering from the request's 'jrec' offset (pagination).
        if req and cgi.parse_qs(req.args).has_key('jrec'):
            counter = int(cgi.parse_qs(req.args)['jrec'][0]) - 1
        else:
            counter = 0
        for recid in recids:
            counter += 1
            if of == 'hb':
                # Brief HTML: checkbox + ordinal + formatted record row.
                html = """
                <tr><td valign="top" align="right" style="white-space: nowrap;">
                <input name="recid" type="checkbox" value="%(recid)s" />
                %(counter)s.
                </td><td valign="top">%(record)s</td></tr>
                """ % {'recid': recid,
                       'counter': counter,
                       'record': records[recid]}
            elif of == 'hd':
                # HTML detailed (hd) is not supported yet
                # TODO: either disable the hd output format or print it out correctly
                html = """"""
            elif of == 'xm':
                html = records[recid]
            else:
                html = None
            if html:
                self.add_html_result(html, limit)
    def parse_and_extract_records(self, of='hb'):
        """Parse the buffer and return a list of the recids and a
        dictionary with key:value pairs like the following
        recid:formated record with the selected output format"""
        # the patterns :
        # separate the records from one another
        record_pat = re.compile(r'(<record.*?>.*?</record>)', re.DOTALL + re.MULTILINE + re.IGNORECASE)
        # extract the recid
        recid_pat = re.compile(r'<controlfield tag="001">([0-9]+?)</controlfield>', re.DOTALL + re.MULTILINE + re.IGNORECASE)
        if not of:
            of='hb'
        try:
            results = record_pat.finditer(self.buffer)
            records = {}
            recids = []
            for result in results:
                xml_record = result.group(1)
                recid = recid_pat.search(xml_record).group(1)
                recids.append(recid)
                if of != 'xm':
                    # Delegate formatting of the MARCXML to bibformat.
                    records[recid] = format_record(None, of, xml_record=xml_record)
                elif of == 'xm':
                    records[recid] = xml_record
            return (recids, records)
        except AttributeError:
            # in case there were no results found an Attribute error is raised
            return ([], {})
| gpl-2.0 |
aurofable/medhack-server | venv/lib/python2.7/site-packages/jwt/__init__.py | 1 | 2109 | """ JSON Web Token implementation
Minimum implementation based on this spec:
http://self-issued.info/docs/draft-jones-json-web-token-01.html
"""
import base64
import hashlib
import hmac
try:
import json
except ImportError:
import simplejson as json
__all__ = ['encode', 'decode', 'DecodeError']
# Raised when a JWT cannot be parsed or its signature fails verification.
class DecodeError(Exception): pass
# Supported HMAC-SHA signing algorithms, keyed by their JWS "alg" name.
signing_methods = {
    'HS256': lambda msg, key: hmac.new(key, msg, hashlib.sha256).digest(),
    'HS384': lambda msg, key: hmac.new(key, msg, hashlib.sha384).digest(),
    'HS512': lambda msg, key: hmac.new(key, msg, hashlib.sha512).digest(),
}
def base64url_decode(input):
    """Decode a base64url (RFC 4648 section 5) string whose padding was stripped."""
    # Fix: only restore the padding actually missing. The old expression
    # `'=' * (4 - (len(input) % 4))` appended FOUR '=' characters when the
    # length was already a multiple of four; `-len % 4` adds 0-2 as needed.
    input += '=' * (-len(input) % 4)
    return base64.urlsafe_b64decode(input)
def base64url_encode(input):
    """Base64url-encode *input* and strip the trailing '=' padding."""
    # NOTE(review): targets Python 2 (str == bytes); under Python 3 the
    # str arguments to bytes.replace() would raise TypeError — confirm
    # the runtime before reusing this module.
    return base64.urlsafe_b64encode(input).replace('=', '')
def encode(payload, key, algorithm='HS256'):
    """Serialize *payload* (a JSON-serializable mapping) into a signed JWT.

    Returns the compact "header.payload.signature" string. Raises
    NotImplementedError when *algorithm* is not in ``signing_methods``.
    """
    segments = []
    header = {"typ": "JWT", "alg": algorithm}
    segments.append(base64url_encode(json.dumps(header)))
    segments.append(base64url_encode(json.dumps(payload)))
    # The signature covers the already-encoded "header.payload" string.
    signing_input = '.'.join(segments)
    try:
        signature = signing_methods[algorithm](signing_input, key)
    except KeyError:
        raise NotImplementedError("Algorithm not supported")
    segments.append(base64url_encode(signature))
    return '.'.join(segments)
def decode(jwt, key='', verify=True):
    """Parse a compact JWT string and return its payload as a dict.

    When *verify* is true the HMAC signature is recomputed with *key*
    and compared. Raises DecodeError for malformed tokens, unknown
    algorithms, or a failed signature check.
    """
    try:
        # Split from the right so dots inside header/payload can't confuse us.
        signing_input, crypto_segment = jwt.rsplit('.', 1)
        header_segment, payload_segment = signing_input.split('.', 1)
    except ValueError:
        raise DecodeError("Not enough segments")
    try:
        header = json.loads(base64url_decode(header_segment))
        payload = json.loads(base64url_decode(payload_segment))
        signature = base64url_decode(crypto_segment)
    except (ValueError, TypeError):
        raise DecodeError("Invalid segment encoding")
    if verify:
        try:
            # NOTE(review): '==' is not a constant-time comparison; consider
            # hmac.compare_digest to avoid a timing side channel.
            if not signature == signing_methods[header['alg']](signing_input, key):
                raise DecodeError("Signature verification failed")
        except KeyError:
            raise DecodeError("Algorithm not supported")
    return payload
| mit |
poornimakshirsagar/sos | sos/plugins/anacron.py | 12 | 1102 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Anacron(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
    """Anacron job scheduling service"""
    plugin_name = 'anacron'
    profiles = ('system',)
    # anacron may be provided by anacron, cronie-anacron etc.
    # just look for the configuration file which is common to all
    # implementations; its presence is what enables this plugin.
    files = ('/etc/anacrontab',)
# vim: set et ts=4 sw=4 :
| gpl-2.0 |
alianmohammad/pd-gem5-latest | tests/configs/o3-timing.py | 37 | 2855 | # Copyright (c) 2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
from m5.objects import *
from m5.defines import buildEnv
from base_config import *
from arm_generic import *
from O3_ARM_v7a import O3_ARM_v7a_3
# If we are running ARM regressions, use a more sensible CPU
# configuration. This makes the results more meaningful, and also
# increases the coverage of the regressions.
if buildEnv['TARGET_ISA'] == "arm":
    root = ArmSESystemUniprocessor(mem_mode='timing', mem_class=DDR3_1600_x64,
                                   cpu_class=O3_ARM_v7a_3).create_root()
else:
    # All other ISAs use the generic detailed out-of-order CPU model.
    root = BaseSESystemUniprocessor(mem_mode='timing', mem_class=DDR3_1600_x64,
                                    cpu_class=DerivO3CPU).create_root()
| bsd-3-clause |
AndrewSmart/audacity | lib-src/lv2/sratom/waflib/Tools/dbus.py | 318 | 1142 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import Task,Errors
from waflib.TaskGen import taskgen_method,before_method
@taskgen_method
def add_dbus_file(self,filename,prefix,mode):
	"""Register a dbus-binding-tool input file on this task generator
	and schedule the process_dbus method to run for it."""
	if not hasattr(self,'dbus_lst'):
		self.dbus_lst=[]
	if not'process_dbus'in self.meths:
		self.meths.append('process_dbus')
	self.dbus_lst.append([filename,prefix,mode])
@before_method('apply_core')
def process_dbus(self):
	"""Create one dbus_binding_tool task (source -> .h) per file that
	was registered through add_dbus_file."""
	for filename,prefix,mode in getattr(self,'dbus_lst',[]):
		node=self.path.find_resource(filename)
		if not node:
			raise Errors.WafError('file not found '+filename)
		tsk=self.create_task('dbus_binding_tool',node,node.change_ext('.h'))
		# Per-task environment consumed by the run_str command line.
		tsk.env.DBUS_BINDING_TOOL_PREFIX=prefix
		tsk.env.DBUS_BINDING_TOOL_MODE=mode
class dbus_binding_tool(Task.Task):
	"""Task running dbus-binding-tool to generate a C header from a
	D-Bus interface description."""
	color='BLUE'
	ext_out=['.h']
	run_str='${DBUS_BINDING_TOOL} --prefix=${DBUS_BINDING_TOOL_PREFIX} --mode=${DBUS_BINDING_TOOL_MODE} --output=${TGT} ${SRC}'
	shell=True
def configure(conf):
	"""Locate the ``dbus-binding-tool`` program and store its path in
	``conf.env.DBUS_BINDING_TOOL``; raises a configuration error if
	the tool is not found.

	The return value of find_program was previously bound to an unused
	local; the binding is dropped since ``var=`` already records it.
	"""
	conf.find_program('dbus-binding-tool', var='DBUS_BINDING_TOOL')
| gpl-2.0 |
suiyuan2009/tensorflow | tensorflow/compiler/tests/reverse_ops_test.py | 103 | 2302 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for XLA Reverse Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class ReverseOpsTest(XLATestCase):
  # Compares XLA's array_ops.reverse against an equivalent numpy slicing
  # construction over a fixed 4-D shape.

  def testReverseOneDim(self):
    # Reverse each axis individually.
    shape = (7, 5, 9, 11)
    for revdim in range(len(shape)):
      self._AssertReverseEqual([revdim], shape)
  def testReverseMoreThanOneDim(self):
    # Reverse every combination of 2..4 axes.
    shape = (7, 5, 9, 11)
    for revdims in itertools.chain.from_iterable(
        itertools.combinations(range(len(shape)), k)
        for k in range(2, len(shape)+1)):
      self._AssertReverseEqual(revdims, shape)
  def _AssertReverseEqual(self, revdims, shape):
    # Fixed seed keeps the test deterministic across runs.
    np.random.seed(120)
    # NOTE(review): values are cast to float but fed into an int32
    # placeholder below — presumably relying on the feed mechanism to
    # convert; confirm this is intentional.
    pval = np.random.randint(0, 100, size=shape).astype(float)
    with self.test_session():
      with self.test_scope():
        p = array_ops.placeholder(dtypes.int32, shape=shape)
        axis = constant_op.constant(
            np.array(revdims, dtype=np.int32),
            shape=(len(revdims),), dtype=dtypes.int32)
        rval = array_ops.reverse(p, axis).eval({p: pval})
        # Build the numpy reference: step -1 on reversed axes, full
        # slice elsewhere.
        slices = [
            slice(-1, None, -1) if d in revdims else slice(None)
            for d in range(len(shape))]
        # NOTE(review): indexing with a *list* of slices is deprecated
        # in numpy >= 1.15; pval[tuple(slices)] is the supported form.
        self.assertEqual(
            pval[slices].flatten().tolist(),
            rval.flatten().tolist())
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  googletest.main()
| apache-2.0 |
Akrog/sqlalchemy | lib/sqlalchemy/orm/instrumentation.py | 60 | 17510 | # orm/instrumentation.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines SQLAlchemy's system of class instrumentation.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
instrumentation.py deals with registration of end-user classes
for state tracking. It interacts closely with state.py
and attributes.py which establish per-instance and per-class-attribute
instrumentation, respectively.
The class instrumentation system can be customized on a per-class
or global basis using the :mod:`sqlalchemy.ext.instrumentation`
module, which provides the means to build and specify
alternate instrumentation forms.
.. versionchanged: 0.8
The instrumentation extension system was moved out of the
ORM and into the external :mod:`sqlalchemy.ext.instrumentation`
package. When that package is imported, it installs
itself within sqlalchemy.orm so that its more comprehensive
resolution mechanics take effect.
"""
from . import exc, collections, interfaces, state
from .. import util
from . import base
_memoized_key_collection = util.group_expirable_memoized_property()
class ClassManager(dict):
    """tracks state information at the class level.

    A ClassManager is itself a dict mapping attribute key ->
    instrumented attribute, installed on a mapped class under
    MANAGER_ATTR.
    """
    # Attribute names installed on instrumented classes/instances.
    MANAGER_ATTR = base.DEFAULT_MANAGER_ATTR
    STATE_ATTR = base.DEFAULT_STATE_ATTR
    # Fast setter for the per-instance state attribute.
    _state_setter = staticmethod(util.attrsetter(STATE_ATTR))
    deferred_scalar_loader = None
    original_init = object.__init__
    # InstrumentationFactory assigns the factory that produced this manager.
    factory = None
    def __init__(self, class_):
        self.class_ = class_
        self.info = {}
        self.new_init = None
        # Attributes instrumented directly on this class (not inherited).
        self.local_attrs = {}
        # Original class members replaced by install_member().
        self.originals = {}
        # Managers of instrumented base classes; their attributes are
        # merged into this (dict) manager.
        self._bases = [mgr for mgr in [
            manager_of_class(base)
            for base in self.class_.__bases__
            if isinstance(base, type)
        ] if mgr is not None]
        for base in self._bases:
            self.update(base)
        self.dispatch._events._new_classmanager_instance(class_, self)
        # events._InstanceEventsHold.populate(class_, self)
        for basecls in class_.__mro__:
            mgr = manager_of_class(basecls)
            if mgr is not None:
                self.dispatch._update(mgr.dispatch)
        self.manage()
        self._instrument_init()
        if '__del__' in class_.__dict__:
            util.warn("__del__() method on class %s will "
                      "cause unreachable cycles and memory leaks, "
                      "as SQLAlchemy instrumentation often creates "
                      "reference cycles.  Please remove this method." %
                      class_)
    def __hash__(self):
        # Identity-based hashing; managers are unique per class.
        return id(self)
    def __eq__(self, other):
        return other is self
    @property
    def is_mapped(self):
        # True once a Mapper has assigned itself to self.mapper.
        return 'mapper' in self.__dict__
    @_memoized_key_collection
    def _all_key_set(self):
        return frozenset(self)
    @_memoized_key_collection
    def _collection_impl_keys(self):
        # Keys of collection-valued attributes.
        return frozenset([
            attr.key for attr in self.values() if attr.impl.collection])
    @_memoized_key_collection
    def _scalar_loader_impls(self):
        return frozenset([
            attr.impl for attr in
            self.values() if attr.impl.accepts_scalar_loader])
    @util.memoized_property
    def mapper(self):
        # raises unless self.mapper has been assigned
        raise exc.UnmappedClassError(self.class_)
    def _all_sqla_attributes(self, exclude=None):
        """return an iterator of all classbound attributes that are
        implement :class:`.InspectionAttr`.
        This includes :class:`.QueryableAttribute` as well as extension
        types such as :class:`.hybrid_property` and
        :class:`.AssociationProxy`.
        """
        if exclude is None:
            exclude = set()
        for supercls in self.class_.__mro__:
            for key in set(supercls.__dict__).difference(exclude):
                # mark as seen so a subclass attr shadows its parent's
                exclude.add(key)
                val = supercls.__dict__[key]
                if isinstance(val, interfaces.InspectionAttr):
                    yield key, val
    def _attr_has_impl(self, key):
        """Return True if the given attribute is fully initialized.
        i.e. has an impl.
        """
        return key in self and self[key].impl is not None
    def _subclass_manager(self, cls):
        """Create a new ClassManager for a subclass of this ClassManager's
        class.
        This is called automatically when attributes are instrumented so that
        the attributes can be propagated to subclasses against their own
        class-local manager, without the need for mappers etc. to have already
        pre-configured managers for the full class hierarchy.   Mappers
        can post-configure the auto-generated ClassManager when needed.
        """
        manager = manager_of_class(cls)
        if manager is None:
            manager = _instrumentation_factory.create_manager_for_cls(cls)
        return manager
    def _instrument_init(self):
        # TODO: self.class_.__init__ is often the already-instrumented
        # __init__ from an instrumented superclass.  We still need to make
        # our own wrapper, but it would
        # be nice to wrap the original __init__ and not our existing wrapper
        # of such, since this adds method overhead.
        self.original_init = self.class_.__init__
        self.new_init = _generate_init(self.class_, self)
        self.install_member('__init__', self.new_init)
    def _uninstrument_init(self):
        # Restore the pre-instrumentation __init__, if one was installed.
        if self.new_init:
            self.uninstall_member('__init__')
            self.new_init = None
    @util.memoized_property
    def _state_constructor(self):
        # first_init fires exactly once, on first instance construction.
        self.dispatch.first_init(self, self.class_)
        return state.InstanceState
    def manage(self):
        """Mark this instance as the manager for its class."""
        setattr(self.class_, self.MANAGER_ATTR, self)
    def dispose(self):
        """Dissasociate this manager from its class."""
        delattr(self.class_, self.MANAGER_ATTR)
    @util.hybridmethod
    def manager_getter(self):
        return _default_manager_getter
    @util.hybridmethod
    def state_getter(self):
        """Return a (instance) -> InstanceState callable.
        "state getter" callables should raise either KeyError or
        AttributeError if no InstanceState could be found for the
        instance.
        """
        return _default_state_getter
    @util.hybridmethod
    def dict_getter(self):
        return _default_dict_getter
    def instrument_attribute(self, key, inst, propagated=False):
        # propagated=True means the attr came from a superclass manager.
        if propagated:
            if key in self.local_attrs:
                return  # don't override local attr with inherited attr
        else:
            self.local_attrs[key] = inst
            self.install_descriptor(key, inst)
        _memoized_key_collection.expire_instance(self)
        self[key] = inst
        # cascade into already-registered subclasses
        for cls in self.class_.__subclasses__():
            manager = self._subclass_manager(cls)
            manager.instrument_attribute(key, inst, True)
    def subclass_managers(self, recursive):
        # Yield managers of direct (and, if recursive, indirect) subclasses.
        for cls in self.class_.__subclasses__():
            mgr = manager_of_class(cls)
            if mgr is not None and mgr is not self:
                yield mgr
                if recursive:
                    for m in mgr.subclass_managers(True):
                        yield m
    def post_configure_attribute(self, key):
        _instrumentation_factory.dispatch.\
            attribute_instrument(self.class_, key, self[key])
    def uninstrument_attribute(self, key, propagated=False):
        # Mirror image of instrument_attribute().
        if key not in self:
            return
        if propagated:
            if key in self.local_attrs:
                return  # don't get rid of local attr
        else:
            del self.local_attrs[key]
            self.uninstall_descriptor(key)
        _memoized_key_collection.expire_instance(self)
        del self[key]
        for cls in self.class_.__subclasses__():
            manager = manager_of_class(cls)
            if manager:
                manager.uninstrument_attribute(key, True)
    def unregister(self):
        """remove all instrumentation established by this ClassManager."""
        self._uninstrument_init()
        self.mapper = self.dispatch = None
        self.info.clear()
        for key in list(self):
            if key in self.local_attrs:
                self.uninstrument_attribute(key)
    def install_descriptor(self, key, inst):
        # Refuse to shadow the reserved instrumentation attribute names.
        if key in (self.STATE_ATTR, self.MANAGER_ATTR):
            raise KeyError("%r: requested attribute name conflicts with "
                           "instrumentation attribute of the same name." %
                           key)
        setattr(self.class_, key, inst)
    def uninstall_descriptor(self, key):
        delattr(self.class_, key)
    def install_member(self, key, implementation):
        if key in (self.STATE_ATTR, self.MANAGER_ATTR):
            raise KeyError("%r: requested attribute name conflicts with "
                           "instrumentation attribute of the same name." %
                           key)
        # remember the original so uninstall_member() can restore it;
        # setdefault keeps the very first original across re-installs.
        self.originals.setdefault(key, getattr(self.class_, key, None))
        setattr(self.class_, key, implementation)
    def uninstall_member(self, key):
        original = self.originals.pop(key, None)
        if original is not None:
            setattr(self.class_, key, original)
    def instrument_collection_class(self, key, collection_class):
        return collections.prepare_instrumentation(collection_class)
    def initialize_collection(self, key, state, factory):
        # factory() builds the user-visible collection; the adapter
        # bridges it to the attribute impl.
        user_data = factory()
        adapter = collections.CollectionAdapter(
            self.get_impl(key), state, user_data)
        return adapter, user_data
    def is_instrumented(self, key, search=False):
        # search=True also considers attributes inherited from superclasses.
        if search:
            return key in self
        else:
            return key in self.local_attrs
    def get_impl(self, key):
        return self[key].impl
    @property
    def attributes(self):
        return iter(self.values())
    # InstanceState management
    def new_instance(self, state=None):
        # Bypass __init__ entirely; used e.g. when loading from the DB.
        instance = self.class_.__new__(self.class_)
        if state is None:
            state = self._state_constructor(instance, self)
        self._state_setter(instance, state)
        return instance
    def setup_instance(self, instance, state=None):
        if state is None:
            state = self._state_constructor(instance, self)
        self._state_setter(instance, state)
    def teardown_instance(self, instance):
        delattr(instance, self.STATE_ATTR)
    def _serialize(self, state, state_dict):
        return _SerializeManager(state, state_dict)
    def _new_state_if_none(self, instance):
        """Install a default InstanceState if none is present.
        A private convenience method used by the __init__ decorator.
        """
        if hasattr(instance, self.STATE_ATTR):
            return False
        elif self.class_ is not instance.__class__ and \
                self.is_mapped:
            # this will create a new ClassManager for the
            # subclass, without a mapper.  This is likely a
            # user error situation but allow the object
            # to be constructed, so that it is usable
            # in a non-ORM context at least.
            return self._subclass_manager(instance.__class__).\
                _new_state_if_none(instance)
        else:
            state = self._state_constructor(instance, self)
            self._state_setter(instance, state)
            return state
    def has_state(self, instance):
        return hasattr(instance, self.STATE_ATTR)
    def has_parent(self, state, key, optimistic=False):
        """TODO"""
        return self.get_impl(key).hasparent(state, optimistic=optimistic)
    def __bool__(self):
        """All ClassManagers are non-zero regardless of attribute state."""
        return True
    __nonzero__ = __bool__
    def __repr__(self):
        return '<%s of %r at %x>' % (
            self.__class__.__name__, self.class_, id(self))
class _SerializeManager(object):
    """Provide serialization of a :class:`.ClassManager`.
    The :class:`.InstanceState` uses ``__init__()`` on serialize
    and ``__call__()`` on deserialize.
    """
    def __init__(self, state, d):
        # Record only the class; the manager itself is looked up again
        # at unpickle time in __call__().
        self.class_ = state.class_
        manager = state.manager
        manager.dispatch.pickle(state, d)
    def __call__(self, state, inst, state_dict):
        state.manager = manager = manager_of_class(self.class_)
        if manager is None:
            # The class was never (re)mapped in this process.
            raise exc.UnmappedInstanceError(
                inst,
                "Cannot deserialize object of type %r - "
                "no mapper() has "
                "been configured for this class within the current "
                "Python process!" %
                self.class_)
        elif manager.is_mapped and not manager.mapper.configured:
            manager.mapper._configure_all()
        # setup _sa_instance_state ahead of time so that
        # unpickle events can access the object normally.
        # see [ticket:2362]
        if inst is not None:
            manager.setup_instance(inst, state)
        manager.dispatch.unpickle(state, state_dict)
class InstrumentationFactory(object):
    """Factory for new ClassManager instances."""
    def create_manager_for_cls(self, class_):
        # Precondition: the class is not yet instrumented.
        assert class_ is not None
        assert manager_of_class(class_) is None
        # give a more complicated subclass
        # a chance to do what it wants here
        manager, factory = self._locate_extended_factory(class_)
        if factory is None:
            factory = ClassManager
            manager = factory(class_)
        self._check_conflicts(class_, factory)
        # remember which factory built this manager
        manager.factory = factory
        self.dispatch.class_instrument(class_)
        return manager
    def _locate_extended_factory(self, class_):
        """Overridden by a subclass to do an extended lookup."""
        return None, None
    def _check_conflicts(self, class_, factory):
        """Overridden by a subclass to test for conflicting factories."""
        return
    def unregister(self, class_):
        # Tear down instrumentation and remove the manager attribute
        # from the class itself.
        manager = manager_of_class(class_)
        manager.unregister()
        manager.dispose()
        self.dispatch.class_uninstrument(class_)
        if ClassManager.MANAGER_ATTR in class_.__dict__:
            delattr(class_, ClassManager.MANAGER_ATTR)
# this attribute is replaced by sqlalchemy.ext.instrumentation
# when imported.
_instrumentation_factory = InstrumentationFactory()
# these attributes are replaced by sqlalchemy.ext.instrumentation
# when a non-standard InstrumentationManager class is first
# used to instrument a class.
instance_state = _default_state_getter = base.instance_state
instance_dict = _default_dict_getter = base.instance_dict
manager_of_class = _default_manager_getter = base.manager_of_class
def register_class(class_):
    """Register class instrumentation.

    Returns the existing or newly created class manager.
    """
    existing = manager_of_class(class_)
    if existing is not None:
        return existing
    return _instrumentation_factory.create_manager_for_cls(class_)
def unregister_class(class_):
    """Unregister class instrumentation.

    Delegates to the module-level factory, which tears down the
    class's ClassManager and removes the manager attribute.
    """
    _instrumentation_factory.unregister(class_)
def is_instrumented(instance, key):
    """Return True if the given attribute on the given instance is
    instrumented by the attributes package.

    This function may be used regardless of instrumentation
    applied directly to the class, i.e. no descriptors are required.
    """
    manager = manager_of_class(instance.__class__)
    return manager.is_instrumented(key, search=True)
def _generate_init(class_, class_manager):
    """Build an __init__ decorator that triggers ClassManager events."""
    # TODO: we should use the ClassManager's notion of the
    # original '__init__' method, once ClassManager is fixed
    # to always reference that.
    original__init__ = class_.__init__
    assert original__init__
    # Go through some effort here and don't change the user's __init__
    # calling signature, including the unlikely case that it has
    # a return value.
    # FIXME: need to juggle local names to avoid constructor argument
    # clashes.
    func_body = """\
def __init__(%(apply_pos)s):
    new_state = class_manager._new_state_if_none(%(self_arg)s)
    if new_state:
        return new_state._initialize_instance(%(apply_kw)s)
    else:
        return original__init__(%(apply_kw)s)
"""
    # Interpolate the original signature into the template so the
    # wrapper is a drop-in replacement.
    func_vars = util.format_argspec_init(original__init__, grouped=False)
    func_text = func_body % func_vars
    if util.py2k:
        # Python 2: unwrap bound/unbound method to reach the function.
        func = getattr(original__init__, 'im_func', original__init__)
        func_defaults = getattr(func, 'func_defaults', None)
    else:
        func_defaults = getattr(original__init__, '__defaults__', None)
        func_kw_defaults = getattr(original__init__, '__kwdefaults__', None)
    # exec in a copy of locals() so the generated function closes over
    # class_manager and original__init__.
    env = locals().copy()
    exec(func_text, env)
    __init__ = env['__init__']
    __init__.__doc__ = original__init__.__doc__
    # Carry over default values so introspection still works.
    if func_defaults:
        __init__.__defaults__ = func_defaults
    if not util.py2k and func_kw_defaults:
        __init__.__kwdefaults__ = func_kw_defaults
    return __init__
| mit |
fabianrost84/cython | Cython/Compiler/Main.py | 1 | 30332 | #
# Cython Top Level
#
from __future__ import absolute_import
import os
import re
import sys
import io
# Refuse to run on interpreters older than Cython supports.
if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[:2] < (3, 2):
    sys.stderr.write("Sorry, Cython requires Python 2.6+ or 3.2+, found %d.%d\n" % tuple(sys.version_info[:2]))
    sys.exit(1)
# Python 2/3 compatibility: alias basestring to str on Python 3.
try:
    from __builtin__ import basestring
except ImportError:
    basestring = str
from . import Errors
# Do not import Parsing here, import it when needed, because Parsing imports
# Nodes, which globally needs debug command line options initialized to set a
# conditional metaclass. These options are processed by CmdLine called from
# main() in this file.
# import Parsing
from .StringEncoding import EncodedString
from .Scanning import PyrexScanner, FileSourceDescriptor
from .Errors import PyrexError, CompileError, error, warning
from .Symtab import ModuleScope
from .. import Utils
from . import Options
from . import Version # legacy import needed by old PyTables versions
version = Version.version  # legacy attribute - use "Cython.__version__" instead
# Valid dotted module names: identifiers separated by dots.
module_name_pattern = re.compile(r"[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)*$")
# Module-level verbosity flag, set from the command line.
verbose = 0
class CompilationData(object):
    # Bundles the information that is passed from transform to transform.
    # (For now, this is only)
    # While Context contains every pxd ever loaded, path information etc.,
    # this only contains the data related to a single compilation pass
    #
    # pyx                   ModuleNode              Main code tree of this compilation.
    # pxds                  {string : ModuleNode}   Trees for the pxds used in the pyx.
    # codewriter            CCodeWriter             Where to output final code.
    # options               CompilationOptions
    # result                CompilationResult
    # Placeholder class: attributes are assigned dynamically by the pipeline.
    pass
class Context(object):
    # This class encapsulates the context needed for compiling
    # one or more Cython implementation files along with their
    # associated and imported declaration files. It includes
    # the root of the module import namespace and the list
    # of directories to search for include files.
    #
    # modules               {string : ModuleScope}
    # include_directories   [string]
    # future_directives     [object]
    # language_level        int     currently 2 or 3 for Python 2/3
    cython_scope = None
    def __init__(self, include_directories, compiler_directives, cpp=False,
                 language_level=2, options=None, create_testscope=True):
        # cython_scope is a hack, set to False by subclasses, in order to break
        # an infinite loop.
        # Better code organization would fix it.
        from . import Builtin, CythonScope
        self.modules = {"__builtin__" : Builtin.builtin_scope}
        self.cython_scope = CythonScope.create_cython_scope(self)
        self.modules["cython"] = self.cython_scope
        self.include_directories = include_directories
        self.future_directives = set()
        self.compiler_directives = compiler_directives
        self.cpp = cpp
        self.options = options
        self.pxds = {}  # full name -> node tree
        self._interned = {}  # (type(value), value, *key_args) -> interned_value
        # The bundled Includes/ directory is always searched last.
        standard_include_path = os.path.abspath(os.path.normpath(
            os.path.join(os.path.dirname(__file__), os.path.pardir, 'Includes')))
        self.include_directories = include_directories + [standard_include_path]
        self.set_language_level(language_level)
        self.gdb_debug_outputwriter = None
    def set_language_level(self, level):
        # Level 3 implies the Python 3 future directives and makes
        # 'builtins' an alias for '__builtin__'.
        self.language_level = level
        if level >= 3:
            from .Future import print_function, unicode_literals, absolute_import, division
            self.future_directives.update([print_function, unicode_literals, absolute_import, division])
            self.modules['builtins'] = self.modules['__builtin__']
    def intern_ustring(self, value, encoding=None):
        # Return a shared EncodedString for (value, encoding).
        key = (EncodedString, value, encoding)
        try:
            return self._interned[key]
        except KeyError:
            pass
        value = EncodedString(value)
        if encoding:
            value.encoding = encoding
        self._interned[key] = value
        return value
    def intern_value(self, value, *key):
        # Generic value interning keyed on (type, value, *key).
        key = (type(value), value) + key
        try:
            return self._interned[key]
        except KeyError:
            pass
        self._interned[key] = value
        return value
    # pipeline creation functions can now be found in Pipeline.py
    def process_pxd(self, source_desc, scope, module_name):
        # Compile a .pxd (or a .pyx used as a pxd) through the
        # appropriate pipeline; returns (error, data).
        from . import Pipeline
        if isinstance(source_desc, FileSourceDescriptor) and source_desc._file_type == 'pyx':
            source = CompilationSource(source_desc, module_name, os.getcwd())
            result_sink = create_default_resultobj(source, self.options)
            pipeline = Pipeline.create_pyx_as_pxd_pipeline(self, result_sink)
            result = Pipeline.run_pipeline(pipeline, source)
        else:
            pipeline = Pipeline.create_pxd_pipeline(self, scope, module_name)
            result = Pipeline.run_pipeline(pipeline, source_desc)
        return result
    def nonfatal_error(self, exc):
        return Errors.report_error(exc)
    def find_module(self, module_name, relative_to=None, pos=None, need_pxd=1,
                    absolute_fallback=True):
        # Finds and returns the module scope corresponding to
        # the given relative or absolute module name. If this
        # is the first time the module has been requested, finds
        # the corresponding .pxd file and process it.
        # If relative_to is not None, it must be a module scope,
        # and the module will first be searched for relative to
        # that module, provided its name is not a dotted name.
        debug_find_module = 0
        if debug_find_module:
            print("Context.find_module: module_name = %s, relative_to = %s, pos = %s, need_pxd = %s" % (
                module_name, relative_to, pos, need_pxd))
        scope = None
        pxd_pathname = None
        if relative_to:
            if module_name:
                # from .module import ...
                qualified_name = relative_to.qualify_name(module_name)
            else:
                # from . import ...
                qualified_name = relative_to.qualified_name
                scope = relative_to
                relative_to = None
        else:
            qualified_name = module_name
        if not module_name_pattern.match(qualified_name):
            raise CompileError(pos or (module_name, 0, 0),
                               "'%s' is not a valid module name" % module_name)
        if relative_to:
            if debug_find_module:
                print("...trying relative import")
            scope = relative_to.lookup_submodule(module_name)
            if not scope:
                pxd_pathname = self.find_pxd_file(qualified_name, pos)
                if pxd_pathname:
                    scope = relative_to.find_submodule(module_name)
        if not scope:
            if debug_find_module:
                print("...trying absolute import")
            if absolute_fallback:
                qualified_name = module_name
            scope = self
            # walk/create the dotted path of module scopes
            for name in qualified_name.split("."):
                scope = scope.find_submodule(name)
        if debug_find_module:
            print("...scope = %s" % scope)
        if not scope.pxd_file_loaded:
            if debug_find_module:
                print("...pxd not loaded")
            if not pxd_pathname:
                if debug_find_module:
                    print("...looking for pxd file")
                # Only look in sys.path if we are explicitly looking
                # for a .pxd file.
                pxd_pathname = self.find_pxd_file(qualified_name, pos, sys_path=need_pxd)
                if debug_find_module:
                    print("......found %s" % pxd_pathname)
                if not pxd_pathname and need_pxd:
                    # Set pxd_file_loaded such that we don't need to
                    # look for the non-existing pxd file next time.
                    scope.pxd_file_loaded = True
                    package_pathname = self.search_include_directories(qualified_name, ".py", pos)
                    if package_pathname and package_pathname.endswith('__init__.py'):
                        pass
                    else:
                        error(pos, "'%s.pxd' not found" % qualified_name.replace('.', os.sep))
            if pxd_pathname:
                scope.pxd_file_loaded = True
                try:
                    if debug_find_module:
                        print("Context.find_module: Parsing %s" % pxd_pathname)
                    rel_path = module_name.replace('.', os.sep) + os.path.splitext(pxd_pathname)[1]
                    if not pxd_pathname.endswith(rel_path):
                        rel_path = pxd_pathname  # safety measure to prevent printing incorrect paths
                    source_desc = FileSourceDescriptor(pxd_pathname, rel_path)
                    err, result = self.process_pxd(source_desc, scope, qualified_name)
                    if err:
                        raise err
                    (pxd_codenodes, pxd_scope) = result
                    self.pxds[module_name] = (pxd_codenodes, pxd_scope)
                except CompileError:
                    pass
        return scope
    def find_pxd_file(self, qualified_name, pos, sys_path=True):
        # Search include path (and sys.path if sys_path is True) for
        # the .pxd file corresponding to the given fully-qualified
        # module name.
        # Will find either a dotted filename or a file in a
        # package directory. If a source file position is given,
        # the directory containing the source file is searched first
        # for a dotted filename, and its containing package root
        # directory is searched first for a non-dotted filename.
        pxd = self.search_include_directories(qualified_name, ".pxd", pos, sys_path=sys_path)
        if pxd is None:  # XXX Keep this until Includes/Deprecated is removed
            if (qualified_name.startswith('python') or
                    qualified_name in ('stdlib', 'stdio', 'stl')):
                standard_include_path = os.path.abspath(os.path.normpath(
                    os.path.join(os.path.dirname(__file__), os.path.pardir, 'Includes')))
                deprecated_include_path = os.path.join(standard_include_path, 'Deprecated')
                self.include_directories.append(deprecated_include_path)
                try:
                    pxd = self.search_include_directories(qualified_name, ".pxd", pos)
                finally:
                    self.include_directories.pop()
                if pxd:
                    name = qualified_name
                    if name.startswith('python'):
                        warning(pos, "'%s' is deprecated, use 'cpython'" % name, 1)
                    elif name in ('stdlib', 'stdio'):
                        warning(pos, "'%s' is deprecated, use 'libc.%s'" % (name, name), 1)
                    # NOTE(review): ('stl') is a string, not a tuple, so this
                    # is a substring test; ('stl',) was probably intended.
                    elif name in ('stl'):
                        warning(pos, "'%s' is deprecated, use 'libcpp.*.*'" % name, 1)
        if pxd is None and Options.cimport_from_pyx:
            return self.find_pyx_file(qualified_name, pos)
        return pxd
    def find_pyx_file(self, qualified_name, pos):
        # Search include path for the .pyx file corresponding to the
        # given fully-qualified module name, as for find_pxd_file().
        return self.search_include_directories(qualified_name, ".pyx", pos)
    def find_include_file(self, filename, pos):
        # Search list of include directories for filename.
        # Reports an error and returns None if not found.
        path = self.search_include_directories(filename, "", pos,
                                               include=True)
        if not path:
            error(pos, "'%s' not found" % filename)
        return path
    def search_include_directories(self, qualified_name, suffix, pos,
                                   include=False, sys_path=False):
        # Thin wrapper over the shared Utils implementation.
        return Utils.search_include_directories(
            tuple(self.include_directories), qualified_name, suffix, pos, include, sys_path)
    def find_root_package_dir(self, file_path):
        return Utils.find_root_package_dir(file_path)
    def check_package_dir(self, dir, package_names):
        return Utils.check_package_dir(dir, tuple(package_names))
    def c_file_out_of_date(self, source_path, output_path):
        # Returns 1 if the generated C file is older than any of its
        # inputs (source, pxd, cimports, includes), else 0.
        if not os.path.exists(output_path):
            return 1
        c_time = Utils.modification_time(output_path)
        if Utils.file_newer_than(source_path, c_time):
            return 1
        pos = [source_path]
        pxd_path = Utils.replace_suffix(source_path, ".pxd")
        if os.path.exists(pxd_path) and Utils.file_newer_than(pxd_path, c_time):
            return 1
        for kind, name in self.read_dependency_file(source_path):
            if kind == "cimport":
                dep_path = self.find_pxd_file(name, pos)
            elif kind == "include":
                # NOTE(review): search_include_directories takes
                # (qualified_name, suffix, pos, ...); here pos is passed as
                # the suffix argument — looks like a latent bug, confirm.
                dep_path = self.search_include_directories(name, pos)
            else:
                continue
            if dep_path and Utils.file_newer_than(dep_path, c_time):
                return 1
        return 0
    def find_cimported_module_names(self, source_path):
        return [ name for kind, name in self.read_dependency_file(source_path)
                 if kind == "cimport" ]
    def is_package_dir(self, dir_path):
        return Utils.is_package_dir(dir_path)
    def read_dependency_file(self, source_path):
        # Parse the ".dep" sidecar file into (kind, name) pairs.
        dep_path = Utils.replace_suffix(source_path, ".dep")
        if os.path.exists(dep_path):
            f = open(dep_path, "rU")
            chunks = [ line.strip().split(" ", 1)
                       for line in f.readlines()
                       if " " in line.strip() ]
            f.close()
            return chunks
        else:
            return ()
    def lookup_submodule(self, name):
        # Look up a top-level module. Returns None if not found.
        return self.modules.get(name, None)
    def find_submodule(self, name):
        # Find a top-level module, creating a new one if needed.
        scope = self.lookup_submodule(name)
        if not scope:
            scope = ModuleScope(name,
                                parent_module = None, context = self)
            self.modules[name] = scope
        return scope
    def parse(self, source_desc, scope, pxd, full_module_name):
        if not isinstance(source_desc, FileSourceDescriptor):
            raise RuntimeError("Only file sources for code supported")
        source_filename = source_desc.filename
        scope.cpp = self.cpp
        # Parse the given source file and return a parse tree.
        num_errors = Errors.num_errors
        try:
            with Utils.open_source_file(source_filename) as f:
                from . import Parsing
                s = PyrexScanner(f, source_desc, source_encoding = f.encoding,
                                 scope = scope, context = self)
                tree = Parsing.p_module(s, pxd, full_module_name)
                if self.options.formal_grammar:
                    try:
                        from ..Parser import ConcreteSyntaxTree
                    except ImportError:
                        raise RuntimeError(
                            "Formal grammer can only be used with compiled Cython with an available pgen.")
                    ConcreteSyntaxTree.p_module(source_filename)
        except UnicodeDecodeError as e:
            #import traceback
            #traceback.print_exc()
            raise self._report_decode_error(source_desc, e)
        if Errors.num_errors > num_errors:
            raise CompileError()
        return tree
    def _report_decode_error(self, source_desc, exc):
        # Translate a UnicodeDecodeError byte offset into a (line, column)
        # position by re-reading the file as latin-1 (which never fails).
        msg = exc.args[-1]
        position = exc.args[2]
        encoding = exc.args[0]
        line = 1
        column = idx = 0
        with io.open(source_desc.filename, "r", encoding='iso8859-1', newline='') as f:
            for line, data in enumerate(f, 1):
                idx += len(data)
                if idx >= position:
                    column = position - (idx - len(data)) + 1
                    break
        return error((source_desc, line, column),
                     "Decoding error, missing or incorrect coding=<encoding-name> "
                     "at top of source (cannot decode with encoding %r: %s)" % (encoding, msg))
    def extract_module_name(self, path, options):
        # Find fully_qualified module name from the full pathname
        # of a source file.
        dir, filename = os.path.split(path)
        module_name, _ = os.path.splitext(filename)
        if "." in module_name:
            # already a dotted filename
            return module_name
        names = [module_name]
        # walk up through enclosing package directories
        while self.is_package_dir(dir):
            parent, package_name = os.path.split(dir)
            if parent == dir:
                break
            names.append(package_name)
            dir = parent
        names.reverse()
        return ".".join(names)
    def setup_errors(self, options, result):
        Errors.reset()  # clear any remaining error state
        if options.use_listing_file:
            path = result.listing_file = Utils.replace_suffix(result.main_source_file, ".lis")
        else:
            path = None
        Errors.open_listing_file(path=path,
                                 echo_to_stderr=options.errors_to_stderr)
    def teardown_errors(self, err, options, result):
        # Close error reporting and, on failure, invalidate the output
        # C file so a stale artifact is never reused.
        source_desc = result.compilation_source.source_desc
        if not isinstance(source_desc, FileSourceDescriptor):
            raise RuntimeError("Only file sources for code supported")
        Errors.close_listing_file()
        result.num_errors = Errors.num_errors
        if result.num_errors > 0:
            err = True
        if err and result.c_file:
            try:
                Utils.castrate_file(result.c_file, os.stat(source_desc.filename))
            except EnvironmentError:
                pass
            result.c_file = None
def get_output_filename(source_filename, cwd, options):
    """Pick the C/C++ output path for *source_filename*.

    Honours ``options.output_file`` (interpreted relative to *cwd*; if it
    names a directory, the default basename is placed inside it).
    Otherwise the source suffix is swapped for ".cpp"/".c" depending on
    ``options.cplus``.
    """
    c_suffix = ".cpp" if options.cplus else ".c"
    suggested_file_name = Utils.replace_suffix(source_filename, c_suffix)
    if not options.output_file:
        return suggested_file_name
    out_path = os.path.join(cwd, options.output_file)
    if os.path.isdir(out_path):
        return os.path.join(out_path, os.path.basename(suggested_file_name))
    return out_path
def create_default_resultobj(compilation_source, options):
    """Build a CompilationResult pre-populated from *compilation_source*
    and *options*: main source file, C output path and embedded metadata."""
    source_desc = compilation_source.source_desc
    result = CompilationResult()
    result.main_source_file = source_desc.filename
    result.compilation_source = compilation_source
    result.c_file = get_output_filename(source_desc.filename,
                                        compilation_source.cwd, options)
    result.embedded_metadata = options.embedded_metadata
    return result
def run_pipeline(source, options, full_module_name=None, context=None):
    # Compile one source file end-to-end: build the source/result
    # objects, choose a pipeline and run it; returns the
    # CompilationResult.
    from . import Pipeline
    source_ext = os.path.splitext(source)[1]
    options.configure_language_defaults(source_ext[1:])  # py/pyx
    if context is None:
        context = options.create_context()
    # Set up source object
    cwd = os.getcwd()
    abs_path = os.path.abspath(source)
    full_module_name = full_module_name or context.extract_module_name(source, options)
    if options.relative_path_in_code_position_comments:
        rel_path = full_module_name.replace('.', os.sep) + source_ext
        if not abs_path.endswith(rel_path):
            rel_path = source  # safety measure to prevent printing incorrect paths
    else:
        rel_path = abs_path
    source_desc = FileSourceDescriptor(abs_path, rel_path)
    source = CompilationSource(source_desc, full_module_name, cwd)
    # Set up result object
    result = create_default_resultobj(source, options)
    if options.annotate is None:
        # By default, decide based on whether an html file already exists.
        html_filename = os.path.splitext(result.c_file)[0] + ".html"
        if os.path.exists(html_filename):
            with io.open(html_filename, "r", encoding="UTF-8") as html_file:
                if u'<!-- Generated by Cython' in html_file.read(100):
                    options.annotate = True
    # Get pipeline
    if source_ext.lower() == '.py' or not source_ext:
        pipeline = Pipeline.create_py_pipeline(context, options, result)
    else:
        pipeline = Pipeline.create_pyx_pipeline(context, options, result)
    context.setup_errors(options, result)
    err, enddata = Pipeline.run_pipeline(pipeline, source)
    context.teardown_errors(err, options, result)
    return result
#------------------------------------------------------------------------
#
# Main Python entry points
#
#------------------------------------------------------------------------
class CompilationSource(object):
    """
    Bundle of everything necessary to start up a compilation pipeline for
    a single compilation unit: the source descriptor, the fully qualified
    module name and the working directory.
    """
    def __init__(self, source_desc, full_module_name, cwd):
        self.cwd = cwd
        self.full_module_name = full_module_name
        self.source_desc = source_desc
class CompilationOptions(object):
    """
    Options to the Cython compiler:

    show_version      boolean   Display version number
    use_listing_file  boolean   Generate a .lis file
    errors_to_stderr  boolean   Echo errors to stderr when using .lis
    include_path      [string]  Directories to search for include files
    output_file       string    Name of generated .c file
    generate_pxi      boolean   Generate .pxi file for public declarations
    capi_reexport_cincludes
                      boolean   Add cincluded headers to any auto-generated
                                header files.
    timestamps        boolean   Only compile changed source files.
    verbose           boolean   Always print source names being compiled
    compiler_directives  dict   Overrides for pragma options (see Options.py)
    embedded_metadata    dict   Metadata to embed in the C file as json.
    evaluate_tree_assertions boolean  Test support: evaluate parse tree assertions
    language_level    integer   The Python language level: 2 or 3
    formal_grammar    boolean   Parse the file with the formal grammar
    cplus             boolean   Compile as c++ code
    """
    def __init__(self, defaults=None, **kw):
        self.include_path = []
        if not defaults:
            defaults = default_options
        elif isinstance(defaults, CompilationOptions):
            defaults = defaults.__dict__
        options = dict(defaults)
        options.update(kw)
        # 'default_options' contains a value for most known compiler options,
        # so validate the supplied names against it.
        unknown_options = set(options) - set(default_options)
        # 'include_path' is valid even though it has no entry in the defaults.
        unknown_options.discard('include_path')
        if unknown_options:
            # TODO: make this a hard error in 0.22
            message = "got unknown compilation option%s, please remove: %s" % (
                's' if len(unknown_options) > 1 else '',
                ', '.join(unknown_options))
            import warnings
            warnings.warn(message)
        # Work on a copy so that later mutations never leak into the defaults.
        directives = dict(options['compiler_directives'])
        options['compiler_directives'] = directives
        # Directives may pre-set these two options, but explicit keyword
        # arguments always win.
        if 'language_level' in directives and 'language_level' not in kw:
            options['language_level'] = int(directives['language_level'])
        if 'formal_grammar' in directives and 'formal_grammar' not in kw:
            options['formal_grammar'] = directives['formal_grammar']
        if options['cache'] is True:
            options['cache'] = os.path.expanduser("~/.cycache")
        self.__dict__.update(options)

    def configure_language_defaults(self, source_extension):
        """Adjust directive defaults for plain .py sources."""
        if source_extension != 'py':
            return
        if self.compiler_directives.get('binding') is None:
            self.compiler_directives['binding'] = True

    def create_context(self):
        """Create a compilation Context configured from these options."""
        return Context(self.include_path, self.compiler_directives,
                       self.cplus, self.language_level, options=self)
class CompilationResult(object):
    """
    Results from the Cython compiler:

    c_file           string or None   The generated C source file
    h_file           string or None   The generated C header file
    i_file           string or None   The generated .pxi file
    api_file         string or None   The generated C API .h file
    listing_file     string or None   File of error messages
    object_file      string or None   Result of compiling the C file
    extension_file   string or None   Result of linking the object file
    num_errors       integer          Number of compilation errors
    compilation_source CompilationSource
    """
    def __init__(self):
        # All output slots start out empty; the pipeline fills them in.
        for attr in ('c_file', 'h_file', 'i_file', 'api_file',
                     'listing_file', 'object_file', 'extension_file',
                     'main_source_file'):
            setattr(self, attr, None)
class CompilationResultSet(dict):
    """
    Results from compiling multiple Pyrex source files: a mapping from
    source file path to CompilationResult.  Also has the attribute:

    num_errors   integer   Total number of compilation errors
    """
    num_errors = 0

    def add(self, source, result):
        """Record *result* for *source* and accumulate its error count."""
        self[source] = result
        self.num_errors += result.num_errors
def compile_single(source, options, full_module_name=None):
    """
    compile_single(source, options, full_module_name)

    Compile a single Pyrex implementation file and return a
    CompilationResult.  No timestamp checking or recursion is performed.
    """
    result = run_pipeline(source, options, full_module_name)
    return result
def compile_multiple(sources, options):
    """
    compile_multiple(sources, options)

    Compile the given sequence of Pyrex implementation files and return a
    CompilationResultSet.  Performs timestamp checking and/or recursion if
    these are specified in the options.
    """
    # run_pipeline creates the context lazily when we do not supply one.
    sources = [os.path.abspath(source) for source in sources]
    processed = set()
    results = CompilationResultSet()
    timestamps = options.timestamps
    verbose = options.verbose
    cwd = os.getcwd()
    context = None
    for source in sources:
        if source in processed:
            continue
        if context is None:
            context = options.create_context()
        output_filename = get_output_filename(source, cwd, options)
        out_of_date = context.c_file_out_of_date(source, output_filename)
        if not timestamps or out_of_date:
            if verbose:
                sys.stderr.write("Compiling %s\n" % source)
            results.add(source, run_pipeline(source, options, context=context))
            # Compiling multiple sources in one context doesn't quite
            # work properly yet, so start afresh for the next file.
            context = None
        processed.add(source)
    return results
def compile(source, options=None, full_module_name=None, **kwds):
    """
    compile(source [, options], [, <option> = <value>]...)

    Compile one or more Pyrex implementation files, with optional timestamp
    checking and recursing on dependencies.  The source argument may be a
    string or a sequence of strings.  If it is a string and no recursion or
    timestamp checking is requested, a CompilationResult is returned,
    otherwise a CompilationResultSet is returned.
    """
    options = CompilationOptions(defaults=options, **kwds)
    single = isinstance(source, basestring) and not options.timestamps
    if single:
        return compile_single(source, options, full_module_name)
    return compile_multiple(source, options)
#------------------------------------------------------------------------
#
# Main command-line entry point
#
#------------------------------------------------------------------------
def setuptools_main():
    """Entry point used by setuptools' console-script wrapper."""
    return main(command_line=1)
def main(command_line=0):
    """Command-line driver: parse arguments, compile, exit non-zero on error."""
    args = sys.argv[1:]
    any_failures = 0
    if command_line:
        from .CmdLine import parse_command_line
        options, sources = parse_command_line(args)
    else:
        options = CompilationOptions(default_options)
        sources = args
    if options.show_version:
        sys.stderr.write("Cython version %s\n" % version)
    if options.working_path != "":
        os.chdir(options.working_path)
    try:
        if compile(sources, options).num_errors > 0:
            any_failures = 1
    except (EnvironmentError, PyrexError) as e:
        # Report the problem on stderr and fall through to the exit below.
        sys.stderr.write(str(e) + '\n')
        any_failures = 1
    if any_failures:
        sys.exit(1)
#------------------------------------------------------------------------
#
# Set the default options depending on the platform
#
#------------------------------------------------------------------------
# Baseline value for every known compiler option.  CompilationOptions
# validates user-supplied option names against this dict's keys and warns
# about unknown ones.
default_options = dict(
    show_version = 0,
    use_listing_file = 0,
    errors_to_stderr = 1,
    cplus = 0,
    output_file = None,
    annotate = None,
    annotate_coverage_xml = None,
    generate_pxi = 0,
    capi_reexport_cincludes = 0,
    working_path = "",
    timestamps = None,
    verbose = 0,
    quiet = 0,
    compiler_directives = {},
    embedded_metadata = {},
    evaluate_tree_assertions = False,
    emit_linenums = False,
    relative_path_in_code_position_comments = True,
    c_line_in_traceback = True,
    language_level = 2,
    formal_grammar = False,
    gdb_debug = False,
    compile_time_env = None,
    common_utility_include_dir = None,
    output_dir=None,
    build_dir=None,
    cache=None,
)
| apache-2.0 |
rackerlabs/django-DefectDojo | dojo/tools/acunetix/parser_helper.py | 2 | 6078 | import logging
from lxml import etree
from lxml.etree import XMLSyntaxError
from .parser_models import AcunetixScanReport
from .parser_models import DefectDojoFinding
# from memory_profiler import profile #Comment out this and profile in defectdojo repo
import html2text
logging.basicConfig(level=logging.ERROR)
SCAN_NODE_TAG_NAME = "Scan"
ACUNETIX_XML_SCAN_IGNORE_NODES = ['Technologies', 'Crawler']
ACUNETIX_XML_REPORTITEM_IGNORE_NODES = ['TechnicalDetails', 'CVEList', 'CVSS', 'CVSS3']
# @profile
def get_root_node(filename):
    """
    Parse *filename* as XML and return the document's root element.

    :param filename: path (or file-like object) of the Acunetix XML report.
    :return: root element of the parsed tree.
    :raises XMLSyntaxError: if the file is not well-formed XML.
    :raises IOError: if the file cannot be read.
    """
    try:
        tree = etree.parse(filename)
        return tree.getroot()
    except XMLSyntaxError as xse:
        # Bug fix: the messages below previously hard-coded "(unknown)" and
        # called .format(filename=...) with no placeholder, so the file name
        # was never logged.
        logging.error("ERROR : error parsing XML file {filename}".format(filename=filename))
        raise xse
    except IOError as ioe:
        logging.error("ERROR : xml file {filename} doesn't exist.".format(filename=filename))
        raise ioe
    except Exception as e:
        logging.error("ERROR : exception while processing XML file {filename}".format(filename=filename))
        raise e
# @profile
def get_scan_node(root):
    """
    Return the 'Scan' node, which must be the first child of *root*.

    :param root: root element of the parsed report.
    :return: the Scan element.
    :raises Exception: if the first child is not a Scan node.
    """
    first_child = root[0]
    if first_child.tag != SCAN_NODE_TAG_NAME:
        raise Exception(
            "ERROR: '{scan_node_tag_name}' node must be first "
            "child of root element '{root_tag_name}'.".format(
                scan_node_tag_name=SCAN_NODE_TAG_NAME,
                root_tag_name=root.tag))
    return first_child
# @profile
def get_report_item_references_url(references_node):
    """
    Collect reference URLs from a ReportItem's References node.

    :param references_node: the References element.
    :return: list of URL strings found in its grandchildren tagged 'URL'.
    """
    return [grandchild.text
            for reference in list(references_node)
            for grandchild in list(reference)
            if grandchild.tag == 'URL']
# @profile
def get_cwe_id(cwelist_node):
    """
    Return the CWE identifier stored in a CWEList node.

    :param cwelist_node: the CWEList element (assumed to hold a single CWE child).
    :return: the CWE id as text.
    """
    first_child = cwelist_node[0]
    return first_child.text
# @profile
def get_scan_report_items_details(report_items_node):
    """
    Build a list of dicts, one per ReportItem child of *report_items_node*.

    Ignored child tags are skipped; 'References' and 'CWEList' children are
    flattened into 'ReferencesURLs' and 'CWEId' entries respectively.
    """
    report_items = []
    if not list(report_items_node):
        logging.info("INFO : Report Items empty.")
        return report_items
    for report_item_node in list(report_items_node):
        report_item = {}
        for child in list(report_item_node):
            tag = child.tag
            if tag in ACUNETIX_XML_REPORTITEM_IGNORE_NODES:
                continue
            if tag == 'References':
                report_item['ReferencesURLs'] = get_report_item_references_url(child)
            elif tag == 'CWEList':
                report_item['CWEId'] = get_cwe_id(child)
            else:
                report_item[tag] = child.text
        report_items.append(report_item)
    return report_items
# @profile
def get_scan_details(scan_node):
    """
    Extract scan metadata (and report items) from the Scan node.

    :param scan_node: the Scan element.
    :return: dict of scan details, with 'ReportItems' expanded to a list.
    :raises Exception: if no usable children are found.
    """
    scan_details = {}
    for child in list(scan_node):
        if child.tag in ACUNETIX_XML_SCAN_IGNORE_NODES:
            continue
        if child.tag == 'ReportItems':
            scan_details['ReportItems'] = get_scan_report_items_details(child)
        else:
            scan_details[child.tag] = child.text
    if not scan_details:
        raise Exception(
            "ERROR: fetching scan details from 'Scan' node. 'Scan' node can't be empty.")
    return scan_details
# @profile
def get_acunetix_scan_report(filename):
    """
    Parse *filename* and build an AcunetixScanReport from its Scan node.
    """
    root = get_root_node(filename)
    scan_details = get_scan_details(get_scan_node(root))
    return AcunetixScanReport(**scan_details)
# @profile
def get_html2text(html):
    """
    Convert an HTML fragment to plain text without hard line wrapping.
    """
    converter = html2text.HTML2Text()
    converter.body_width = 0  # 0 disables line wrapping
    return converter.handle(html)
# @profile
def get_defectdojo_findings(filename):
    """
    Convert an Acunetix XML report into a list of DefectDojoFinding objects.

    :param filename: path of the Acunetix XML report.
    :return: list of DefectDojoFinding instances, one per report item.
    """
    acunetix_scan_report = get_acunetix_scan_report(filename)
    defectdojo_findings = []
    for report_item in acunetix_scan_report.ReportItems:
        defectdojo_finding = dict()
        # NOTE(review): report items without a CWEList/References child will
        # lack 'CWEId'/'ReferencesURLs' and raise KeyError here -- confirm the
        # report schema guarantees these nodes.
        cwe = report_item['CWEId']
        url = acunetix_scan_report.StartURL
        # Title is a composite key: scan name, start URL, CWE and location.
        title = acunetix_scan_report.Name + "_" + url + "_" + cwe + "_" + report_item['Affects']
        defectdojo_finding['title'] = title
        defectdojo_finding['date'] = acunetix_scan_report.StartTime
        defectdojo_finding['cwe'] = cwe
        defectdojo_finding['url'] = url
        defectdojo_finding['severity'] = report_item['Severity']
        defectdojo_finding['description'] = get_html2text(report_item['Description'])
        defectdojo_finding['mitigation'] = get_html2text(report_item['Recommendation'])
        defectdojo_finding['impact'] = get_html2text(report_item['Impact'])
        defectdojo_finding['references'] = report_item['ReferencesURLs']
        defectdojo_finding['false_p'] = report_item['IsFalsePositive']
        finding = DefectDojoFinding(**defectdojo_finding)
        defectdojo_findings.append(finding)
    return defectdojo_findings
| bsd-3-clause |
Gustry/inasafe | safe/common/parameters/test/example.py | 3 | 1607 | # coding=utf-8
"""Example usage of custom parameters."""
import sys
from safe.test.utilities import get_qgis_app
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
from PyQt4.QtGui import QApplication, QWidget, QGridLayout
from parameters.qt_widgets.parameter_container import ParameterContainer
from safe.common.parameters.default_value_parameter import (
DefaultValueParameter)
from safe.common.parameters.default_value_parameter_widget import (
DefaultValueParameterWidget)
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
def main():
    """Run the example: show a single DefaultValueParameter in a container."""
    app = QApplication([])
    # Build the one parameter displayed by this example.
    parameter = DefaultValueParameter()
    parameter.name = 'Value parameter'
    parameter.help_text = 'Help text'
    parameter.description = 'Description'
    parameter.labels = ['Setting', 'Do not report', 'Custom']
    parameter.options = [0, 1, None]
    # Register the custom widget class used to render this parameter type.
    container = ParameterContainer(
        [parameter],
        extra_parameters=[(DefaultValueParameter, DefaultValueParameterWidget)])
    container.setup_ui()
    # Host the container in a bare top-level window.
    window = QWidget()
    grid = QGridLayout()
    grid.addWidget(container)
    window.setLayout(grid)
    window.setGeometry(0, 0, 500, 500)
    window.show()
    sys.exit(app.exec_())
# Allow running this example directly as a script.
if __name__ == '__main__':
    main()
| gpl-3.0 |
pducks32/intergrala | python/sympy/sympy/matrices/sparse.py | 12 | 44929 | from __future__ import print_function, division
import copy
from collections import defaultdict
from sympy.core.containers import Dict
from sympy.core.compatibility import is_sequence, as_int
from sympy.core.logic import fuzzy_and
from sympy.core.singleton import S
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.utilities.iterables import uniq
from sympy.utilities.exceptions import SymPyDeprecationWarning
from .matrices import MatrixBase, ShapeError, a2idx
from .dense import Matrix
import collections
class SparseMatrix(MatrixBase):
"""
A sparse matrix (a matrix with a large number of zero elements).
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> SparseMatrix(2, 2, range(4))
Matrix([
[0, 1],
[2, 3]])
>>> SparseMatrix(2, 2, {(1, 1): 2})
Matrix([
[0, 0],
[0, 2]])
See Also
========
sympy.matrices.dense.Matrix
"""
    def __init__(self, *args):
        """Build the sparse storage (``self._smat``) from the arguments.

        Accepted forms:
        * another SparseMatrix (copied),
        * (rows, cols, entries) where entries is a callable ``op(i, j)``,
          a dict/Dict of ``{(i, j): value}``, or a flat sequence,
        * anything else understood by Matrix._handle_creation_inputs.
        Zero values are never stored.
        """
        if len(args) == 1 and isinstance(args[0], SparseMatrix):
            self.rows = args[0].rows
            self.cols = args[0].cols
            self._smat = dict(args[0]._smat)
            return
        self._smat = {}
        if len(args) == 3:
            self.rows = as_int(args[0])
            self.cols = as_int(args[1])
            if isinstance(args[2], collections.Callable):
                op = args[2]
                for i in range(self.rows):
                    for j in range(self.cols):
                        value = self._sympify(op(i, j))
                        if value:
                            self._smat[(i, j)] = value
            elif isinstance(args[2], (dict, Dict)):
                # manual copy, copy.deepcopy() doesn't work
                for key in args[2].keys():
                    v = args[2][key]
                    if v:
                        self._smat[key] = v
            elif is_sequence(args[2]):
                if len(args[2]) != self.rows*self.cols:
                    raise ValueError(
                        'List length (%s) != rows*columns (%s)' %
                        (len(args[2]), self.rows*self.cols))
                flat_list = args[2]
                for i in range(self.rows):
                    for j in range(self.cols):
                        value = self._sympify(flat_list[i*self.cols + j])
                        if value:
                            self._smat[(i, j)] = value
        else:
            # handle full matrix forms with _handle_creation_inputs
            r, c, _list = Matrix._handle_creation_inputs(*args)
            self.rows = r
            self.cols = c
            for i in range(self.rows):
                for j in range(self.cols):
                    value = _list[self.cols*i + j]
                    if value:
                        self._smat[(i, j)] = value
    def __getitem__(self, key):
        """Support ``M[i, j]`` scalar access, slice/sequence indexing
        (delegating to ``extract``), flat slicing ``M[lo:hi]`` and flat
        integer access ``M[n]``.  Missing entries read as ``S.Zero``.
        """
        if isinstance(key, tuple):
            i, j = key
            try:
                i, j = self.key2ij(key)
                return self._smat.get((i, j), S.Zero)
            except (TypeError, IndexError):
                # Non-scalar indices: normalise each of i and j to a list
                # of row/column indices, then extract the submatrix.
                if isinstance(i, slice):
                    i = range(self.rows)[i]
                elif is_sequence(i):
                    pass
                else:
                    if i >= self.rows:
                        raise IndexError('Row index out of bounds')
                    i = [i]
                if isinstance(j, slice):
                    j = range(self.cols)[j]
                elif is_sequence(j):
                    pass
                else:
                    if j >= self.cols:
                        raise IndexError('Col index out of bounds')
                    j = [j]
                return self.extract(i, j)
        # check for single arg, like M[:] or M[3]
        if isinstance(key, slice):
            lo, hi = key.indices(len(self))[:2]
            L = []
            for i in range(lo, hi):
                m, n = divmod(i, self.cols)
                L.append(self._smat.get((m, n), S.Zero))
            return L
        i, j = divmod(a2idx(key, len(self)), self.cols)
        return self._smat.get((i, j), S.Zero)
    def __setitem__(self, key, value):
        # In-place item assignment is deliberately unsupported on this class;
        # presumably a mutable subclass overrides it -- confirm before relying.
        raise NotImplementedError()
def copy(self):
return self._new(self.rows, self.cols, self._smat)
@property
def is_Identity(self):
if not self.is_square:
return False
if not all(self[i, i] == 1 for i in range(self.rows)):
return False
return len(self) == self.rows
def tolist(self):
"""Convert this sparse matrix into a list of nested Python lists.
Examples
========
>>> from sympy.matrices import SparseMatrix, ones
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a.tolist()
[[1, 2], [3, 4]]
When there are no rows then it will not be possible to tell how
many columns were in the original matrix:
>>> SparseMatrix(ones(0, 3)).tolist()
[]
"""
if not self.rows:
return []
if not self.cols:
return [[] for i in range(self.rows)]
I, J = self.shape
return [[self[i, j] for j in range(J)] for i in range(I)]
    def row(self, i):
        """Returns row i from self as a row vector.

        Examples
        ========
        >>> from sympy.matrices import SparseMatrix
        >>> a = SparseMatrix(((1, 2), (3, 4)))
        >>> a.row(0)
        Matrix([[1, 2]])

        See Also
        ========
        col
        row_list
        """
        return self[i,:]
    def col(self, j):
        """Returns column j from self as a column vector.

        Examples
        ========
        >>> from sympy.matrices import SparseMatrix
        >>> a = SparseMatrix(((1, 2), (3, 4)))
        >>> a.col(0)
        Matrix([
        [1],
        [3]])

        See Also
        ========
        row
        col_list
        """
        return self[:, j]
def row_list(self):
"""Returns a row-sorted list of non-zero elements of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.RL
[(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]
See Also
========
row_op
col_list
"""
return [tuple(k + (self[k],)) for k in sorted(list(self._smat.keys()), key=lambda k: list(k))]
RL = property(row_list, None, None, "Alternate faster representation")
def col_list(self):
"""Returns a column-sorted list of non-zero elements of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a=SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.CL
[(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
See Also
========
col_op
row_list
"""
return [tuple(k + (self[k],)) for k in sorted(list(self._smat.keys()), key=lambda k: list(reversed(k)))]
CL = property(col_list, None, None, "Alternate faster representation")
def _eval_trace(self):
"""Calculate the trace of a square matrix.
Examples
========
>>> from sympy.matrices import eye
>>> eye(3).trace()
3
"""
trace = S.Zero
for i in range(self.cols):
trace += self._smat.get((i, i), 0)
return trace
def _eval_transpose(self):
"""Returns the transposed SparseMatrix of this SparseMatrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.T
Matrix([
[1, 3],
[2, 4]])
"""
tran = self.zeros(self.cols, self.rows)
for key, value in self._smat.items():
key = key[1], key[0] # reverse
tran._smat[key] = value
return tran
def _eval_conjugate(self):
"""Return the by-element conjugation.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> from sympy import I
>>> a = SparseMatrix(((1, 2 + I), (3, 4), (I, -I)))
>>> a
Matrix([
[1, 2 + I],
[3, 4],
[I, -I]])
>>> a.C
Matrix([
[ 1, 2 - I],
[ 3, 4],
[-I, I]])
See Also
========
transpose: Matrix transposition
H: Hermite conjugation
D: Dirac conjugation
"""
conj = self.copy()
for key, value in self._smat.items():
conj._smat[key] = value.conjugate()
return conj
def multiply(self, other):
"""Fast multiplication exploiting the sparsity of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix, ones
>>> A, B = SparseMatrix(ones(4, 3)), SparseMatrix(ones(3, 4))
>>> A.multiply(B) == 3*ones(4)
True
See Also
========
add
"""
A = self
B = other
# sort B's row_list into list of rows
Blist = [[] for i in range(B.rows)]
for i, j, v in B.row_list():
Blist[i].append((j, v))
Cdict = defaultdict(int)
for k, j, Akj in A.row_list():
for n, Bjn in Blist[j]:
temp = Akj*Bjn
Cdict[k, n] += temp
rv = self.zeros(A.rows, B.cols)
rv._smat = dict([(k, v) for k, v in Cdict.items() if v])
return rv
def scalar_multiply(self, scalar):
"Scalar element-wise multiplication"
M = self.zeros(*self.shape)
if scalar:
for i in self._smat:
v = scalar*self._smat[i]
if v:
M._smat[i] = v
else:
M._smat.pop(i, None)
return M
def __mul__(self, other):
"""Multiply self and other, watching for non-matrix entities.
When multiplying be a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye, zeros
>>> I = SparseMatrix(eye(3))
>>> I*I == I
True
>>> Z = zeros(3)
>>> I*Z
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> I*2 == 2*I
True
"""
if isinstance(other, SparseMatrix):
return self.multiply(other)
if isinstance(other, MatrixBase):
return other._new(self*self._new(other))
return self.scalar_multiply(other)
def __rmul__(self, other):
"""Return product the same type as other (if a Matrix).
When multiplying be a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> from sympy.matrices import Matrix, SparseMatrix
>>> A = Matrix(2, 2, range(1, 5))
>>> S = SparseMatrix(2, 2, range(2, 6))
>>> A*S == S*A
False
>>> (isinstance(A*S, SparseMatrix) ==
... isinstance(S*A, SparseMatrix) == False)
True
"""
if isinstance(other, MatrixBase):
return other*other._new(self)
return self.scalar_multiply(other)
def __add__(self, other):
"""Add other to self, efficiently if possible.
When adding a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye
>>> A = SparseMatrix(eye(3)) + SparseMatrix(eye(3))
>>> B = SparseMatrix(eye(3)) + eye(3)
>>> A
Matrix([
[2, 0, 0],
[0, 2, 0],
[0, 0, 2]])
>>> A == B
True
>>> isinstance(A, SparseMatrix) and isinstance(B, SparseMatrix)
False
"""
if isinstance(other, SparseMatrix):
return self.add(other)
elif isinstance(other, MatrixBase):
return other._new(other + self)
else:
raise NotImplementedError(
"Cannot add %s to %s" %
tuple([c.__class__.__name__ for c in (other, self)]))
def __neg__(self):
"""Negate all elements of self.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye
>>> -SparseMatrix(eye(3))
Matrix([
[-1, 0, 0],
[ 0, -1, 0],
[ 0, 0, -1]])
"""
rv = self.copy()
for k, v in rv._smat.items():
rv._smat[k] = -v
return rv
def add(self, other):
"""Add two sparse matrices with dictionary representation.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye, ones
>>> SparseMatrix(eye(3)).add(SparseMatrix(ones(3)))
Matrix([
[2, 1, 1],
[1, 2, 1],
[1, 1, 2]])
>>> SparseMatrix(eye(3)).add(-SparseMatrix(eye(3)))
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
Only the non-zero elements are stored, so the resulting dictionary
that is used to represent the sparse matrix is empty:
>>> _._smat
{}
See Also
========
multiply
"""
if not isinstance(other, SparseMatrix):
raise ValueError('only use add with %s, not %s' %
tuple([c.__class__.__name__ for c in (self, other)]))
if self.shape != other.shape:
raise ShapeError()
M = self.copy()
for i, v in other._smat.items():
v = M[i] + v
if v:
M._smat[i] = v
else:
M._smat.pop(i, None)
return M
    def extract(self, rowsList, colsList):
        # Work with the unique indices first; duplicated rows/cols are
        # re-inserted at the end.
        urow = list(uniq(rowsList))
        ucol = list(uniq(colsList))
        smat = {}
        if len(urow)*len(ucol) < len(self._smat):
            # there are fewer elements requested than there are elements in the matrix
            for i, r in enumerate(urow):
                for j, c in enumerate(ucol):
                    smat[i, j] = self._smat.get((r, c), 0)
        else:
            # most of the request will be zeros so check all of self's entries,
            # keeping only the ones that are desired
            for rk, ck in self._smat:
                if rk in urow and ck in ucol:
                    smat[(urow.index(rk), ucol.index(ck))] = self._smat[(rk, ck)]
        rv = self._new(len(urow), len(ucol), smat)
        # rv is nominally correct but there might be rows/cols
        # which require duplication
        if len(rowsList) != len(urow):
            for i, r in enumerate(rowsList):
                i_previous = rowsList.index(r)
                if i_previous != i:
                    rv = rv.row_insert(i, rv.row(i_previous))
        if len(colsList) != len(ucol):
            for i, c in enumerate(colsList):
                i_previous = colsList.index(c)
                if i_previous != i:
                    rv = rv.col_insert(i, rv.col(i_previous))
        return rv
    extract.__doc__ = MatrixBase.extract.__doc__
    @property
    def is_hermitian(self):
        """Checks if the matrix is Hermitian.

        In a Hermitian matrix element i,j is the complex conjugate of
        element j,i.

        Examples
        ========
        >>> from sympy.matrices import SparseMatrix
        >>> from sympy import I
        >>> from sympy.abc import x
        >>> a = SparseMatrix([[1, I], [-I, 1]])
        >>> a
        Matrix([
        [ 1, I],
        [-I, 1]])
        >>> a.is_hermitian
        True
        >>> a[0, 0] = 2*I
        >>> a.is_hermitian
        False
        >>> a[0, 0] = x
        >>> a.is_hermitian
        >>> a[0, 1] = a[1, 0]*I
        >>> a.is_hermitian
        False
        """
        def cond():
            # Yield the three fuzzy conditions lazily: squareness, real
            # diagonal, and conjugate-symmetric off-diagonal entries.
            d = self._smat
            yield self.is_square
            # Pick the cheaper iteration: over stored entries or over rows.
            if len(d) <= self.rows:
                yield fuzzy_and(
                    d[i, i].is_real for i, j in d if i == j)
            else:
                yield fuzzy_and(
                    d[i, i].is_real for i in range(self.rows) if (i, i) in d)
            yield fuzzy_and(
                    ((self[i, j] - self[j, i].conjugate()).is_zero
                    if (j, i) in d else False) for (i, j) in d)
        return fuzzy_and(i for i in cond())
    def is_symmetric(self, simplify=True):
        """Return True if self is symmetric.

        When ``simplify`` is True, each difference self[i, j] - self[j, i]
        is simplified before being compared to zero.

        Examples
        ========
        >>> from sympy.matrices import SparseMatrix, eye
        >>> M = SparseMatrix(eye(3))
        >>> M.is_symmetric()
        True
        >>> M[0, 2] = 1
        >>> M.is_symmetric()
        False
        """
        if simplify:
            return all((k[1], k[0]) in self._smat and
                not (self[k] - self[(k[1], k[0])]).simplify()
                for k in self._smat)
        else:
            return all((k[1], k[0]) in self._smat and
                self[k] == self[(k[1], k[0])] for k in self._smat)
def has(self, *patterns):
"""Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import SparseMatrix, Float
>>> from sympy.abc import x, y
>>> A = SparseMatrix(((1, x), (0.2, 3)))
>>> A.has(x)
True
>>> A.has(y)
False
>>> A.has(Float)
True
"""
return any(self[key].has(*patterns) for key in self._smat)
def applyfunc(self, f):
"""Apply a function to each element of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> m = SparseMatrix(2, 2, lambda i, j: i*2+j)
>>> m
Matrix([
[0, 1],
[2, 3]])
>>> m.applyfunc(lambda i: 2*i)
Matrix([
[0, 2],
[4, 6]])
"""
if not callable(f):
raise TypeError("`f` must be callable.")
out = self.copy()
for k, v in self._smat.items():
fv = f(v)
if fv:
out._smat[k] = fv
else:
out._smat.pop(k, None)
return out
def reshape(self, rows, cols):
"""Reshape matrix while retaining original size.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix(4, 2, range(8))
>>> S.reshape(2, 4)
Matrix([
[0, 1, 2, 3],
[4, 5, 6, 7]])
"""
if len(self) != rows*cols:
raise ValueError("Invalid reshape parameters %d %d" % (rows, cols))
smat = {}
for k, v in self._smat.items():
i, j = k
n = i*self.cols + j
ii, jj = divmod(n, cols)
smat[(ii, jj)] = self._smat[(i, j)]
return self._new(rows, cols, smat)
    def liupc(self):
        """Liu's algorithm, for pre-determination of the Elimination Tree of
        the given matrix, used in row-based symbolic Cholesky factorization.

        Examples
        ========
        >>> from sympy.matrices import SparseMatrix
        >>> S = SparseMatrix([
        ... [1, 0, 3, 2],
        ... [0, 0, 1, 0],
        ... [4, 0, 0, 5],
        ... [0, 6, 7, 0]])
        >>> S.liupc()
        ([[0], [], [0], [1, 2]], [4, 3, 4, 4])

        References
        ==========
        Symbolic Sparse Cholesky Factorization using Elimination Trees,
        Jeroen Van Grondelle (1999)
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582
        """
        # Algorithm 2.4, p 17 of reference
        # get the indices of the elements that are non-zero on or below diag
        R = [[] for r in range(self.rows)]
        for r, c, _ in self.row_list():
            if c <= r:
                R[r].append(c)
        inf = len(R)  # nothing will be this large
        parent = [inf]*self.rows
        virtual = [inf]*self.rows
        for r in range(self.rows):
            for c in R[r][:-1]:
                # Path compression along the virtual chain up to row r.
                while virtual[c] < r:
                    t = virtual[c]
                    virtual[c] = r
                    c = t
                if virtual[c] == inf:
                    parent[c] = virtual[c] = r
        return R, parent
    def row_structure_symbolic_cholesky(self):
        """Symbolic cholesky factorization, for pre-determination of the
        non-zero structure of the Cholesky factorization.

        Examples
        ========
        >>> from sympy.matrices import SparseMatrix
        >>> S = SparseMatrix([
        ... [1, 0, 3, 2],
        ... [0, 0, 1, 0],
        ... [4, 0, 0, 5],
        ... [0, 6, 7, 0]])
        >>> S.row_structure_symbolic_cholesky()
        [[0], [], [0], [1, 2]]

        References
        ==========
        Symbolic Sparse Cholesky Factorization using Elimination Trees,
        Jeroen Van Grondelle (1999)
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582
        """
        R, parent = self.liupc()
        inf = len(R)  # this acts as infinity
        Lrow = copy.deepcopy(R)
        for k in range(self.rows):
            for j in R[k]:
                # Walk up the elimination tree, collecting fill-in columns.
                while j != inf and j != k:
                    Lrow[k].append(j)
                    j = parent[j]
            Lrow[k] = list(sorted(set(Lrow[k])))
        return Lrow
    def _cholesky_sparse(self):
        """Algorithm for numeric Cholesky factorization of a sparse matrix."""
        # Only positions predicted by the symbolic factorization are touched.
        Crowstruc = self.row_structure_symbolic_cholesky()
        C = self.zeros(self.rows)
        for i in range(len(Crowstruc)):
            for j in Crowstruc[i]:
                if i != j:
                    # Off-diagonal: C[i, j] = (A[i, j] - sum) / C[j, j]
                    C[i, j] = self[i, j]
                    summ = 0
                    for p1 in Crowstruc[i]:
                        if p1 < j:
                            for p2 in Crowstruc[j]:
                                if p2 < j:
                                    if p1 == p2:
                                        summ += C[i, p1]*C[j, p1]
                                else:
                                    break
                        else:
                            break
                    C[i, j] -= summ
                    C[i, j] /= C[j, j]
                else:
                    # Diagonal: C[j, j] = sqrt(A[j, j] - sum of squares)
                    C[j, j] = self[j, j]
                    summ = 0
                    for k in Crowstruc[j]:
                        if k < j:
                            summ += C[j, k]**2
                        else:
                            break
                    C[j, j] -= summ
                    C[j, j] = sqrt(C[j, j])
        return C
    def _LDL_sparse(self):
        """Algorithm for numeric LDL factorization, exploiting sparse
        structure.

        Returns (L, D) with L unit lower-triangular and D diagonal;
        only structurally non-zero entries are visited.
        """
        Lrowstruc = self.row_structure_symbolic_cholesky()
        L = self.eye(self.rows)
        D = self.zeros(self.rows, self.cols)

        for i in range(len(Lrowstruc)):
            for j in Lrowstruc[i]:
                if i != j:
                    # Off-diagonal: L[i, j] = (A[i, j] - dot) / D[j, j]
                    L[i, j] = self[i, j]
                    summ = 0
                    # Structure lists are sorted, so stop at column j.
                    for p1 in Lrowstruc[i]:
                        if p1 < j:
                            for p2 in Lrowstruc[j]:
                                if p2 < j:
                                    if p1 == p2:
                                        summ += L[i, p1]*L[j, p1]*D[p1, p1]
                                else:
                                    break
                        else:
                            break
                    L[i, j] -= summ
                    L[i, j] /= D[j, j]
                elif i == j:
                    # Diagonal: D[i, i] = A[i, i] - weighted sum of squares
                    D[i, i] = self[i, i]
                    summ = 0
                    for k in Lrowstruc[i]:
                        if k < i:
                            summ += L[i, k]**2*D[k, k]
                        else:
                            break
                    D[i, i] -= summ

        return L, D
def _lower_triangular_solve(self, rhs):
"""Fast algorithm for solving a lower-triangular system,
exploiting the sparsity of the given matrix.
"""
rows = [[] for i in range(self.rows)]
for i, j, v in self.row_list():
if i > j:
rows[i].append((j, v))
X = rhs.copy()
for i in range(self.rows):
for j, v in rows[i]:
X[i, 0] -= v*X[j, 0]
X[i, 0] /= self[i, i]
return self._new(X)
def _upper_triangular_solve(self, rhs):
"""Fast algorithm for solving an upper-triangular system,
exploiting the sparsity of the given matrix.
"""
rows = [[] for i in range(self.rows)]
for i, j, v in self.row_list():
if i < j:
rows[i].append((j, v))
X = rhs.copy()
for i in range(self.rows - 1, -1, -1):
rows[i].reverse()
for j, v in rows[i]:
X[i, 0] -= v*X[j, 0]
X[i, 0] /= self[i, i]
return self._new(X)
    def _diagonal_solve(self, rhs):
        """Diagonal solve: divide each entry of the column vector ``rhs``
        by the corresponding diagonal entry of ``self``."""
        return self._new(self.rows, 1, lambda i, j: rhs[i, 0] / self[i, i])
def _cholesky_solve(self, rhs):
# for speed reasons, this is not uncommented, but if you are
# having difficulties, try uncommenting to make sure that the
# input matrix is symmetric
#assert self.is_symmetric()
L = self._cholesky_sparse()
Y = L._lower_triangular_solve(rhs)
rv = L.T._upper_triangular_solve(Y)
return rv
def _LDL_solve(self, rhs):
# for speed reasons, this is not uncommented, but if you are
# having difficulties, try uncommenting to make sure that the
# input matrix is symmetric
#assert self.is_symmetric()
L, D = self._LDL_sparse()
Z = L._lower_triangular_solve(rhs)
Y = D._diagonal_solve(Z)
return L.T._upper_triangular_solve(Y)
    def cholesky(self):
        """
        Returns the Cholesky decomposition L of a matrix A
        such that L * L.T = A

        A must be a square, symmetric, positive-definite
        and non-singular matrix

        Examples
        ========

        >>> from sympy.matrices import SparseMatrix
        >>> A = SparseMatrix(((25,15,-5),(15,18,0),(-5,0,11)))
        >>> A.cholesky()
        Matrix([
        [ 5, 0, 0],
        [ 3, 3, 0],
        [-1, 1, 3]])
        >>> A.cholesky() * A.cholesky().T == A
        True
        """
        from sympy.core.numbers import nan, oo
        if not self.is_symmetric():
            raise ValueError('Cholesky decomposition applies only to '
                'symmetric matrices.')
        M = self.as_mutable()._cholesky_sparse()
        # nan/oo in the factor means a zero or negative pivot was hit,
        # i.e. the input was not positive-definite.
        if M.has(nan) or M.has(oo):
            raise ValueError('Cholesky decomposition applies only to '
                'positive-definite matrices')
        return self._new(M)
    def LDLdecomposition(self):
        """
        Returns the LDL Decomposition (matrices ``L`` and ``D``) of matrix
        ``A``, such that ``L * D * L.T == A``. ``A`` must be a square,
        symmetric, positive-definite and non-singular.

        This method eliminates the use of square root and ensures that all
        the diagonal entries of L are 1.

        Examples
        ========

        >>> from sympy.matrices import SparseMatrix
        >>> A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
        >>> L, D = A.LDLdecomposition()
        >>> L
        Matrix([
        [   1,   0, 0],
        [ 3/5,   1, 0],
        [-1/5, 1/3, 1]])
        >>> D
        Matrix([
        [25, 0, 0],
        [ 0, 9, 0],
        [ 0, 0, 9]])
        >>> L * D * L.T == A
        True
        """
        from sympy.core.numbers import nan, oo
        if not self.is_symmetric():
            raise ValueError('LDL decomposition applies only to '
                'symmetric matrices.')
        L, D = self.as_mutable()._LDL_sparse()
        # nan/oo in either factor means a zero or negative pivot was hit,
        # i.e. the input was not positive-definite.
        if L.has(nan) or L.has(oo) or D.has(nan) or D.has(oo):
            raise ValueError('LDL decomposition applies only to '
                'positive-definite matrices')
        return self._new(L), self._new(D)
    def solve_least_squares(self, rhs, method='LDL'):
        """Return the least-square fit to the data.

        By default the LDL routine is used (method='LDL'); other
        methods of matrix inversion can be used. To find out which are
        available, see the docstring of the .inv() method.

        Examples
        ========

        >>> from sympy.matrices import SparseMatrix, Matrix, ones
        >>> A = Matrix([1, 2, 3])
        >>> B = Matrix([2, 3, 4])
        >>> S = SparseMatrix(A.row_join(B))
        >>> S
        Matrix([
        [1, 2],
        [2, 3],
        [3, 4]])

        If each line of S represent coefficients of Ax + By
        and x and y are [2, 3] then S*xy is:

        >>> r = S*Matrix([2, 3]); r
        Matrix([
        [ 8],
        [13],
        [18]])

        But let's add 1 to the middle value and then solve for the
        least-squares value of xy:

        >>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
        Matrix([
        [ 5/3],
        [10/3]])

        The error is given by S*xy - r:

        >>> S*xy - r
        Matrix([
        [1/3],
        [1/3],
        [1/3]])
        >>> _.norm().n(2)
        0.58

        If a different xy is used, the norm will be higher:

        >>> xy += ones(2, 1)/10
        >>> (S*xy - r).norm().n(2)
        1.5

        """
        # Solve the normal equations (A.T*A) x = A.T b.
        t = self.T
        return (t*self).inv(method=method)*t*rhs
def solve(self, rhs, method='LDL'):
"""Return solution to self*soln = rhs using given inversion method.
For a list of possible inversion methods, see the .inv() docstring.
"""
if not self.is_square:
if self.rows < self.cols:
raise ValueError('Under-determined system.')
elif self.rows > self.cols:
raise ValueError('For over-determined system, M, having '
'more rows than columns, try M.solve_least_squares(rhs).')
else:
return self.inv(method=method)*rhs
def _eval_inverse(self, **kwargs):
"""Return the matrix inverse using Cholesky or LDL (default)
decomposition as selected with the ``method`` keyword: 'CH' or 'LDL',
respectively.
Examples
========
>>> from sympy import SparseMatrix, Matrix
>>> A = SparseMatrix([
... [ 2, -1, 0],
... [-1, 2, -1],
... [ 0, 0, 2]])
>>> A.inv('CH')
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A.inv(method='LDL') # use of 'method=' is optional
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A * _
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
"""
sym = self.is_symmetric()
M = self.as_mutable()
I = M.eye(M.rows)
if not sym:
t = M.T
r1 = M[0, :]
M = t*M
I = t*I
method = kwargs.get('method', 'LDL')
if method in "LDL":
solve = M._LDL_solve
elif method == "CH":
solve = M._cholesky_solve
else:
raise NotImplementedError(
'Method may be "CH" or "LDL", not %s.' % method)
rv = M.hstack(*[solve(I[:, i]) for i in range(I.cols)])
if not sym:
scale = (r1*rv[:, 0])[0, 0]
rv /= scale
return self._new(rv)
def __eq__(self, other):
try:
if self.shape != other.shape:
return False
if isinstance(other, SparseMatrix):
return self._smat == other._smat
elif isinstance(other, MatrixBase):
return self._smat == MutableSparseMatrix(other)._smat
except AttributeError:
return False
def __ne__(self, other):
return not self == other
    def as_mutable(self):
        """Returns a mutable version of this matrix.

        Examples
        ========

        >>> from sympy import ImmutableMatrix
        >>> X = ImmutableMatrix([[1, 2], [3, 4]])
        >>> Y = X.as_mutable()
        >>> Y[1, 1] = 5 # Can set values in Y
        >>> Y
        Matrix([
        [1, 2],
        [3, 5]])
        """
        # A new MutableSparseMatrix is built from self, so the copy is
        # independent of the original.
        return MutableSparseMatrix(self)
    def as_immutable(self):
        """Returns an Immutable version of this Matrix."""
        # Local import avoids a circular import with the immutable module.
        from .immutable import ImmutableSparseMatrix
        return ImmutableSparseMatrix(self)
    def nnz(self):
        """Returns the number of non-zero elements in Matrix."""
        # _smat only stores non-zero entries, so its length is the count.
        return len(self._smat)
@classmethod
def zeros(cls, r, c=None):
"""Return an r x c matrix of zeros, square if c is omitted."""
c = r if c is None else c
r = as_int(r)
c = as_int(c)
return cls(r, c, {})
@classmethod
def eye(cls, n):
"""Return an n x n identity matrix."""
n = as_int(n)
return cls(n, n, dict([((i, i), S.One) for i in range(n)]))
class MutableSparseMatrix(SparseMatrix, MatrixBase):
    @classmethod
    def _new(cls, *args, **kwargs):
        # kwargs are accepted for interface compatibility with other
        # matrix classes but are ignored by the mutable constructor.
        return cls(*args)
    def as_mutable(self):
        # Already mutable; return an independent copy so callers can
        # modify the result without affecting self.
        return self.copy()
    def __setitem__(self, key, value):
        """Assign value to position designated by key.

        Examples
        ========

        >>> from sympy.matrices import SparseMatrix, ones
        >>> M = SparseMatrix(2, 2, {})
        >>> M[1] = 1; M
        Matrix([
        [0, 1],
        [0, 0]])
        >>> M[1, 1] = 2; M
        Matrix([
        [0, 1],
        [0, 2]])
        >>> M = SparseMatrix(2, 2, {})
        >>> M[:, 1] = [1, 1]; M
        Matrix([
        [0, 1],
        [0, 1]])
        >>> M = SparseMatrix(2, 2, {})
        >>> M[1, :] = [[1, 1]]; M
        Matrix([
        [0, 0],
        [1, 1]])

        To replace row r you assign to position r*m where m
        is the number of columns:

        >>> M = SparseMatrix(4, 4, {})
        >>> m = M.cols
        >>> M[3*m] = ones(1, m)*2; M
        Matrix([
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [2, 2, 2, 2]])

        And to replace column c you can assign to position c:

        >>> M[2] = ones(m, 1)*4; M
        Matrix([
        [0, 0, 4, 0],
        [0, 0, 4, 0],
        [0, 0, 4, 0],
        [2, 2, 4, 2]])
        """
        # _setitem handles slice/range keys itself and returns None; for a
        # single-position key it returns (i, j, value) for us to store.
        rv = self._setitem(key, value)
        if rv is not None:
            i, j, value = rv
            if value:
                self._smat[(i, j)] = value
            elif (i, j) in self._smat:
                # Assigning zero removes the stored entry to stay sparse.
                del self._smat[(i, j)]

    # Mutable objects are not hashable.
    __hash__ = None
def row_del(self, k):
"""Delete the given row of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix([[0, 0], [0, 1]])
>>> M
Matrix([
[0, 0],
[0, 1]])
>>> M.row_del(0)
>>> M
Matrix([[0, 1]])
See Also
========
col_del
"""
newD = {}
k = a2idx(k, self.rows)
for (i, j) in self._smat:
if i == k:
pass
elif i > k:
newD[i - 1, j] = self._smat[i, j]
else:
newD[i, j] = self._smat[i, j]
self._smat = newD
self.rows -= 1
def col_del(self, k):
"""Delete the given column of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix([[0, 0], [0, 1]])
>>> M
Matrix([
[0, 0],
[0, 1]])
>>> M.col_del(0)
>>> M
Matrix([
[0],
[1]])
See Also
========
row_del
"""
newD = {}
k = a2idx(k, self.cols)
for (i, j) in self._smat:
if j == k:
pass
elif j > k:
newD[i, j - 1] = self._smat[i, j]
else:
newD[i, j] = self._smat[i, j]
self._smat = newD
self.cols -= 1
    def row_swap(self, i, j):
        """Swap, in place, rows i and j.

        Examples
        ========

        >>> from sympy.matrices import SparseMatrix
        >>> S = SparseMatrix.eye(3); S[2, 1] = 2
        >>> S.row_swap(1, 0); S
        Matrix([
        [0, 1, 0],
        [1, 0, 0],
        [0, 2, 1]])
        """
        if i > j:
            i, j = j, i
        rows = self.row_list()
        temp = []
        for ii, jj, v in rows:
            if ii == i:
                # stash row i's entries; they become row j afterwards
                self._smat.pop((ii, jj))
                temp.append((jj, v))
            elif ii == j:
                # move row j's entries directly into row i
                self._smat.pop((ii, jj))
                self._smat[i, jj] = v
            elif ii > j:
                # row_list is sorted by row, so nothing further matters
                break
        for k, v in temp:
            self._smat[j, k] = v
    def col_swap(self, i, j):
        """Swap, in place, columns i and j.

        Examples
        ========

        >>> from sympy.matrices import SparseMatrix
        >>> S = SparseMatrix.eye(3); S[2, 1] = 2
        >>> S.col_swap(1, 0); S
        Matrix([
        [0, 1, 0],
        [1, 0, 0],
        [2, 0, 1]])
        """
        if i > j:
            i, j = j, i
        rows = self.col_list()
        temp = []
        for ii, jj, v in rows:
            if jj == i:
                # stash column i's entries; they become column j afterwards
                self._smat.pop((ii, jj))
                temp.append((ii, v))
            elif jj == j:
                # move column j's entries directly into column i
                self._smat.pop((ii, jj))
                self._smat[ii, i] = v
            elif jj > j:
                # col_list is sorted by column, so nothing further matters
                break
        for k, v in temp:
            self._smat[k, j] = v
    def row_join(self, other):
        """Returns B appended after A (column-wise augmenting)::

            [A B]

        Examples
        ========

        >>> from sympy import SparseMatrix, Matrix
        >>> A = SparseMatrix(((1, 0, 1), (0, 1, 0), (1, 1, 0)))
        >>> A
        Matrix([
        [1, 0, 1],
        [0, 1, 0],
        [1, 1, 0]])
        >>> B = SparseMatrix(((1, 0, 0), (0, 1, 0), (0, 0, 1)))
        >>> B
        Matrix([
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]])
        >>> C = A.row_join(B); C
        Matrix([
        [1, 0, 1, 1, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [1, 1, 0, 0, 0, 1]])
        >>> C == A.row_join(Matrix(B))
        True

        Joining at row ends is the same as appending columns at the end
        of the matrix:

        >>> C == A.col_insert(A.cols, B)
        True
        """
        A, B = self, other
        if not A.rows == B.rows:
            raise ShapeError()
        A = A.copy()
        if not isinstance(B, SparseMatrix):
            k = 0
            b = B._mat  # dense storage: flat row-major list
            for i in range(B.rows):
                for j in range(B.cols):
                    v = b[k]
                    if v:
                        # store only non-zero entries, shifted by A.cols
                        A._smat[(i, j + A.cols)] = v
                    k += 1
        else:
            for (i, j), v in B._smat.items():
                A._smat[(i, j + A.cols)] = v
        A.cols += B.cols
        return A
    def col_join(self, other):
        """Returns B augmented beneath A (row-wise joining)::

            [A]
            [B]

        Examples
        ========

        >>> from sympy import SparseMatrix, Matrix, ones
        >>> A = SparseMatrix(ones(3))
        >>> A
        Matrix([
        [1, 1, 1],
        [1, 1, 1],
        [1, 1, 1]])
        >>> B = SparseMatrix.eye(3)
        >>> B
        Matrix([
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]])
        >>> C = A.col_join(B); C
        Matrix([
        [1, 1, 1],
        [1, 1, 1],
        [1, 1, 1],
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]])
        >>> C == A.col_join(Matrix(B))
        True

        Joining along columns is the same as appending rows at the end
        of the matrix:

        >>> C == A.row_insert(A.rows, Matrix(B))
        True
        """
        A, B = self, other
        if not A.cols == B.cols:
            raise ShapeError()
        A = A.copy()
        if not isinstance(B, SparseMatrix):
            k = 0
            b = B._mat  # dense storage: flat row-major list
            for i in range(B.rows):
                for j in range(B.cols):
                    v = b[k]
                    if v:
                        # store only non-zero entries, shifted by A.rows
                        A._smat[(i + A.rows, j)] = v
                    k += 1
        else:
            for (i, j), v in B._smat.items():
                A._smat[i + A.rows, j] = v
        A.rows += B.rows
        return A
    def copyin_list(self, key, value):
        """Copy the values of a list/tuple into the sub-matrix given by
        ``key`` (delegates to ``copyin_matrix`` after conversion)."""
        if not is_sequence(value):
            raise TypeError("`value` must be of type list or tuple.")
        self.copyin_matrix(key, Matrix(value))
    def copyin_matrix(self, key, value):
        """Copy the entries of matrix ``value`` into the sub-matrix of
        self selected by ``key``; shapes must match exactly."""
        # include this here because it's not part of BaseMatrix
        rlo, rhi, clo, chi = self.key2bounds(key)
        shape = value.shape
        dr, dc = rhi - rlo, chi - clo
        if shape != (dr, dc):
            raise ShapeError(
                "The Matrix `value` doesn't have the same dimensions "
                "as the in sub-Matrix given by `key`.")
        if not isinstance(value, SparseMatrix):
            for i in range(value.rows):
                for j in range(value.cols):
                    self[i + rlo, j + clo] = value[i, j]
        else:
            # First clear the target region, choosing whichever scan is
            # cheaper: the region itself or the stored entries.
            if (rhi - rlo)*(chi - clo) < len(self):
                for i in range(rlo, rhi):
                    for j in range(clo, chi):
                        self._smat.pop((i, j), None)
            else:
                for i, j, v in self.row_list():
                    if rlo <= i < rhi and clo <= j < chi:
                        self._smat.pop((i, j), None)
            # Then write value's non-zero entries into the region.
            for k, v in value._smat.items():
                i, j = k
                self[i + rlo, j + clo] = value[i, j]
    def zip_row_op(self, i, k, f):
        """In-place operation on row ``i`` using two-arg functor whose args are
        interpreted as ``(self[i, j], self[k, j])``.

        Examples
        ========

        >>> from sympy.matrices import SparseMatrix
        >>> M = SparseMatrix.eye(3)*2
        >>> M[0, 1] = -1
        >>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M
        Matrix([
        [2, -1, 0],
        [4,  0, 0],
        [0,  0, 2]])

        See Also
        ========
        row
        row_op
        col_op
        """
        # Delegates to row_op, pairing each entry with the same column
        # of row k.
        self.row_op(i, lambda v, j: f(v, self[k, j]))
def row_op(self, i, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], j)``.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.eye(3)*2
>>> M[0, 1] = -1
>>> M.row_op(1, lambda v, j: v + 2*M[0, j]); M
Matrix([
[2, -1, 0],
[4, 0, 0],
[0, 0, 2]])
See Also
========
row
zip_row_op
col_op
"""
for j in range(self.cols):
v = self._smat.get((i, j), S.Zero)
fv = f(v, j)
if fv:
self._smat[(i, j)] = fv
elif v:
self._smat.pop((i, j))
def col_op(self, j, f):
"""In-place operation on col j using two-arg functor whose args are
interpreted as (self[i, j], i) for i in range(self.rows).
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.eye(3)*2
>>> M[1, 0] = -1
>>> M.col_op(1, lambda v, i: v + 2*M[i, 0]); M
Matrix([
[ 2, 4, 0],
[-1, 0, 0],
[ 0, 0, 2]])
"""
for i in range(self.rows):
v = self._smat.get((i, j), S.Zero)
fv = f(v, i)
if fv:
self._smat[(i, j)] = fv
elif v:
self._smat.pop((i, j))
def fill(self, value):
"""Fill self with the given value.
Notes
=====
Unless many values are going to be deleted (i.e. set to zero)
this will create a matrix that is slower than a dense matrix in
operations.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.zeros(3); M
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> M.fill(1); M
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
"""
if not value:
self._smat = {}
else:
v = self._sympify(value)
self._smat = dict([((i, j), v)
for i in range(self.rows) for j in range(self.cols)])
| mit |
biodrone/plex-desk | desk/flask/lib/python3.4/site-packages/flask/logging.py | 838 | 1398 | # -*- coding: utf-8 -*-
"""
flask.logging
~~~~~~~~~~~~~
Implements the logging support for Flask.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from logging import getLogger, StreamHandler, Formatter, getLoggerClass, DEBUG
def create_logger(app):
    """Creates a logger for the given application.  This logger works
    similar to a regular Python logger but changes the effective logging
    level based on the application's debug flag.  Furthermore this
    function also removes all attached handlers in case there was a
    logger with the log name before.
    """
    base_logger_cls = getLoggerClass()

    class DebugLogger(base_logger_cls):
        # Report DEBUG while the app is in debug mode and no explicit
        # level was set on the logger itself (level 0 == NOTSET).
        def getEffectiveLevel(self):
            if self.level == 0 and app.debug:
                return DEBUG
            return base_logger_cls.getEffectiveLevel(self)

    class DebugHandler(StreamHandler):
        # Only emit records while the app is in debug mode.
        def emit(self, record):
            if app.debug:
                StreamHandler.emit(self, record)

    handler = DebugHandler()
    handler.setLevel(DEBUG)
    handler.setFormatter(Formatter(app.debug_log_format))
    logger = getLogger(app.logger_name)
    # In case that was not a new logger, get rid of all the handlers
    # already attached to it.
    del logger.handlers[:]
    logger.__class__ = DebugLogger
    logger.addHandler(handler)
    return logger
| mit |
jlspyaozhongkai/Uter | third_party_backup/Python-2.7.9/Demo/tix/tixwidgets.py | 8 | 38406 | # -*-mode: python; fill-column: 75; tab-width: 8; coding: iso-latin-1-unix -*-
#
# $Id$
#
# tixwidgets.py --
#
# For Tix, see http://tix.sourceforge.net
#
# This is a demo program of some of the Tix widgets available in Python.
# If you have installed Python & Tix properly, you can execute this as
#
# % python tixwidgets.py
#
import os, os.path, sys, Tix
from Tkconstants import *
import traceback, tkMessageBox
# Flag values for tk.dooneevent(); TCL_ALL_EVENTS (0) blocks until the
# next event of any kind arrives.
TCL_DONT_WAIT = 1<<1
TCL_WINDOW_EVENTS = 1<<2
TCL_FILE_EVENTS = 1<<3
TCL_TIMER_EVENTS = 1<<4
TCL_IDLE_EVENTS = 1<<5
TCL_ALL_EVENTS = 0
class Demo:
    """Top-level application object for the Tix widget demonstration.

    Builds the main window (menu bar, notebook of demo pages, status bar)
    and runs a hand-rolled Tcl event loop so keyboard interrupts and
    background errors can be handled explicitly.
    """
    def __init__(self, top):
        self.root = top
        self.exit = -1              # loop() runs while this is negative

        self.dir = None             # script directory
        self.balloon = None         # balloon widget
        self.useBalloons = Tix.StringVar()
        self.useBalloons.set('0')
        self.statusbar = None       # status bar widget
        self.welmsg = None          # Msg widget
        self.welfont = ''           # font name
        self.welsize = ''           # font size

        # Put the script's directory (or cwd) onto sys.path so the
        # sample programs under ./samples can be imported.
        progname = sys.argv[0]
        dirname = os.path.dirname(progname)
        if dirname and dirname != os.curdir:
            self.dir = dirname
            index = -1
            for i in range(len(sys.path)):
                p = sys.path[i]
                if p in ("", os.curdir):
                    index = i
            if index >= 0:
                sys.path[index] = dirname
            else:
                sys.path.insert(0, dirname)
        else:
            self.dir = os.getcwd()
        sys.path.insert(0, self.dir+'/samples')

    def MkMainMenu(self):
        """Build and return the File/Help menu bar frame."""
        top = self.root
        w = Tix.Frame(top, bd=2, relief=RAISED)
        file = Tix.Menubutton(w, text='File', underline=0, takefocus=0)
        help = Tix.Menubutton(w, text='Help', underline=0, takefocus=0)
        file.pack(side=LEFT)
        help.pack(side=RIGHT)
        fm = Tix.Menu(file, tearoff=0)
        file['menu'] = fm
        hm = Tix.Menu(help, tearoff=0)
        help['menu'] = hm

        fm.add_command(label='Exit', underline=1,
                     command = lambda self=self: self.quitcmd () )
        hm.add_checkbutton(label='BalloonHelp', underline=0, command=ToggleHelp,
                           variable=self.useBalloons)
        # The trace variable option doesn't seem to work, instead I use 'command'
        #apply(w.tk.call, ('trace', 'variable', self.useBalloons, 'w',
        #                 ToggleHelp))

        return w

    def MkMainNotebook(self):
        """Build and return the notebook holding all the demo pages.
        Pages are created lazily via their createcmd callbacks."""
        top = self.root
        w = Tix.NoteBook(top, ipadx=5, ipady=5, options="""
        tagPadX 6
        tagPadY 4
        borderWidth 2
        """)
        # This may be required if there is no *Background option
        top['bg'] = w['bg']

        w.add('wel', label='Welcome', underline=0,
              createcmd=lambda w=w, name='wel': MkWelcome(w, name))
        w.add('cho', label='Choosers', underline=0,
              createcmd=lambda w=w, name='cho': MkChoosers(w, name))
        w.add('scr', label='Scrolled Widgets', underline=0,
              createcmd=lambda w=w, name='scr': MkScroll(w, name))
        w.add('mgr', label='Manager Widgets', underline=0,
              createcmd=lambda w=w, name='mgr': MkManager(w, name))
        w.add('dir', label='Directory List', underline=0,
              createcmd=lambda w=w, name='dir': MkDirList(w, name))
        w.add('exp', label='Run Sample Programs', underline=0,
              createcmd=lambda w=w, name='exp': MkSample(w, name))
        return w

    def MkMainStatus(self):
        """Build and return the status bar frame (also stored on the
        global demo object for the balloon widget)."""
        global demo
        top = self.root

        w = Tix.Frame(top, relief=Tix.RAISED, bd=1)
        demo.statusbar = Tix.Label(w, relief=Tix.SUNKEN, bd=1)
        demo.statusbar.form(padx=3, pady=3, left=0, right='%70')
        return w

    def build(self):
        """Assemble the full UI: title/geometry, menu, notebook, status
        bar, balloon help, and the window-close protocol handler."""
        root = self.root
        z = root.winfo_toplevel()
        z.wm_title('Tix Widget Demonstration')
        # Pick a window size that fits on small screens.
        if z.winfo_screenwidth() <= 800:
            z.geometry('790x590+10+10')
        else:
            z.geometry('890x640+10+10')
        demo.balloon = Tix.Balloon(root)
        frame1 = self.MkMainMenu()
        frame2 = self.MkMainNotebook()
        frame3 = self.MkMainStatus()
        frame1.pack(side=TOP, fill=X)
        frame3.pack(side=BOTTOM, fill=X)
        frame2.pack(side=TOP, expand=1, fill=BOTH, padx=4, pady=4)
        demo.balloon['statusbar'] = demo.statusbar
        z.wm_protocol("WM_DELETE_WINDOW", lambda self=self: self.quitcmd())

        # To show Tcl errors - uncomment this to see the listbox bug.
        # Tkinter defines a Tcl tkerror procedure that in effect
        # silences all background Tcl error reporting.
        # root.tk.eval('if {[info commands tkerror] != ""} {rename tkerror pytkerror}')

    def quitcmd (self):
        """Quit our mainloop. It is up to you to call root.destroy() after."""
        self.exit = 0

    def loop(self):
        """This is an explict replacement for _tkinter mainloop()
        It lets you catch keyboard interrupts easier, and avoids
        the 20 msec. dead sleep() which burns a constant CPU."""
        while self.exit < 0:
            # There are 2 whiles here. The outer one lets you continue
            # after a ^C interrupt.
            try:
                # This is the replacement for _tkinter mainloop()
                # It blocks waiting for the next Tcl event using select.
                while self.exit < 0:
                    self.root.tk.dooneevent(TCL_ALL_EVENTS)
            except SystemExit:
                # Tkinter uses SystemExit to exit
                #print 'Exit'
                self.exit = 1
                return
            except KeyboardInterrupt:
                if tkMessageBox.askquestion ('Interrupt', 'Really Quit?') == 'yes':
                    # self.tk.eval('exit')
                    self.exit = 1
                    return
                continue
            except:
                # Otherwise it's some other error - be nice and say why
                t, v, tb = sys.exc_info()
                text = ""
                for line in traceback.format_exception(t,v,tb):
                    text += line + '\n'
                try: tkMessageBox.showerror ('Error', text)
                except: pass
                self.exit = 1
                raise SystemExit, 1

    def destroy (self):
        """Tear down the Tk window after loop() has returned."""
        self.root.destroy()
def RunMain(root):
    """Create the global Demo instance on *root*, build its UI and run
    the event loop until the user quits."""
    global demo

    demo = Demo(root)

    demo.build()
    demo.loop()
    demo.destroy()
# Tabs
def MkWelcome(nb, name):
    """Populate the 'Welcome' notebook page: a font toolbar on top of a
    scrolled welcome message."""
    w = nb.page(name)
    bar = MkWelcomeBar(w)
    text = MkWelcomeText(w)
    bar.pack(side=TOP, fill=X, padx=2, pady=2)
    text.pack(side=TOP, fill=BOTH, expand=1)
def MkWelcomeBar(top):
    """Build the welcome page's toolbar: two combo boxes selecting the
    font family and point size of the welcome message."""
    global demo

    w = Tix.Frame(top, bd=2, relief=Tix.GROOVE)
    b1 = Tix.ComboBox(w, command=lambda w=top: MainTextFont(w))
    b2 = Tix.ComboBox(w, command=lambda w=top: MainTextFont(w))
    b1.entry['width'] = 15
    b1.slistbox.listbox['height'] = 3
    b2.entry['width'] = 4
    b2.slistbox.listbox['height'] = 3

    # MainTextFont reads the current selections back from these.
    demo.welfont = b1
    demo.welsize = b2

    b1.insert(Tix.END, 'Courier')
    b1.insert(Tix.END, 'Helvetica')
    b1.insert(Tix.END, 'Lucida')
    b1.insert(Tix.END, 'Times Roman')

    b2.insert(Tix.END, '8')
    b2.insert(Tix.END, '10')
    b2.insert(Tix.END, '12')
    b2.insert(Tix.END, '14')
    b2.insert(Tix.END, '18')

    # Default to Helvetica 14.
    b1.pick(1)
    b2.pick(3)

    b1.pack(side=Tix.LEFT, padx=4, pady=4)
    b2.pack(side=Tix.LEFT, padx=4, pady=4)

    demo.balloon.bind_widget(b1, msg='Choose\na font',
                             statusmsg='Choose a font for this page')
    demo.balloon.bind_widget(b2, msg='Point size',
                             statusmsg='Choose the font size for this page')
    return w
def MkWelcomeText(top):
    """Build the scrolled welcome message area; the Message widget is
    stored on the global demo so MainTextFont can restyle it."""
    global demo

    w = Tix.ScrolledWindow(top, scrollbar='auto')
    win = w.window
    text = 'Welcome to TIX in Python'
    title = Tix.Label(win,
                      bd=0, width=30, anchor=Tix.N, text=text)
    msg = Tix.Message(win,
                      bd=0, width=400, anchor=Tix.N,
                      text='Tix is a set of mega-widgets based on TK. This program \
demonstrates the widgets in the Tix widget set. You can choose the pages \
in this window to look at the corresponding widgets. \n\n\
To quit this program, choose the "File | Exit" command.\n\n\
For more information, see http://tix.sourceforge.net.')
    title.pack(expand=1, fill=Tix.BOTH, padx=10, pady=10)
    msg.pack(expand=1, fill=Tix.BOTH, padx=10, pady=10)
    demo.welmsg = msg
    return w
def MainTextFont(w):
    """Apply the family/size chosen in the welcome-page combo boxes to
    the welcome message widget (no-op before the page is created)."""
    global demo

    if not demo.welmsg:
        return
    family = demo.welfont['value']
    size = demo.welsize['value']
    # Tk addresses 'Times Roman' by its short family name.
    if family == 'Times Roman':
        family = 'times'
    demo.welmsg['font'] = '%s %s' % (family, size)
def ToggleHelp():
    """Enable or disable balloon help to match the menu checkbutton."""
    state = 'both' if demo.useBalloons.get() == '1' else 'none'
    demo.balloon['state'] = state
def MkChoosers(nb, name):
    """Populate the 'Choosers' notebook page: one labelled frame per
    chooser-style Tix widget, laid out in three columns with form()."""
    w = nb.page(name)
    options = "label.padX 4"

    til = Tix.LabelFrame(w, label='Chooser Widgets', options=options)
    cbx = Tix.LabelFrame(w, label='tixComboBox', options=options)
    ctl = Tix.LabelFrame(w, label='tixControl', options=options)
    sel = Tix.LabelFrame(w, label='tixSelect', options=options)
    opt = Tix.LabelFrame(w, label='tixOptionMenu', options=options)
    fil = Tix.LabelFrame(w, label='tixFileEntry', options=options)
    fbx = Tix.LabelFrame(w, label='tixFileSelectBox', options=options)
    tbr = Tix.LabelFrame(w, label='Tool Bar', options=options)

    MkTitle(til.frame)
    MkCombo(cbx.frame)
    MkControl(ctl.frame)
    MkSelect(sel.frame)
    MkOptMenu(opt.frame)
    MkFileEnt(fil.frame)
    MkFileBox(fbx.frame)
    MkToolBar(tbr.frame)

    # First column: comBox and selector
    cbx.form(top=0, left=0, right='%33')
    sel.form(left=0, right='&'+str(cbx), top=cbx)
    opt.form(left=0, right='&'+str(cbx), top=sel, bottom=-1)
    # Second column: title .. etc
    til.form(left=cbx, top=0,right='%66')
    ctl.form(left=cbx, right='&'+str(til), top=til)
    fil.form(left=cbx, right='&'+str(til), top=ctl)
    tbr.form(left=cbx, right='&'+str(til), top=fil, bottom=-1)
    #
    # Third column: file selection
    fbx.form(right=-1, top=0, left='%66')
def MkCombo(w):
    """Demonstrate the three ComboBox flavors: static (read-only),
    editable, and editable with history."""
    options="label.width %d label.anchor %s entry.width %d" % (10, Tix.E, 14)

    static = Tix.ComboBox(w, label='Static', editable=0, options=options)
    editable = Tix.ComboBox(w, label='Editable', editable=1, options=options)
    history = Tix.ComboBox(w, label='History', editable=1, history=1,
                           anchor=Tix.E, options=options)
    static.insert(Tix.END, 'January')
    static.insert(Tix.END, 'February')
    static.insert(Tix.END, 'March')
    static.insert(Tix.END, 'April')
    static.insert(Tix.END, 'May')
    static.insert(Tix.END, 'June')
    static.insert(Tix.END, 'July')
    static.insert(Tix.END, 'August')
    static.insert(Tix.END, 'September')
    static.insert(Tix.END, 'October')
    static.insert(Tix.END, 'November')
    static.insert(Tix.END, 'December')

    editable.insert(Tix.END, 'Angola')
    editable.insert(Tix.END, 'Bangladesh')
    editable.insert(Tix.END, 'China')
    editable.insert(Tix.END, 'Denmark')
    editable.insert(Tix.END, 'Ecuador')

    history.insert(Tix.END, '/usr/bin/ksh')
    history.insert(Tix.END, '/usr/local/lib/python')
    history.insert(Tix.END, '/var/adm')

    static.pack(side=Tix.TOP, padx=5, pady=3)
    editable.pack(side=Tix.TOP, padx=5, pady=3)
    history.pack(side=Tix.TOP, padx=5, pady=3)
# Cyclic list of values for the 'States' spin control (see spin_cmd).
states = ['Bengal', 'Delhi', 'Karnataka', 'Tamil Nadu']
def spin_cmd(w, inc):
    """Step the 'States' spin control by *inc* (+1 or -1), wrapping
    around at either end of the ``states`` list."""
    last = len(states) - 1
    idx = states.index(demo_spintxt.get()) + inc
    if idx < 0:
        idx = last
    if idx > last:
        idx = 0
    # Returning the new value from this callback doesn't work; setting
    # the Tix variable does.
    demo_spintxt.set(states[idx])
def spin_validate(w):
    """Validate the 'States' entry: return the matching state name, or
    the first state when the text isn't recognized.

    Unlike spin_cmd's incr/decr callbacks, returning a value from the
    validatecmd callback does take effect.
    """
    global states, demo_spintxt

    current = demo_spintxt.get()
    if current in states:
        return states[states.index(current)]
    return states[0]
def MkControl(w):
    """Demonstrate Tix.Control: a plain numeric spinner and a text
    spinner cycling through ``states`` via spin_cmd/spin_validate."""
    global demo_spintxt

    options="label.width %d label.anchor %s entry.width %d" % (10, Tix.E, 13)

    demo_spintxt = Tix.StringVar()
    demo_spintxt.set(states[0])
    simple = Tix.Control(w, label='Numbers', options=options)
    spintxt = Tix.Control(w, label='States', variable=demo_spintxt,
                          options=options)
    spintxt['incrcmd'] = lambda w=spintxt: spin_cmd(w, 1)
    spintxt['decrcmd'] = lambda w=spintxt: spin_cmd(w, -1)
    spintxt['validatecmd'] = lambda w=spintxt: spin_validate(w)

    simple.pack(side=Tix.TOP, padx=5, pady=3)
    spintxt.pack(side=Tix.TOP, padx=5, pady=3)
def MkSelect(w):
    """Demonstrate Tix.Select: a radio-style group (at most one choice)
    and a checkbox-style group (any number of choices)."""
    options = "label.anchor %s" % Tix.CENTER

    sel1 = Tix.Select(w, label='Mere Mortals', allowzero=1, radio=1,
                      orientation=Tix.VERTICAL,
                      labelside=Tix.TOP,
                      options=options)
    sel2 = Tix.Select(w, label='Geeks', allowzero=1, radio=0,
                      orientation=Tix.VERTICAL,
                      labelside= Tix.TOP,
                      options=options)

    sel1.add('eat', text='Eat')
    sel1.add('work', text='Work')
    sel1.add('play', text='Play')
    sel1.add('party', text='Party')
    sel1.add('sleep', text='Sleep')

    sel2.add('eat', text='Eat')
    sel2.add('prog1', text='Program')
    sel2.add('prog2', text='Program')
    sel2.add('prog3', text='Program')
    sel2.add('sleep', text='Sleep')

    sel1.pack(side=Tix.LEFT, padx=5, pady=3, fill=Tix.X)
    sel2.pack(side=Tix.LEFT, padx=5, pady=3, fill=Tix.X)
def MkOptMenu(w):
    """Demonstrate Tix.OptionMenu with a separator entry ('sep')."""
    options='menubutton.width 15 label.anchor %s' % Tix.E

    m = Tix.OptionMenu(w, label='File Format : ', options=options)
    m.add_command('text', label='Plain Text')
    m.add_command('post', label='PostScript')
    m.add_command('format', label='Formatted Text')
    m.add_command('html', label='HTML')
    m.add_command('sep')
    m.add_command('tex', label='LaTeX')
    m.add_command('rtf', label='Rich Text Format')
    m.pack(fill=Tix.X, padx=5, pady=3)
def MkFileEnt(w):
    """Demonstrate Tix.FileEntry: an entry plus an 'open file' button
    that pops up a TixFileSelectDialog."""
    msg = Tix.Message(w,
                      relief=Tix.FLAT, width=240, anchor=Tix.N,
                      text='Press the "open file" icon button and a TixFileSelectDialog will popup.')
    ent = Tix.FileEntry(w, label='Select a file : ')
    msg.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=3, pady=3)
    ent.pack(side=Tix.TOP, fill=Tix.X, padx=3, pady=3)
def MkFileBox(w):
    """The FileSelectBox is a Motif-style box with various enhancements.
    For example, you can adjust the size of the two listboxes
    and your past selections are recorded.
    """
    msg = Tix.Message(w,
                      relief=Tix.FLAT, width=240, anchor=Tix.N,
                      text='The Tix FileSelectBox is a Motif-style box with various enhancements. For example, you can adjust the size of the two listboxes and your past selections are recorded.')
    box = Tix.FileSelectBox(w)
    msg.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=3, pady=3)
    box.pack(side=Tix.TOP, fill=Tix.X, padx=3, pady=3)
def MkToolBar(w):
    """The Select widget is also good for arranging buttons in a tool bar.
    """
    global demo

    options='frame.borderWidth 1'

    msg = Tix.Message(w,
                      relief=Tix.FLAT, width=240, anchor=Tix.N,
                      text='The Select widget is also good for arranging buttons in a tool bar.')
    bar = Tix.Frame(w, bd=2, relief=Tix.RAISED)
    # font: checkbox-style group; para: radio-style group.
    font = Tix.Select(w, allowzero=1, radio=0, label='', options=options)
    para = Tix.Select(w, allowzero=0, radio=1, label='', options=options)

    # Bitmaps are loaded from the demo's bitmaps directory ('@' prefix
    # tells Tk the name is a file path).
    font.add('bold', bitmap='@' + demo.dir + '/bitmaps/bold.xbm')
    font.add('italic', bitmap='@' + demo.dir + '/bitmaps/italic.xbm')
    font.add('underline', bitmap='@' + demo.dir + '/bitmaps/underline.xbm')
    font.add('capital', bitmap='@' + demo.dir + '/bitmaps/capital.xbm')

    para.add('left', bitmap='@' + demo.dir + '/bitmaps/leftj.xbm')
    para.add('right', bitmap='@' + demo.dir + '/bitmaps/rightj.xbm')
    para.add('center', bitmap='@' + demo.dir + '/bitmaps/centerj.xbm')
    para.add('justify', bitmap='@' + demo.dir + '/bitmaps/justify.xbm')

    msg.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=3, pady=3)
    bar.pack(side=Tix.TOP, fill=Tix.X, padx=3, pady=3)
    font.pack({'in':bar}, side=Tix.LEFT, padx=3, pady=3)
    para.pack({'in':bar}, side=Tix.LEFT, padx=3, pady=3)
def MkTitle(w):
    """Introductory text for the Choosers page."""
    msg = Tix.Message(w,
                      relief=Tix.FLAT, width=240, anchor=Tix.N,
                      text='There are many types of "chooser" widgets that allow the user to input different types of information')
    msg.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=3, pady=3)
def MkScroll(nb, name):
    """Populate the 'Scrolled Widgets' notebook page with three demo
    frames laid out side by side."""
    w = nb.page(name)
    options='label.padX 4'

    sls = Tix.LabelFrame(w, label='Tix.ScrolledListBox', options=options)
    swn = Tix.LabelFrame(w, label='Tix.ScrolledWindow', options=options)
    stx = Tix.LabelFrame(w, label='Tix.ScrolledText', options=options)

    MkSList(sls.frame)
    MkSWindow(swn.frame)
    MkSText(stx.frame)

    sls.form(top=0, left=0, right='%33', bottom=-1)
    swn.form(top=0, left=sls, right='%66', bottom=-1)
    stx.form(top=0, left=swn, right=-1, bottom=-1)
def MkSList(w):
    """This TixScrolledListBox is configured so that it uses scrollbars
    only when it is necessary. Use the handles to resize the listbox and
    watch the scrollbars automatically appear and disappear. """
    # Fixed-size top area (propagate(0) below keeps it at 300x330) plus a
    # bottom strip holding the Reset button.
    top = Tix.Frame(w, width=300, height=330)
    bot = Tix.Frame(w)
    msg = Tix.Message(top,
                      relief=Tix.FLAT, width=200, anchor=Tix.N,
                      text='This TixScrolledListBox is configured so that it uses scrollbars only when it is necessary. Use the handles to resize the listbox and watch the scrollbars automatically appear and disappear.')
    # NOTE: `list` shadows the builtin; kept for byte-compatibility.
    list = Tix.ScrolledListBox(top, scrollbar='auto')
    list.place(x=50, y=150, width=120, height=80)
    list.listbox.insert(Tix.END, 'Alabama')
    list.listbox.insert(Tix.END, 'California')
    list.listbox.insert(Tix.END, 'Montana')
    list.listbox.insert(Tix.END, 'New Jersey')
    list.listbox.insert(Tix.END, 'New York')
    list.listbox.insert(Tix.END, 'Pennsylvania')
    list.listbox.insert(Tix.END, 'Washington')
    # Resize handle the user drags to grow/shrink the listbox.
    rh = Tix.ResizeHandle(top, bg='black',
                          relief=Tix.RAISED,
                          handlesize=8, gridded=1, minwidth=50, minheight=30)
    btn = Tix.Button(bot, text='Reset', command=lambda w=rh, x=list: SList_reset(w,x))
    top.propagate(0)  # keep the explicit width/height of `top`
    msg.pack(fill=Tix.X)
    btn.pack(anchor=Tix.CENTER)
    top.pack(expand=1, fill=Tix.BOTH)
    bot.pack(fill=Tix.BOTH)
    # Attach the resize handle once the listbox is actually mapped; done via
    # tixDoWhenIdle so the widget has a real geometry first.
    list.bind('<Map>', func=lambda arg=0, rh=rh, list=list:
              list.tk.call('tixDoWhenIdle', str(rh), 'attachwidget', str(list)))
def SList_reset(rh, list):
    # Restore the listbox to the geometry it was given in MkSList and
    # re-attach the resize handle so its grips follow the widget again.
    list.place(x=50, y=150, width=120, height=80)
    list.update()  # force geometry to take effect before re-attaching
    rh.attach_widget(list)
def MkSWindow(w):
    """The ScrolledWindow widget allows you to scroll any kind of Tk
    widget. It is more versatile than a scrolled canvas widget.
    """
    global demo
    text = 'The Tix ScrolledWindow widget allows you to scroll any kind of Tk widget. It is more versatile than a scrolled canvas widget.'
    # Demo image shipped with the Tix distribution; degrade gracefully if absent.
    file = os.path.join(demo.dir, 'bitmaps', 'tix.gif')
    if not os.path.isfile(file):
        text += ' (Image missing)'
    top = Tix.Frame(w, width=330, height=330)
    bot = Tix.Frame(w)
    msg = Tix.Message(top,
                      relief=Tix.FLAT, width=200, anchor=Tix.N,
                      text=text)
    win = Tix.ScrolledWindow(top, scrollbar='auto')
    image1 = win.window.image_create('photo', file=file)
    lbl = Tix.Label(win.window, image=image1)
    lbl.pack(expand=1, fill=Tix.BOTH)
    win.place(x=30, y=150, width=190, height=120)
    # Resize handle so the user can see scrollbars appear/disappear.
    rh = Tix.ResizeHandle(top, bg='black',
                          relief=Tix.RAISED,
                          handlesize=8, gridded=1, minwidth=50, minheight=30)
    btn = Tix.Button(bot, text='Reset', command=lambda w=rh, x=win: SWindow_reset(w,x))
    top.propagate(0)  # keep the explicit 330x330 size
    msg.pack(fill=Tix.X)
    btn.pack(anchor=Tix.CENTER)
    top.pack(expand=1, fill=Tix.BOTH)
    bot.pack(fill=Tix.BOTH)
    # Attach the resize handle only after the window is mapped.
    win.bind('<Map>', func=lambda arg=0, rh=rh, win=win:
             win.tk.call('tixDoWhenIdle', str(rh), 'attachwidget', str(win)))
def SWindow_reset(rh, win):
    # Restore the scrolled window to the geometry used in MkSWindow and
    # re-attach the resize handle.
    win.place(x=30, y=150, width=190, height=120)
    win.update()  # force geometry to take effect before re-attaching
    rh.attach_widget(win)
def MkSText(w):
    """The TixScrolledWindow widget allows you to scroll any kind of Tk
    widget. It is more versatile than a scrolled canvas widget."""
    top = Tix.Frame(w, width=330, height=330)
    bot = Tix.Frame(w)
    msg = Tix.Message(top,
                      relief=Tix.FLAT, width=200, anchor=Tix.N,
                      text='The Tix ScrolledWindow widget allows you to scroll any kind of Tk widget. It is more versatile than a scrolled canvas widget.')
    win = Tix.ScrolledText(top, scrollbar='auto')
    win.text['wrap'] = 'none'  # no wrapping, so the horizontal bar gets exercised
    # The inserted text explains the -scrollbar modifier syntax itself.
    win.text.insert(Tix.END, '''When -scrollbar is set to "auto", the
scrollbars are shown only when needed.
Additional modifiers can be used to force a
scrollbar to be shown or hidden. For example,
"auto -y" means the horizontal scrollbar
should be shown when needed but the vertical
scrollbar should always be hidden;
"auto +x" means the vertical scrollbar
should be shown when needed but the horizontal
scrollbar should always be shown, and so on.'''
)
    win.place(x=30, y=150, width=190, height=100)
    rh = Tix.ResizeHandle(top, bg='black',
                          relief=Tix.RAISED,
                          handlesize=8, gridded=1, minwidth=50, minheight=30)
    btn = Tix.Button(bot, text='Reset', command=lambda w=rh, x=win: SText_reset(w,x))
    top.propagate(0)  # keep the explicit 330x330 size
    msg.pack(fill=Tix.X)
    btn.pack(anchor=Tix.CENTER)
    top.pack(expand=1, fill=Tix.BOTH)
    bot.pack(fill=Tix.BOTH)
    # Attach the resize handle only after the text widget is mapped.
    win.bind('<Map>', func=lambda arg=0, rh=rh, win=win:
             win.tk.call('tixDoWhenIdle', str(rh), 'attachwidget', str(win)))
def SText_reset(rh, win):
    """Reset the scrolled-text demo widget to its initial geometry.

    MkSText places the widget at height=100, so Reset must restore
    height=100 as well (the previous value of 120 silently grew the widget
    on every Reset click, unlike SList_reset/SWindow_reset which restore
    their original geometry). Afterwards the resize handle is re-attached.
    """
    win.place(x=30, y=150, width=190, height=100)
    win.update()  # force geometry to take effect before re-attaching
    rh.attach_widget(win)
def MkManager(nb, name):
    """Build the 'manager widgets' page: PanedWindow demo on the left,
    NoteBook demo on the right."""
    page = nb.page(name)
    opts = 'label.padX 4'
    paned_box = Tix.LabelFrame(page, label='Tix.PanedWindow', options=opts)
    note_box = Tix.LabelFrame(page, label='Tix.NoteBook', options=opts)
    MkPanedWindow(paned_box.frame)
    MkNoteBook(note_box.frame)
    paned_box.form(top=0, left=0, right=note_box, bottom=-1)
    note_box.form(top=0, right=-1, bottom=-1)
def MkPanedWindow(w):
    """The PanedWindow widget allows the user to interactively manipulate
    the sizes of several panes. The panes can be arranged either vertically
    or horizontally.
    """
    msg = Tix.Message(w,
                      relief=Tix.FLAT, width=240, anchor=Tix.N,
                      text='The PanedWindow widget allows the user to interactively manipulate the sizes of several panes. The panes can be arranged either vertically or horizontally.')
    group = Tix.LabelEntry(w, label='Newsgroup:', options='entry.width 25')
    group.entry.insert(0,'comp.lang.python')
    # Two vertically stacked panes: article list on top, article body below.
    pane = Tix.PanedWindow(w, orientation='vertical')
    p1 = pane.add('list', min=70, size=100)
    p2 = pane.add('text', min=70)
    # NOTE: `list` shadows the builtin; kept for byte-compatibility.
    list = Tix.ScrolledListBox(p1)
    text = Tix.ScrolledText(p2)
    # Mock newsreader content.
    list.listbox.insert(Tix.END, " 12324 Re: Tkinter is good for your health")
    list.listbox.insert(Tix.END, "+ 12325 Re: Tkinter is good for your health")
    list.listbox.insert(Tix.END, "+ 12326 Re: Tix is even better for your health (Was: Tkinter is good...)")
    list.listbox.insert(Tix.END, " 12327 Re: Tix is even better for your health (Was: Tkinter is good...)")
    list.listbox.insert(Tix.END, "+ 12328 Re: Tix is even better for your health (Was: Tkinter is good...)")
    list.listbox.insert(Tix.END, " 12329 Re: Tix is even better for your health (Was: Tkinter is good...)")
    list.listbox.insert(Tix.END, "+ 12330 Re: Tix is even better for your health (Was: Tkinter is good...)")
    text.text['bg'] = list.listbox['bg']
    text.text['wrap'] = 'none'
    text.text.insert(Tix.END, """
Mon, 19 Jun 1995 11:39:52 comp.lang.python Thread 34 of 220
Lines 353 A new way to put text and bitmaps together iNo responses
ioi@blue.seas.upenn.edu Ioi K. Lam at University of Pennsylvania
Hi,
I have implemented a new image type called "compound". It allows you
to glue together a bunch of bitmaps, images and text strings together
to form a bigger image. Then you can use this image with widgets that
support the -image option. For example, you can display a text string
together with a bitmap, at the same time, inside a TK button widget.
""")
    list.pack(expand=1, fill=Tix.BOTH, padx=4, pady=6)
    text.pack(expand=1, fill=Tix.BOTH, padx=4, pady=6)
    msg.pack(side=Tix.TOP, padx=3, pady=3, fill=Tix.BOTH)
    group.pack(side=Tix.TOP, padx=3, pady=3, fill=Tix.BOTH)
    pane.pack(side=Tix.TOP, padx=3, pady=3, fill=Tix.BOTH, expand=1)
def MkNoteBook(w):
    """Build the NoteBook demo: two pages ('Hard Disk' / 'Network'), each
    with a shared OK/Cancel column and a stack of Tix.Control spinners."""
    msg = Tix.Message(w,
                      relief=Tix.FLAT, width=240, anchor=Tix.N,
                      text='The NoteBook widget allows you to layout a complex interface into individual pages.')
    # prefix = Tix.OptionName(w)
    # if not prefix: prefix = ''
    # w.option_add('*' + prefix + '*TixNoteBook*tagPadX', 8)
    options = "entry.width %d label.width %d label.anchor %s" % (10, 18, Tix.E)
    nb = Tix.NoteBook(w, ipadx=6, ipady=6, options=options)
    nb.add('hard_disk', label="Hard Disk", underline=0)
    nb.add('network', label="Network", underline=0)
    # Frame for the buttons that are present on all pages
    common = Tix.Frame(nb.hard_disk)
    common.pack(side=Tix.RIGHT, padx=2, pady=2, fill=Tix.Y)
    CreateCommonButtons(common)
    # Widgets belonging only to this page
    a = Tix.Control(nb.hard_disk, value=12, label='Access Time: ')
    # NOTE: rebinding `w` shadows the function parameter; harmless here
    # because the original parent is no longer referenced below.
    w = Tix.Control(nb.hard_disk, value=400, label='Write Throughput: ')
    r = Tix.Control(nb.hard_disk, value=400, label='Read Throughput: ')
    c = Tix.Control(nb.hard_disk, value=1021, label='Capacity: ')
    a.pack(side=Tix.TOP, padx=20, pady=2)
    w.pack(side=Tix.TOP, padx=20, pady=2)
    r.pack(side=Tix.TOP, padx=20, pady=2)
    c.pack(side=Tix.TOP, padx=20, pady=2)
    # Second page: same layout plus a 'Users' spinner.
    common = Tix.Frame(nb.network)
    common.pack(side=Tix.RIGHT, padx=2, pady=2, fill=Tix.Y)
    CreateCommonButtons(common)
    a = Tix.Control(nb.network, value=12, label='Access Time: ')
    w = Tix.Control(nb.network, value=400, label='Write Throughput: ')
    r = Tix.Control(nb.network, value=400, label='Read Throughput: ')
    c = Tix.Control(nb.network, value=1021, label='Capacity: ')
    u = Tix.Control(nb.network, value=10, label='Users: ')
    a.pack(side=Tix.TOP, padx=20, pady=2)
    w.pack(side=Tix.TOP, padx=20, pady=2)
    r.pack(side=Tix.TOP, padx=20, pady=2)
    c.pack(side=Tix.TOP, padx=20, pady=2)
    u.pack(side=Tix.TOP, padx=20, pady=2)
    msg.pack(side=Tix.TOP, padx=3, pady=3, fill=Tix.BOTH)
    nb.pack(side=Tix.TOP, padx=5, pady=5, fill=Tix.BOTH, expand=1)
def CreateCommonButtons(f):
    """Add the OK/Cancel button pair shared by every NoteBook demo page."""
    for caption in ('OK', 'Cancel'):
        button = Tix.Button(f, text=caption, width=6)
        button.pack(side=Tix.TOP, padx=2, pady=2)
def MkDirList(nb, name):
    """Build the file-selector page: DirList demo (left 40%) next to an
    ExFileSelectBox demo (right 60%)."""
    page = nb.page(name)
    opts = "label.padX 4"
    dir_box = Tix.LabelFrame(page, label='Tix.DirList', options=opts)
    file_box = Tix.LabelFrame(page, label='Tix.ExFileSelectBox', options=opts)
    MkDirListWidget(dir_box.frame)
    MkExFileWidget(file_box.frame)
    dir_box.form(top=0, left=0, right='%40', bottom=-1)
    file_box.form(top=0, left='%40', right=-1, bottom=-1)
def MkDirListWidget(w):
    """The TixDirList widget gives a graphical representation of the file
    system directory and makes it easy for the user to choose and access
    directories.
    """
    blurb = Tix.Message(
        w, relief=Tix.FLAT, width=240, anchor=Tix.N,
        text='The Tix DirList widget gives a graphical representation of the file system directory and makes it easy for the user to choose and access directories.')
    browser = Tix.DirList(w, options='hlist.padY 1 hlist.width 25 hlist.height 16')
    blurb.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=3, pady=3)
    browser.pack(side=Tix.TOP, padx=3, pady=3)
def MkExFileWidget(w):
    """The TixExFileSelectBox widget is more user friendly than the Motif
    style FileSelectBox. """
    blurb = Tix.Message(
        w, relief=Tix.FLAT, width=240, anchor=Tix.N,
        text='The Tix ExFileSelectBox widget is more user friendly than the Motif style FileSelectBox.')
    # There's a bug in the ComboBoxes - the scrolledlistbox is destroyed
    selector = Tix.ExFileSelectBox(w, bd=2, relief=Tix.RAISED)
    blurb.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=3, pady=3)
    selector.pack(side=Tix.TOP, padx=3, pady=3)
###
### List of all the demos we want to show off
# comments: category key -> heading text shown (disabled) in the sample tree.
comments = {'widget' : 'Widget Demos', 'image' : 'Image Demos'}
# samples: display name -> module name; each module lives under
# demo.dir/samples/<name>.py and must expose a RunSample(toplevel) function.
samples = {'Balloon' : 'Balloon',
           'Button Box' : 'BtnBox',
           'Combo Box' : 'ComboBox',
           'Compound Image' : 'CmpImg',
           'Directory List' : 'DirList',
           'Directory Tree' : 'DirTree',
           'Control' : 'Control',
           'Notebook' : 'NoteBook',
           'Option Menu' : 'OptMenu',
           'Paned Window' : 'PanedWin',
           'Popup Menu' : 'PopMenu',
           'ScrolledHList (1)' : 'SHList1',
           'ScrolledHList (2)' : 'SHList2',
           'Tree (dynamic)' : 'Tree'
           }
# There are still a lot of demos to be translated:
## set root {
## {d "File Selectors" file }
## {d "Hierachical ListBox" hlist }
## {d "Tabular ListBox" tlist {c tixTList}}
## {d "Grid Widget" grid {c tixGrid}}
## {d "Manager Widgets" manager }
## {d "Scrolled Widgets" scroll }
## {d "Miscellaneous Widgets" misc }
## {d "Image Types" image }
## }
##
## set image {
## {d "Compound Image" cmpimg }
## {d "XPM Image" xpm {i pixmap}}
## }
##
## set cmpimg {
##done {f "In Buttons" CmpImg.tcl }
## {f "In NoteBook" CmpImg2.tcl }
## {f "Notebook Color Tabs" CmpImg4.tcl }
## {f "Icons" CmpImg3.tcl }
## }
##
## set xpm {
## {f "In Button" Xpm.tcl {i pixmap}}
## {f "In Menu" Xpm1.tcl {i pixmap}}
## }
##
## set file {
##added {f DirList DirList.tcl }
##added {f DirTree DirTree.tcl }
## {f DirSelectDialog DirDlg.tcl }
## {f ExFileSelectDialog EFileDlg.tcl }
## {f FileSelectDialog FileDlg.tcl }
## {f FileEntry FileEnt.tcl }
## }
##
## set hlist {
## {f HList HList1.tcl }
## {f CheckList ChkList.tcl {c tixCheckList}}
##done {f "ScrolledHList (1)" SHList.tcl }
##done {f "ScrolledHList (2)" SHList2.tcl }
##done {f Tree Tree.tcl }
##done {f "Tree (Dynamic)" DynTree.tcl {v win}}
## }
##
## set tlist {
## {f "ScrolledTList (1)" STList1.tcl {c tixTList}}
## {f "ScrolledTList (2)" STList2.tcl {c tixTList}}
## }
## global tcl_platform
## # This demo hangs windows
## if {$tcl_platform(platform) != "windows"} {
##na lappend tlist {f "TList File Viewer" STList3.tcl {c tixTList}}
## }
##
## set grid {
##na {f "Simple Grid" SGrid0.tcl {c tixGrid}}
##na {f "ScrolledGrid" SGrid1.tcl {c tixGrid}}
##na {f "Editable Grid" EditGrid.tcl {c tixGrid}}
## }
##
## set scroll {
## {f ScrolledListBox SListBox.tcl }
## {f ScrolledText SText.tcl }
## {f ScrolledWindow SWindow.tcl }
##na {f "Canvas Object View" CObjView.tcl {c tixCObjView}}
## }
##
## set manager {
## {f ListNoteBook ListNBK.tcl }
##done {f NoteBook NoteBook.tcl }
##done {f PanedWindow PanedWin.tcl }
## }
##
## set misc {
##done {f Balloon Balloon.tcl }
##done {f ButtonBox BtnBox.tcl }
##done {f ComboBox ComboBox.tcl }
##done {f Control Control.tcl }
## {f LabelEntry LabEntry.tcl }
## {f LabelFrame LabFrame.tcl }
## {f Meter Meter.tcl {c tixMeter}}
##done {f OptionMenu OptMenu.tcl }
##done {f PopupMenu PopMenu.tcl }
## {f Select Select.tcl }
## {f StdButtonBox StdBBox.tcl }
## }
##
# stypes: category key (see `comments`) -> ordered list of display names
# (keys of `samples`) listed under that category in the selection tree.
stypes = {}
stypes['widget'] = ['Balloon', 'Button Box', 'Combo Box', 'Control',
                    'Directory List', 'Directory Tree',
                    'Notebook', 'Option Menu', 'Popup Menu', 'Paned Window',
                    'ScrolledHList (1)', 'ScrolledHList (2)', 'Tree (dynamic)']
stypes['image'] = ['Compound Image']
def MkSample(nb, name):
    """Build the sample-browser page: a tree of demo names on the left and a
    read-only source viewer with Run/View buttons on the right."""
    w = nb.page(name)
    options = "label.padX 4"
    pane = Tix.PanedWindow(w, orientation='horizontal')
    pane.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH)
    f1 = pane.add('list', expand='1')
    f2 = pane.add('text', expand='5')
    f1['relief'] = 'flat'
    f2['relief'] = 'flat'
    lab = Tix.LabelFrame(f1, label='Select a sample program:')
    lab.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=5, pady=5)
    lab1 = Tix.LabelFrame(f2, label='Source:')
    lab1.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=5, pady=5)
    slb = Tix.Tree(lab.frame, options='hlist.width 20')
    slb.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=5)
    stext = Tix.ScrolledText(lab1.frame, name='stext')
    font = root.tk.eval('tix option get fixed_font')
    stext.text.config(font=font)
    frame = Tix.Frame(lab1.frame, name='frame')
    run = Tix.Button(frame, text='Run ...', name='run')
    view = Tix.Button(frame, text='View Source ...', name='view')
    run.pack(side=Tix.LEFT, expand=0, fill=Tix.NONE)
    view.pack(side=Tix.LEFT, expand=0, fill=Tix.NONE)
    stext.text['bg'] = slb.hlist['bg']
    stext.text['state'] = 'disabled'  # viewer is read-only; ReadFile toggles it
    stext.text['wrap'] = 'none'
    stext.text['width'] = 80
    frame.pack(side=Tix.BOTTOM, expand=0, fill=Tix.X, padx=7)
    stext.pack(side=Tix.TOP, expand=0, fill=Tix.BOTH, padx=7)
    slb.hlist['separator'] = '.'
    slb.hlist['width'] = 25
    slb.hlist['drawbranch'] = 0
    slb.hlist['indent'] = 10
    slb.hlist['wideselect'] = 1
    # All four callbacks funnel into Sample_Action; the lambda default-arg
    # trick binds the current widgets (early binding) for each callback.
    slb.hlist['command'] = lambda args=0, w=w,slb=slb,stext=stext,run=run,view=view: Sample_Action(w, slb, stext, run, view, 'run')
    slb.hlist['browsecmd'] = lambda args=0, w=w,slb=slb,stext=stext,run=run,view=view: Sample_Action(w, slb, stext, run, view, 'browse')
    run['command'] = lambda args=0, w=w,slb=slb,stext=stext,run=run,view=view: Sample_Action(w, slb, stext, run, view, 'run')
    view['command'] = lambda args=0, w=w,slb=slb,stext=stext,run=run,view=view: Sample_Action(w, slb, stext, run, view, 'view')
    # Populate the tree: a disabled heading per category (separator rule
    # between categories), then one selectable entry per sample.
    for type in ['widget', 'image']:
        if type != 'widget':
            x = Tix.Frame(slb.hlist, bd=2, height=2, width=150,
                          relief=Tix.SUNKEN, bg=slb.hlist['bg'])
            slb.hlist.add_child(itemtype=Tix.WINDOW, window=x, state='disabled')
        x = slb.hlist.add_child(itemtype=Tix.TEXT, state='disabled',
                                text=comments[type])
        for key in stypes[type]:
            slb.hlist.add_child(x, itemtype=Tix.TEXT, data=key,
                                text=key)
    slb.hlist.selection_clear()
    run['state'] = 'disabled'
    view['state'] = 'disabled'
def Sample_Action(w, slb, stext, run, view, action):
    """Handle a 'run'/'view'/'browse' request for the anchored sample entry.

    Disables the buttons and bails out when nothing is anchored; ignores
    anchored category headings (entries with no parent).
    """
    global demo
    hlist = slb.hlist
    anchor = hlist.info_anchor()
    if not anchor:
        run['state'] = 'disabled'
        view['state'] = 'disabled'
        # BUGFIX: return here. The original fell through and called
        # hlist.info_data('') / samples[''] with an empty anchor, failing.
        return
    if not hlist.info_parent(anchor):
        # a category heading, not a sample - nothing to do
        return
    run['state'] = 'normal'
    view['state'] = 'normal'
    key = hlist.info_data(anchor)
    title = key
    prog = samples[key]
    if action == 'run':
        # BUGFIX: exec('import ...') at function scope does not make the
        # module name visible to a later eval() under Python 3; use the
        # builtin __import__ and plain attribute access instead.
        mod = __import__(prog)
        w = Tix.Toplevel()
        w.title(title)
        mod.RunSample(w)
    elif action == 'view':
        w = Tix.Toplevel()
        w.title('Source view: ' + title)
        LoadFile(w, demo.dir + '/samples/' + prog + '.py')
    elif action == 'browse':
        ReadFile(stext.text, demo.dir + '/samples/' + prog + '.py')
def LoadFile(w, fname):
    """Show the contents of *fname* in toplevel *w*: a fixed-font scrolled
    text viewer with a Close button underneath."""
    global root
    b = Tix.Button(w, text='Close', command=w.destroy)
    t = Tix.ScrolledText(w)
    # b.form(left=0, bottom=0, padx=4, pady=4)
    # t.form(left=0, bottom=b, right='-0', top=0)
    t.pack()
    b.pack()
    # Use the fixed-width font configured by the Tix option database.
    font = root.tk.eval('tix option get fixed_font')
    t.text.config(font=font)
    t.text['bd'] = 2
    t.text['wrap'] = 'none'
    ReadFile(t.text, fname)
def ReadFile(w, fname):
    """Replace the contents of text widget *w* with the contents of *fname*.

    The widget is temporarily switched to 'normal' state so it can be
    modified; its previous state is always restored, even if reading or
    inserting fails.
    """
    old_state = w['state']
    w['state'] = 'normal'
    w.delete('1.0', Tix.END)  # '1.0' is the canonical first index ('0.0' was clamped)
    try:
        # BUGFIX: the original called f.close() after the insert loop, so the
        # file handle leaked if open()/insert() raised. 'with' always closes,
        # and iterating the file avoids materialising all lines at once.
        with open(fname) as f:
            for s in f:
                w.insert(Tix.END, s)
    finally:
        # w.see('1.0')
        w['state'] = old_state
if __name__ == '__main__':
    # RunMain is defined earlier in this file; it builds the demo UI and
    # runs the Tk event loop on the Tix root window.
    root = Tix.Tk()
    RunMain(root)
| gpl-3.0 |
sonaht/ansible | test/units/modules/network/nxos/test_nxos_bgp_neighbor_af.py | 16 | 3499 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_bgp_neighbor_af
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosBgpNeighborAfModule(TestNxosModule):
    """Unit tests for the nxos_bgp_neighbor_af Ansible module
    (BGP neighbor address-family configuration on NX-OS)."""

    # Module under test, consumed by the TestNxosModule harness.
    module = nxos_bgp_neighbor_af

    def setUp(self):
        # Patch device config I/O so no real NX-OS connection is needed.
        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_bgp_neighbor_af.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_bgp_neighbor_af.get_config')
        self.get_config = self.mock_get_config.start()

    def tearDown(self):
        self.mock_load_config.stop()
        self.mock_get_config.stop()

    def load_fixtures(self, commands=None, device=''):
        # Serve a canned running-config; config pushes become no-ops.
        self.get_config.return_value = load_fixture('', 'nxos_bgp_config.cfg')
        self.load_config.return_value = None

    def test_nxos_bgp_neighbor_af(self):
        # New AF for an unconfigured neighbor -> full command sequence.
        set_module_args(dict(asn=65535, neighbor='3.3.3.3', afi='ipv4',
                             safi='unicast', route_reflector_client=True))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], [
            'router bgp 65535', 'neighbor 3.3.3.3', 'address-family ipv4 unicast',
            'route-reflector-client'
        ])

    def test_nxos_bgp_neighbor_af_exists(self):
        # AF already present in the fixture -> idempotent, no commands.
        set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4', safi='unicast'))
        self.execute_module(changed=False, commands=[])

    def test_nxos_bgp_neighbor_af_absent(self):
        set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4', safi='unicast', state='absent'))
        self.execute_module(
            changed=True, sort=False,
            commands=['router bgp 65535', 'neighbor 3.3.3.5', 'no address-family ipv4 unicast']
        )

    def test_nxos_bgp_neighbor_af_advertise_map(self):
        set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4', safi='unicast',
                             advertise_map_exist=['my_advertise_map', 'my_exist_map']))
        self.execute_module(
            changed=True, sort=False,
            commands=['router bgp 65535', 'neighbor 3.3.3.5', 'advertise-map my_advertise_map exist my_exist_map']
        )

    def test_nxos_bgp_neighbor_af_advertise_map_non_exist(self):
        set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4', safi='unicast',
                             advertise_map_non_exist=['my_advertise_map', 'my_exist_map']))
        self.execute_module(
            changed=True, sort=False,
            commands=['router bgp 65535', 'neighbor 3.3.3.5', 'advertise-map my_advertise_map non-exist my_exist_map']
        )
| gpl-3.0 |
maftieu/CouchPotatoServer | libs/pyasn1/type/namedtype.py | 200 | 4794 | # NamedType specification for constructed types
import sys
from pyasn1.type import tagmap
from pyasn1 import error
class NamedType:
    """An immutable (name, type) pair describing one component of an ASN.1
    constructed type.

    The base class models a mandatory component: neither optional nor
    defaulted (see the OptionalNamedType/DefaultedNamedType subclasses).
    """
    # Class-level flags overridden by subclasses.
    isOptional = 0
    isDefaulted = 0

    def __init__(self, name, t):
        self.__name = name
        self.__type = t

    def __repr__(self):
        return '%s(%s, %s)' % (self.__class__.__name__, self.__name, self.__type)

    def getType(self):
        return self.__type

    def getName(self):
        return self.__name

    def __getitem__(self, idx):
        # Tuple-like access: index 0 yields the name, index 1 the type.
        # Anything else (including negative indices) is rejected.
        if idx == 0:
            return self.__name
        elif idx == 1:
            return self.__type
        else:
            raise IndexError()
class OptionalNamedType(NamedType):
    # Component that may be absent from the serialized value entirely.
    isOptional = 1
class DefaultedNamedType(NamedType):
    # Component that assumes a default value when absent from the
    # serialized value.
    isDefaulted = 1
class NamedTypes:
    """Ordered, immutable collection of NamedType objects describing the
    components of one ASN.1 constructed type.

    Provides position <-> name <-> tag lookups used by codecs. All lookup
    tables are built lazily on first use and cached. ("ambigious" is a
    historical misspelling preserved for attribute-name stability.)
    """
    def __init__(self, *namedTypes):
        self.__namedTypes = namedTypes
        self.__namedTypesLen = len(self.__namedTypes)
        self.__minTagSet = None
        # Lazily populated lookup caches (see the accessors below).
        self.__tagToPosIdx = {}; self.__nameToPosIdx = {}
        self.__tagMap = { False: None, True: None }
        self.__ambigiousTypes = {}
    def __repr__(self):
        r = '%s(' % self.__class__.__name__
        for n in self.__namedTypes:
            r = r + '%r, ' % (n,)
        return r + ')'
    def __getitem__(self, idx): return self.__namedTypes[idx]
    # Truth value: non-empty collection. __nonzero__ is the Python 2
    # spelling of __bool__; only the matching one is defined.
    if sys.version_info[0] <= 2:
        def __nonzero__(self): return bool(self.__namedTypesLen)
    else:
        def __bool__(self): return bool(self.__namedTypesLen)
    def __len__(self): return self.__namedTypesLen
    def getTypeByPosition(self, idx):
        # Return the ASN.1 type object at position idx; PyAsn1Error when
        # out of range.
        if idx < 0 or idx >= self.__namedTypesLen:
            raise error.PyAsn1Error('Type position out of range')
        else:
            return self.__namedTypes[idx].getType()
    def getPositionByType(self, tagSet):
        # Reverse lookup: tag set -> component position. The tag->position
        # table is built once, scanning components last-to-first.
        if not self.__tagToPosIdx:
            idx = self.__namedTypesLen
            while idx > 0:
                idx = idx - 1
                tagMap = self.__namedTypes[idx].getType().getTagMap()
                for t in tagMap.getPosMap():
                    if t in self.__tagToPosIdx:
                        raise error.PyAsn1Error('Duplicate type %s' % (t,))
                    self.__tagToPosIdx[t] = idx
        try:
            return self.__tagToPosIdx[tagSet]
        except KeyError:
            raise error.PyAsn1Error('Type %s not found' % (tagSet,))
    def getNameByPosition(self, idx):
        try:
            return self.__namedTypes[idx].getName()
        except IndexError:
            raise error.PyAsn1Error('Type position out of range')
    def getPositionByName(self, name):
        # Reverse lookup: component name -> position, with the same lazy
        # table-building pattern as getPositionByType.
        if not self.__nameToPosIdx:
            idx = self.__namedTypesLen
            while idx > 0:
                idx = idx - 1
                n = self.__namedTypes[idx].getName()
                if n in self.__nameToPosIdx:
                    raise error.PyAsn1Error('Duplicate name %s' % (n,))
                self.__nameToPosIdx[n] = idx
        try:
            return self.__nameToPosIdx[name]
        except KeyError:
            raise error.PyAsn1Error('Name %s not found' % (name,))
    def __buildAmbigiousTagMap(self):
        # For every position, record the run of components starting there
        # that could legally occupy it (the component itself plus any
        # directly preceding optional/defaulted ones), walking backwards.
        ambigiousTypes = ()
        idx = self.__namedTypesLen
        while idx > 0:
            idx = idx - 1
            t = self.__namedTypes[idx]
            if t.isOptional or t.isDefaulted:
                ambigiousTypes = (t, ) + ambigiousTypes
            else:
                ambigiousTypes = (t, )
            self.__ambigiousTypes[idx] = NamedTypes(*ambigiousTypes)
    def getTagMapNearPosition(self, idx):
        if not self.__ambigiousTypes: self.__buildAmbigiousTagMap()
        try:
            return self.__ambigiousTypes[idx].getTagMap()
        except KeyError:
            raise error.PyAsn1Error('Type position out of range')
    def getPositionNearPosition(self, tagSet, idx): pass  # placeholder-free zone
    def getPositionNearType(self, tagSet, idx):
        if not self.__ambigiousTypes: self.__buildAmbigiousTagMap()
        try:
            return idx+self.__ambigiousTypes[idx].getPositionByType(tagSet)
        except KeyError:
            raise error.PyAsn1Error('Type position out of range')
    def genMinTagSet(self):
        # Smallest tag set across all components; cached after first call.
        if self.__minTagSet is None:
            for t in self.__namedTypes:
                __type = t.getType()
                tagSet = getattr(__type,'getMinTagSet',__type.getTagSet)()
                if self.__minTagSet is None or tagSet < self.__minTagSet:
                    self.__minTagSet = tagSet
        return self.__minTagSet
    def getTagMap(self, uniq=False):
        # Aggregate tag map over all components; cached per `uniq` flag.
        if self.__tagMap[uniq] is None:
            tagMap = tagmap.TagMap()
            for nt in self.__namedTypes:
                tagMap = tagMap.clone(
                    nt.getType(), nt.getType().getTagMap(), uniq
                )
            self.__tagMap[uniq] = tagMap
        return self.__tagMap[uniq]
| gpl-3.0 |
aldian/tensorflow | tensorflow/python/kernel_tests/conv_ops_3d_test.py | 17 | 18216 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def GetTestConfigs():
  """Get all the valid tests configs to run.

  Returns:
    all the valid test configs as tuples of data_format and use_gpu.
  """
  # NDHWC runs on both CPU and (when available) GPU.
  test_configs = [("NDHWC", False), ("NDHWC", True)]
  if test.is_gpu_available(cuda_only=True):
    # "NCDHW" format is only supported on CUDA.
    test_configs += [("NCDHW", True)]
  return test_configs
class Conv3DTest(test.TestCase):
def _DtypesToTest(self, use_gpu):
  """Return the dtypes a test should run in for the given device."""
  if not use_gpu:
    return [dtypes.float64, dtypes.float32, dtypes.float16]
  # GPU: include fp16 only when the device supports half-precision
  # matmul/conv. It is important that float32 comes before float16 here,
  # as we will be using its gradients as reference for fp16 gradients.
  if test_util.CudaSupportsHalfMatMulAndConv():
    return [dtypes.float32, dtypes.float16]
  return [dtypes.float32]
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, stride,
                          padding, data_format, dtype, use_gpu):
  """Build a conv3d op over deterministic ramp inputs; returns the NDHWC result."""
  total_size_1 = 1
  total_size_2 = 1
  for s in tensor_in_sizes:
    total_size_1 *= s
  for s in filter_in_sizes:
    total_size_2 *= s
  # Initializes the input tensor with array containing numbers from 0 to 1.
  # We keep the input tensor values fairly small to avoid overflowing float16
  # during the conv3d.
  x1 = [f * 1.0 / total_size_1 for f in range(1, total_size_1 + 1)]
  x2 = [f * 1.0 / total_size_2 for f in range(1, total_size_2 + 1)]
  with self.test_session(use_gpu=use_gpu):
    t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
    t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
    # A scalar stride applies uniformly to all three spatial dimensions.
    if isinstance(stride, collections.Iterable):
      strides = [1] + list(stride) + [1]
    else:
      strides = [1, stride, stride, stride, 1]
    # Transpose input/strides into NCDHW when testing that layout, and
    # transpose the result back so expected values stay layout-agnostic.
    if data_format == "NCDHW":
      t1 = test_util.NHWCToNCHW(t1)
      strides = test_util.NHWCToNCHW(strides)
    conv = nn_ops.conv3d(t1, t2, strides, padding=padding,
                         data_format=data_format)
    if data_format == "NCDHW":
      conv = test_util.NCHWToNHWC(conv)
    return conv
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
                  expected):
  """Run the conv in every (data_format, use_gpu, dtype) config and compare
  each flattened result against `expected`."""
  results = []
  for data_format, use_gpu in GetTestConfigs():
    for dtype in self._DtypesToTest(use_gpu):
      result = self._SetupValuesForDevice(
          tensor_in_sizes,
          filter_in_sizes,
          stride,
          padding,
          data_format,
          dtype,
          use_gpu=use_gpu)
      results.append(result)
  # NOTE(review): indentation was lost in this dump; the session block is
  # reconstructed at method level (evaluate all configs in one run), matching
  # the upstream TensorFlow source - confirm against the original file.
  with self.test_session() as sess:
    values = sess.run(results)
    for value in values:
      print("expected = ", expected)
      print("actual = ", value)
      # fp16 results are compared with a looser tolerance.
      tol = 1e-6
      if value.dtype == np.float16:
        tol = 1e-3
      self.assertAllClose(expected, value.flatten(), atol=tol, rtol=tol)
def testConv3D1x1x1Filter(self):
  """1x1x1 kernel: a pointwise channel mix, independent of spatial layout."""
  expected_output = [
      0.18518519, 0.22222222, 0.25925926, 0.40740741, 0.5, 0.59259259,
      0.62962963, 0.77777778, 0.92592593, 0.85185185, 1.05555556, 1.25925926,
      1.07407407, 1.33333333, 1.59259259, 1.2962963, 1.61111111, 1.92592593
  ]
  # These are equivalent to the Conv2D1x1 case.
  # The same 6 spatial positions arranged along different axes must give
  # identical flattened results.
  self._VerifyValues(
      tensor_in_sizes=[1, 2, 3, 1, 3],
      filter_in_sizes=[1, 1, 1, 3, 3],
      stride=1,
      padding="VALID",
      expected=expected_output)
  self._VerifyValues(
      tensor_in_sizes=[1, 2, 1, 3, 3],
      filter_in_sizes=[1, 1, 1, 3, 3],
      stride=1,
      padding="VALID",
      expected=expected_output)
  self._VerifyValues(
      tensor_in_sizes=[1, 1, 2, 3, 3],
      filter_in_sizes=[1, 1, 1, 3, 3],
      stride=1,
      padding="VALID",
      expected=expected_output)
# Expected values computed using scipy's correlate function.
def testConv3D2x2x2Filter(self):
  """Full 2x2x2 kernel over a 4x2x3 volume, unit stride, VALID padding."""
  expected_output = [
      3.77199074, 3.85069444, 3.92939815, 4.2650463, 4.35763889, 4.45023148,
      6.73032407, 6.89236111, 7.05439815, 7.22337963, 7.39930556, 7.57523148,
      9.68865741, 9.93402778, 10.17939815, 10.18171296, 10.44097222,
      10.70023148
  ]
  # expected_shape = [1, 3, 1, 2, 5]
  self._VerifyValues(
      tensor_in_sizes=[1, 4, 2, 3, 3],  # b, z, y, x, fin
      filter_in_sizes=[2, 2, 2, 3, 3],  # z, y, x, fin, fout
      stride=1,
      padding="VALID",
      expected=expected_output)
def testConv3DStrides(self):
  """Per-dimension (anisotropic) strides with SAME padding."""
  expected_output = [
      0.06071429, 0.08988095, 0.10238095, 0.11488095, 0.12738095, 0.13988095,
      0.08452381, 0.26071429, 0.35238095, 0.36488095, 0.37738095, 0.38988095,
      0.40238095, 0.23452381, 0.46071429, 0.61488095, 0.62738095, 0.63988095,
      0.65238095, 0.66488095, 0.38452381, 1.12738095, 1.48988095, 1.50238095,
      1.51488095, 1.52738095, 1.53988095, 0.88452381, 1.32738095, 1.75238095,
      1.76488095, 1.77738095, 1.78988095, 1.80238095, 1.03452381, 1.52738095,
      2.01488095, 2.02738095, 2.03988095, 2.05238095, 2.06488095, 1.18452381,
      2.19404762, 2.88988095, 2.90238095, 2.91488095, 2.92738095, 2.93988095,
      1.68452381, 2.39404762, 3.15238095, 3.16488095, 3.17738095, 3.18988095,
      3.20238095, 1.83452381, 2.59404762, 3.41488095, 3.42738095, 3.43988095,
      3.45238095, 3.46488095, 1.98452381
  ]
  self._VerifyValues(
      tensor_in_sizes=[1, 5, 8, 7, 1],
      filter_in_sizes=[1, 2, 3, 1, 1],
      stride=[2, 3, 1],  # different stride for each spatial dimension
      padding="SAME",
      expected=expected_output)
def testConv3D2x2x2FilterStride2(self):
  """2x2x2 kernel with stride 2, VALID padding: every other window of the
  stride-1 case (compare testConv3D2x2x2Filter)."""
  expected_output = [
      3.77199074, 3.85069444, 3.92939815, 9.68865741, 9.93402778, 10.17939815
  ]
  self._VerifyValues(
      tensor_in_sizes=[1, 4, 2, 3, 3],
      filter_in_sizes=[2, 2, 2, 3, 3],
      stride=2,
      padding="VALID",
      expected=expected_output)
def testConv3DStride3(self):
  """Asymmetric 3x2x1 kernel with stride 3, VALID padding."""
  expected_output = [
      1.51140873, 1.57167659, 1.63194444, 1.56349206, 1.62673611, 1.68998016,
      1.6155754, 1.68179563, 1.74801587, 1.9280754, 2.01215278, 2.09623016,
      1.98015873, 2.0672123, 2.15426587, 2.03224206, 2.12227183, 2.21230159,
      4.4280754, 4.65500992, 4.88194444, 4.48015873, 4.71006944, 4.93998016,
      4.53224206, 4.76512897, 4.99801587, 4.84474206, 5.09548611, 5.34623016,
      4.8968254, 5.15054563, 5.40426587, 4.94890873, 5.20560516, 5.46230159
  ]
  self._VerifyValues(
      tensor_in_sizes=[1, 6, 7, 8, 2],
      filter_in_sizes=[3, 2, 1, 2, 3],
      stride=3,
      padding="VALID",
      expected=expected_output)
def testConv3D2x2x2FilterStride2Same(self):
  """2x2x2 kernel with stride 2 and SAME padding (edge windows zero-padded)."""
  expected_output = [
      3.77199074, 3.85069444, 3.92939815, 2.0162037, 2.06597222, 2.11574074,
      9.68865741, 9.93402778, 10.17939815, 4.59953704, 4.73263889, 4.86574074
  ]
  self._VerifyValues(
      tensor_in_sizes=[1, 4, 2, 3, 3],
      filter_in_sizes=[2, 2, 2, 3, 3],
      stride=2,
      padding="SAME",
      expected=expected_output)
def testKernelSmallerThanStride(self):
  # When the kernel is smaller than the stride, some input elements are
  # skipped entirely; covers 1x1x1 and 2x2x2 kernels with both paddings.
  expected_output = [
      0.03703704, 0.11111111, 0.25925926, 0.33333333, 0.7037037, 0.77777778,
      0.92592593, 1.
  ]
  # 1x1x1 kernel, stride 2: SAME and VALID agree because no padding is needed.
  self._VerifyValues(
      tensor_in_sizes=[1, 3, 3, 3, 1],
      filter_in_sizes=[1, 1, 1, 1, 1],
      stride=2,
      padding="SAME",
      expected=expected_output)
  self._VerifyValues(
      tensor_in_sizes=[1, 3, 3, 3, 1],
      filter_in_sizes=[1, 1, 1, 1, 1],
      stride=2,
      padding="VALID",
      expected=expected_output)
  expected_output = [
      0.54081633, 0.58017493, 0.28061224, 0.81632653, 0.85568513, 0.40306122,
      0.41873178, 0.4340379, 0.19642857, 2.46938776, 2.50874636, 1.1377551,
      2.74489796, 2.78425656, 1.26020408, 1.16873178, 1.1840379, 0.51785714,
      1.09511662, 1.10604956, 0.44642857, 1.17164723, 1.18258017, 0.47704082,
      0.3691691, 0.37244898, 0.125
  ]
  # 2x2x2 kernel, stride 3: SAME pads the trailing edge, VALID drops it.
  self._VerifyValues(
      tensor_in_sizes=[1, 7, 7, 7, 1],
      filter_in_sizes=[2, 2, 2, 1, 1],
      stride=3,
      padding="SAME",
      expected=expected_output)
  expected_output = [
      0.540816, 0.580175, 0.816327, 0.855685, 2.469388, 2.508746, 2.744898,
      2.784257
  ]
  self._VerifyValues(
      tensor_in_sizes=[1, 7, 7, 7, 1],
      filter_in_sizes=[2, 2, 2, 1, 1],
      stride=3,
      padding="VALID",
      expected=expected_output)
def testKernelSizeMatchesInputSize(self):
  # Kernel exactly covers the input: conv3d degenerates to a single dot
  # product per output channel.
  self._VerifyValues(
      tensor_in_sizes=[1, 2, 1, 2, 1],
      filter_in_sizes=[2, 1, 2, 1, 2],
      stride=1,
      padding="VALID",
      expected=[1.5625, 1.875])
def _ConstructAndTestGradientForConfig(
    self, batch, input_shape, filter_shape, in_depth, out_depth, stride,
    padding, test_input, data_format, use_gpu):
  """Checks conv3d gradients for one test configuration.

  Compares the theoretical (backprop) Jacobian of conv3d against a
  numerically estimated one, differentiating w.r.t. either the input or
  the filter.

  Args:
    batch: batch size N.
    input_shape: (planes, rows, cols) of the input volume.
    filter_shape: (planes, rows, cols) of the filter.
    in_depth: number of input channels.
    out_depth: number of output channels.
    stride: scalar stride, or an iterable of three per-dimension strides.
    padding: "SAME" or "VALID".
    test_input: if True, differentiate w.r.t. the input, else the filter.
    data_format: "NDHWC" or "NCDHW".
    use_gpu: whether to place the op on the GPU.

  Raises:
    ValueError: if _DtypesToTest yields a dtype with no known tolerance.
  """
  input_planes, input_rows, input_cols = input_shape
  filter_planes, filter_rows, filter_cols = filter_shape
  input_shape = [batch, input_planes, input_rows, input_cols, in_depth]
  filter_shape = [
      filter_planes, filter_rows, filter_cols, in_depth, out_depth
  ]
  # collections.Iterable was deprecated in Python 3.3 and removed in 3.10;
  # collections.abc is the supported home of the ABCs.
  if isinstance(stride, collections.abc.Iterable):
    strides = [1] + list(stride) + [1]
  else:
    strides = [1, stride, stride, stride, 1]
  # Mirror the output-shape arithmetic used by the conv3d op itself.
  if padding == "VALID":
    output_planes = int(
        math.ceil((input_planes - filter_planes + 1.0) / strides[1]))
    output_rows = int(
        math.ceil((input_rows - filter_rows + 1.0) / strides[2]))
    output_cols = int(
        math.ceil((input_cols - filter_cols + 1.0) / strides[3]))
  else:
    output_planes = int(math.ceil(float(input_planes) / strides[1]))
    output_rows = int(math.ceil(float(input_rows) / strides[2]))
    output_cols = int(math.ceil(float(input_cols) / strides[3]))
  output_shape = [batch, output_planes, output_rows, output_cols, out_depth]
  input_size = 1
  for x in input_shape:
    input_size *= x
  filter_size = 1
  for x in filter_shape:
    filter_size *= x
  # Deterministic ramp data in [0, 1) so expected gradients are stable.
  input_data = [x * 1.0 / input_size for x in range(0, input_size)]
  filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
  for data_type in self._DtypesToTest(use_gpu=use_gpu):
    # TODO(mjanusz): Modify gradient_checker to also provide max relative
    # error and synchronize the tolerance levels between the tests for forward
    # and backward computations.
    if data_type == dtypes.float64:
      tolerance = 1e-8
    elif data_type == dtypes.float32:
      tolerance = 5e-3
    elif data_type == dtypes.float16:
      tolerance = 1e-3
    else:
      # Previously an unexpected dtype left `tolerance` unbound and the test
      # crashed later with a confusing NameError; fail loudly instead.
      raise ValueError("no gradient tolerance defined for dtype %s" % data_type)
    with self.test_session(use_gpu=use_gpu):
      orig_input_tensor = constant_op.constant(
          input_data, shape=input_shape, dtype=data_type, name="input")
      filter_tensor = constant_op.constant(
          filter_data, shape=filter_shape, dtype=data_type, name="filter")
      if data_format == "NCDHW":
        input_tensor = test_util.NHWCToNCHW(orig_input_tensor)
        new_strides = test_util.NHWCToNCHW(strides)
      else:
        input_tensor = orig_input_tensor
        new_strides = strides
      conv = nn_ops.conv3d(
          input_tensor,
          filter_tensor,
          new_strides,
          padding,
          data_format=data_format,
          name="conv")
      if data_format == "NCDHW":
        conv = test_util.NCHWToNHWC(conv)
      # The gradient is always checked against the NDHWC-shaped tensors so
      # that output_shape matches regardless of data_format.
      if test_input:
        jacob_t, jacob_n = gradient_checker.compute_gradient(
            orig_input_tensor, input_shape, conv, output_shape)
      else:
        jacob_t, jacob_n = gradient_checker.compute_gradient(
            filter_tensor, filter_shape, conv, output_shape)
      if data_type != dtypes.float16:
        reference_jacob_t = jacob_t
        err = np.fabs(jacob_t - jacob_n).max()
      else:
        # Compare fp16 theoretical gradients to fp32 theoretical gradients,
        # since fp16 numerical gradients are too imprecise.
        # NOTE(review): this relies on _DtypesToTest yielding a wider float
        # before float16 so reference_jacob_t is already bound -- confirm.
        err = np.fabs(jacob_t - reference_jacob_t).max()
      print("conv3d gradient error = ", err)
      self.assertLess(err, tolerance)
def ConstructAndTestGradient(self, **kwargs):
  """Runs the gradient check for every (data_format, use_gpu) combination."""
  for config in GetTestConfigs():
    fmt, gpu = config
    self._ConstructAndTestGradientForConfig(
        data_format=fmt, use_gpu=gpu, **kwargs)
def testInputGradientValidPaddingStrideOne(self):
  # Gradient w.r.t. the input; VALID padding, unit stride.
  self.ConstructAndTestGradient(
      batch=2,
      input_shape=(3, 5, 4),
      filter_shape=(3, 3, 3),
      in_depth=2,
      out_depth=3,
      stride=1,
      padding="VALID",
      test_input=True)
def testFilterGradientValidPaddingStrideOne(self):
  # Gradient w.r.t. the filter; VALID padding, unit stride.
  self.ConstructAndTestGradient(
      batch=4,
      input_shape=(4, 6, 5),
      filter_shape=(2, 2, 2),
      in_depth=2,
      out_depth=3,
      stride=1,
      padding="VALID",
      test_input=False)
def testInputGradientValidPaddingStrideTwo(self):
  # Gradient w.r.t. the input; VALID padding, stride 2.
  self.ConstructAndTestGradient(
      batch=2,
      input_shape=(6, 3, 5),
      filter_shape=(3, 3, 3),
      in_depth=2,
      out_depth=3,
      stride=2,
      padding="VALID",
      test_input=True)
def testFilterGradientValidPaddingStrideTwo(self):
  # Gradient w.r.t. the filter; VALID padding, stride 2.
  self.ConstructAndTestGradient(
      batch=2,
      input_shape=(7, 6, 5),
      filter_shape=(2, 2, 2),
      in_depth=2,
      out_depth=3,
      stride=2,
      padding="VALID",
      test_input=False)
def testInputGradientValidPaddingStrideThree(self):
  # Gradient w.r.t. the input; VALID padding, stride 3.
  self.ConstructAndTestGradient(
      batch=2,
      input_shape=(3, 7, 6),
      filter_shape=(3, 3, 3),
      in_depth=2,
      out_depth=3,
      stride=3,
      padding="VALID",
      test_input=True)
def testFilterGradientValidPaddingStrideThree(self):
  # Gradient w.r.t. the filter; VALID padding, stride 3.
  self.ConstructAndTestGradient(
      batch=2,
      input_shape=(4, 4, 7),
      filter_shape=(4, 4, 4),
      in_depth=2,
      out_depth=3,
      stride=3,
      padding="VALID",
      test_input=False)
def testInputGradientSamePaddingStrideOne(self):
  # Gradient w.r.t. the input; SAME padding, unit stride.
  self.ConstructAndTestGradient(
      batch=2,
      input_shape=(3, 2, 2),
      filter_shape=(3, 2, 1),
      in_depth=2,
      out_depth=1,
      stride=1,
      padding="SAME",
      test_input=True)
def testFilterGradientSamePaddingStrideOne(self):
  # Gradient w.r.t. the filter; SAME padding, unit stride.
  self.ConstructAndTestGradient(
      batch=2,
      input_shape=(3, 6, 5),
      filter_shape=(2, 2, 2),
      in_depth=2,
      out_depth=3,
      stride=1,
      padding="SAME",
      test_input=False)
def testInputGradientSamePaddingStrideTwo(self):
  # Gradient w.r.t. the input; SAME padding, stride 2.
  self.ConstructAndTestGradient(
      batch=2,
      input_shape=(6, 3, 4),
      filter_shape=(3, 3, 3),
      in_depth=2,
      out_depth=3,
      stride=2,
      padding="SAME",
      test_input=True)
def testFilterGradientSamePaddingStrideTwo(self):
  # Gradient w.r.t. the filter; SAME padding, stride 2.
  self.ConstructAndTestGradient(
      batch=4,
      input_shape=(7, 3, 5),
      filter_shape=(2, 2, 2),
      in_depth=2,
      out_depth=3,
      stride=2,
      padding="SAME",
      test_input=False)
def testInputGradientSamePaddingStrideThree(self):
  # Gradient w.r.t. the input; SAME padding, stride 3.
  self.ConstructAndTestGradient(
      batch=2,
      input_shape=(9, 3, 6),
      filter_shape=(3, 3, 3),
      in_depth=2,
      out_depth=3,
      stride=3,
      padding="SAME",
      test_input=True)
def testFilterGradientSamePaddingStrideThree(self):
  # Gradient w.r.t. the filter; SAME padding, stride 3.
  self.ConstructAndTestGradient(
      batch=2,
      input_shape=(9, 4, 7),
      filter_shape=(4, 4, 4),
      in_depth=2,
      out_depth=3,
      stride=3,
      padding="SAME",
      test_input=False)
def testInputGradientSamePaddingDifferentStrides(self):
  # Gradient w.r.t. the input with a distinct stride per spatial dimension.
  self.ConstructAndTestGradient(
      batch=1,
      input_shape=(5, 8, 7),
      filter_shape=(1, 2, 3),
      in_depth=2,
      out_depth=3,
      stride=[2, 3, 1],
      padding="SAME",
      test_input=True)
def testFilterGradientKernelSizeMatchesInputSize(self):
  # Gradient w.r.t. the filter when the kernel exactly covers the input.
  self.ConstructAndTestGradient(
      batch=2,
      input_shape=(5, 4, 3),
      filter_shape=(5, 4, 3),
      in_depth=2,
      out_depth=3,
      stride=1,
      padding="VALID",
      test_input=False)
def testInputGradientKernelSizeMatchesInputSize(self):
  # Gradient w.r.t. the input when the kernel exactly covers the input.
  self.ConstructAndTestGradient(
      batch=2,
      input_shape=(5, 4, 3),
      filter_shape=(5, 4, 3),
      in_depth=2,
      out_depth=3,
      stride=1,
      padding="VALID",
      test_input=True)
def disabledtestFilterGradientSamePaddingDifferentStrides(self):
  # Deliberately disabled: the "disabled" name prefix keeps the test runner
  # from discovering it (it does not start with "test").
  self.ConstructAndTestGradient(
      batch=1,
      input_shape=(5, 8, 7),
      filter_shape=(1, 2, 3),
      in_depth=2,
      out_depth=3,
      stride=[2, 3, 1],
      padding="SAME",
      test_input=False)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
wdv4758h/ZipPy | edu.uci.python.benchmark/src/benchmarks/sympy/sympy/matrices/expressions/matexpr.py | 5 | 12131 | from __future__ import print_function, division
from functools import wraps
from sympy.core import S, Symbol, sympify, Tuple, Integer, Basic, Expr
from sympy.core.decorators import call_highest_priority
from sympy.core.sympify import SympifyError, sympify
from sympy.functions import conjugate, adjoint
from sympy.matrices import ShapeError
from sympy.simplify import simplify
def _sympifyit(arg, retval=None):
    """Decorator factory that sympifies a binary method's second argument.

    Unlike the core Expr version, this one also sympifies MutableMatrix
    objects (strict mode). When sympification (or the wrapped call) raises
    SympifyError, ``retval`` is returned instead.
    """
    def deco(func):
        @wraps(func)
        def __sympifyit_wrapper(a, b):
            try:
                return func(a, sympify(b, strict=True))
            except SympifyError:
                return retval
        return __sympifyit_wrapper
    return deco
class MatrixExpr(Basic):
    """ Superclass for Matrix Expressions

    MatrixExprs represent abstract matrices, linear transformations represented
    within a particular basis.

    Examples
    ========

    >>> from sympy import MatrixSymbol
    >>> A = MatrixSymbol('A', 3, 3)
    >>> y = MatrixSymbol('y', 3, 1)
    >>> x = (A.T*A).I * A * y

    See Also
    ========
    MatrixSymbol
    MatAdd
    MatMul
    Transpose
    Inverse
    """

    # Higher than Expr's priority (10.0) so that mixed scalar/matrix
    # arithmetic dispatches to the matrix classes.
    _op_priority = 11.0

    # Class-level flags queried throughout the matrix-expression code.
    is_Matrix = True
    is_MatrixExpr = True
    is_Identity = None
    is_Inverse = False
    is_Transpose = False
    is_ZeroMatrix = False
    is_MatAdd = False
    is_MatMul = False
    is_commutative = False

    def __new__(cls, *args, **kwargs):
        # Sympify all arguments before handing them to Basic.
        args = map(sympify, args)
        return Basic.__new__(cls, *args, **kwargs)

    # The following is adapted from the core Expr object

    def __neg__(self):
        return MatMul(S.NegativeOne, self).doit()

    def __abs__(self):
        raise NotImplementedError

    @_sympifyit('other', NotImplemented)
    @call_highest_priority('__radd__')
    def __add__(self, other):
        return MatAdd(self, other).doit()

    @_sympifyit('other', NotImplemented)
    @call_highest_priority('__add__')
    def __radd__(self, other):
        return MatAdd(other, self).doit()

    @_sympifyit('other', NotImplemented)
    @call_highest_priority('__rsub__')
    def __sub__(self, other):
        return MatAdd(self, -other).doit()

    @_sympifyit('other', NotImplemented)
    @call_highest_priority('__sub__')
    def __rsub__(self, other):
        return MatAdd(other, -self).doit()

    @_sympifyit('other', NotImplemented)
    @call_highest_priority('__rmul__')
    def __mul__(self, other):
        return MatMul(self, other).doit()

    @_sympifyit('other', NotImplemented)
    @call_highest_priority('__mul__')
    def __rmul__(self, other):
        return MatMul(other, self).doit()

    @_sympifyit('other', NotImplemented)
    @call_highest_priority('__rpow__')
    def __pow__(self, other):
        # Special-case the exponents that have a simpler canonical form.
        if not self.is_square:
            raise ShapeError("Power of non-square matrix %s" % self)
        if other is S.NegativeOne:
            return Inverse(self)
        elif other is S.Zero:
            return Identity(self.rows)
        elif other is S.One:
            return self
        return MatPow(self, other)

    @_sympifyit('other', NotImplemented)
    @call_highest_priority('__pow__')
    def __rpow__(self, other):
        raise NotImplementedError("Matrix Power not defined")

    @_sympifyit('other', NotImplemented)
    @call_highest_priority('__rdiv__')
    def __div__(self, other):
        # Division is multiplication by the inverse of the divisor.
        return self * other**S.NegativeOne

    @_sympifyit('other', NotImplemented)
    @call_highest_priority('__div__')
    def __rdiv__(self, other):
        raise NotImplementedError()
        #return MatMul(other, Pow(self, S.NegativeOne))

    # Python 3 names for the division operators above.
    __truediv__ = __div__
    __rtruediv__ = __rdiv__

    @property
    def rows(self):
        return self.shape[0]

    @property
    def cols(self):
        return self.shape[1]

    @property
    def is_square(self):
        return self.rows == self.cols

    def _eval_conjugate(self):
        # conjugate == adjoint of the transpose.
        from sympy.matrices.expressions.adjoint import Adjoint
        from sympy.matrices.expressions.transpose import Transpose
        return Adjoint(Transpose(self))

    def _eval_inverse(self):
        from sympy.matrices.expressions.inverse import Inverse
        return Inverse(self)

    def _eval_transpose(self):
        return Transpose(self)

    def _eval_power(self, exp):
        return MatPow(self, exp)

    def _eval_simplify(self, **kwargs):
        if self.is_Atom:
            return self
        else:
            # Simplify argument-wise and rebuild the same node type.
            return self.__class__(*[simplify(x, **kwargs) for x in self.args])

    def _eval_adjoint(self):
        from sympy.matrices.expressions.adjoint import Adjoint
        return Adjoint(self)

    def _entry(self, i, j):
        raise NotImplementedError(
            "Indexing not implemented for %s" % self.__class__.__name__)

    def adjoint(self):
        return adjoint(self)

    def conjugate(self):
        return conjugate(self)

    def transpose(self):
        from sympy.matrices.expressions.transpose import transpose
        return transpose(self)

    T = property(transpose, None, None, 'Matrix transposition.')

    def inverse(self):
        return self._eval_inverse()

    @property
    def I(self):
        return self.inverse()

    def valid_index(self, i, j):
        # Note the three-valued logic: symbolic comparisons may stay
        # unevaluated, so test `!= False` rather than truthiness.
        def is_valid(idx):
            return isinstance(idx, (int, Integer, Symbol, Expr))
        return (is_valid(i) and is_valid(j) and
                (0 <= i) != False and (i < self.rows) != False and
                (0 <= j) != False and (j < self.cols) != False)

    def __getitem__(self, key):
        # A single slice selects rows; all columns are kept.
        if not isinstance(key, tuple) and isinstance(key, slice):
            from sympy.matrices.expressions.slice import MatrixSlice
            return MatrixSlice(self, key, (0, None, 1))
        if isinstance(key, tuple) and len(key) == 2:
            i, j = key
            if isinstance(i, slice) or isinstance(j, slice):
                from sympy.matrices.expressions.slice import MatrixSlice
                return MatrixSlice(self, i, j)
            i, j = sympify(i), sympify(j)
            if self.valid_index(i, j) != False:
                return self._entry(i, j)
            else:
                raise IndexError("Invalid indices (%s, %s)" % (i, j))
        raise IndexError("Invalid index, wanted %s[i,j]" % self)

    def as_explicit(self):
        """
        Returns a dense Matrix with elements represented explicitly

        Returns an object of type ImmutableMatrix.

        Examples
        ========

        >>> from sympy import Identity
        >>> I = Identity(3)
        >>> I
        I
        >>> I.as_explicit()
        Matrix([
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]])

        See Also
        ========
        as_mutable: returns mutable Matrix type
        """
        from sympy.matrices.immutable import ImmutableMatrix
        return ImmutableMatrix([[    self[i, j]
                            for j in range(self.cols)]
                            for i in range(self.rows)])

    def as_mutable(self):
        """
        Returns a dense, mutable matrix with elements represented explicitly

        Examples
        ========

        >>> from sympy import Identity
        >>> I = Identity(3)
        >>> I
        I
        >>> I.shape
        (3, 3)
        >>> I.as_mutable()
        Matrix([
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]])

        See Also
        ========
        as_explicit: returns ImmutableMatrix
        """
        return self.as_explicit().as_mutable()

    def __array__(self):
        # NumPy interoperability: materialize entries into an object array.
        from numpy import empty
        a = empty(self.shape, dtype=object)
        for i in range(self.rows):
            for j in range(self.cols):
                a[i, j] = self[i, j]
        return a

    def equals(self, other):
        """
        Test elementwise equality between matrices, potentially of different
        types

        >>> from sympy import Identity, eye
        >>> Identity(3).equals(eye(3))
        True
        """
        return self.as_explicit().equals(other)

    def canonicalize(self):
        return self

    def as_coeff_mmul(self):
        # Split into (scalar coefficient, matrix product); trivial here.
        return 1, MatMul(self)
class MatrixElement(Expr):
    # Symbolic (i, j) entry of a matrix expression; args are (matrix, i, j).
    parent = property(lambda self: self.args[0])
    i = property(lambda self: self.args[1])
    j = property(lambda self: self.args[2])
    # Allow a MatrixElement to appear as a differentiation variable.
    _diff_wrt = True
class MatrixSymbol(MatrixExpr):
    """Symbolic representation of a Matrix object

    Creates a SymPy Symbol to represent a Matrix. This matrix has a shape and
    can be included in Matrix Expressions

    >>> from sympy import MatrixSymbol, Identity
    >>> A = MatrixSymbol('A', 3, 4) # A 3 by 4 Matrix
    >>> B = MatrixSymbol('B', 4, 3) # A 4 by 3 Matrix
    >>> A.shape
    (3, 4)
    >>> 2*A*B + Identity(3)
    I + 2*A*B
    """
    is_commutative = False

    def __new__(cls, name, n, m):
        # Only the dimensions are sympified; the name stays a plain string.
        n, m = sympify(n), sympify(m)
        obj = Basic.__new__(cls, name, n, m)
        return obj

    def _hashable_content(self):
        return(self.name, self.shape)

    @property
    def shape(self):
        return self.args[1:3]

    @property
    def name(self):
        return self.args[0]

    def _eval_subs(self, old, new):
        # only do substitutions in shape
        shape = Tuple(*self.shape)._subs(old, new)
        return MatrixSymbol(self.name, *shape)

    def __call__(self, *args):
        raise TypeError( "%s object is not callable" % self.__class__ )

    def _entry(self, i, j):
        # Entries of a symbolic matrix stay symbolic.
        return MatrixElement(self, i, j)

    @property
    def free_symbols(self):
        return set((self,))

    def doit(self, **hints):
        # Recursively evaluate the shape expressions when deep=True.
        if hints.get('deep', True):
            return type(self)(self.name, self.args[1].doit(**hints),
                    self.args[2].doit(**hints))
        else:
            return self

    def _eval_simplify(self, **kwargs):
        return self
class Identity(MatrixExpr):
    """The Matrix Identity I - multiplicative identity

    >>> from sympy.matrices import Identity, MatrixSymbol
    >>> A = MatrixSymbol('A', 3, 5)
    >>> I = Identity(3)
    >>> I*A
    A
    """

    is_Identity = True

    def __new__(cls, n):
        return super(Identity, cls).__new__(cls, sympify(n))

    @property
    def rows(self):
        return self.args[0]

    @property
    def cols(self):
        return self.args[0]

    @property
    def shape(self):
        return (self.args[0], self.args[0])

    def _eval_transpose(self):
        # I is symmetric.
        return self

    def _eval_trace(self):
        # trace(I_n) == n.
        return self.rows

    def _eval_inverse(self):
        return self

    def conjugate(self):
        return self

    def _entry(self, i, j):
        if i == j:
            return S.One
        else:
            return S.Zero

    def _eval_determinant(self):
        return S.One
class ZeroMatrix(MatrixExpr):
    """The Matrix Zero 0 - additive identity

    >>> from sympy import MatrixSymbol, ZeroMatrix
    >>> A = MatrixSymbol('A', 3, 5)
    >>> Z = ZeroMatrix(3, 5)
    >>> A+Z
    A
    >>> Z*A.T
    0
    """
    is_ZeroMatrix = True

    def __new__(cls, m, n):
        return super(ZeroMatrix, cls).__new__(cls, m, n)

    @property
    def shape(self):
        return (self.args[0], self.args[1])

    @_sympifyit('other', NotImplemented)
    @call_highest_priority('__rpow__')
    def __pow__(self, other):
        if other != 1 and not self.is_square:
            raise ShapeError("Power of non-square matrix %s" % self)
        if other == 0:
            # 0**0 is defined here as the identity.
            return Identity(self.rows)
        return self

    def _eval_transpose(self):
        return ZeroMatrix(self.cols, self.rows)

    def _eval_trace(self):
        return S.Zero

    def _eval_determinant(self):
        return S.Zero

    def conjugate(self):
        return self

    def _entry(self, i, j):
        return S.Zero

    def __nonzero__(self):
        # A zero matrix is always falsy.
        return False

    # Python 3 spelling of __nonzero__.
    __bool__ = __nonzero__
def matrix_symbols(expr):
    """Return the matrix symbols appearing free in ``expr`` as a list."""
    symbols = []
    for sym in expr.free_symbols:
        if sym.is_Matrix:
            symbols.append(sym)
    return symbols
from .matmul import MatMul
from .matadd import MatAdd
from .matpow import MatPow
from .transpose import Transpose
from .inverse import Inverse
| bsd-3-clause |
mkaluza/external_chromium_org | chrome/common/extensions/docs/server2/test_patcher.py | 121 | 1055 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from future import Future
from patcher import Patcher
class TestPatcher(Patcher):
  """In-memory Patcher test double that serves canned data and counts calls."""

  def __init__(self, version, patched_files, patch_data):
    self._version = version
    self._patched_files = patched_files
    # Maps path -> file content; a missing key models a deleted file.
    self._patch_data = patch_data
    # Call counters, inspected by tests to verify caching behaviour.
    self.get_version_count = 0
    self.get_patched_files_count = 0
    self.apply_count = 0

  def GetVersion(self):
    self.get_version_count += 1
    return self._version

  def GetPatchedFiles(self, version=None):
    self.get_patched_files_count += 1
    return self._patched_files

  def Apply(self, paths, file_system, version=None):
    self.apply_count += 1
    try:
      return Future(value=dict((path, self._patch_data[path])
                               for path in paths))
    except KeyError:
      # Any path absent from patch_data is treated as deleted by the patch.
      raise FileNotFoundError('One of %s is deleted in the patch.' % paths)

  def GetIdentity(self):
    return self.__class__.__name__
| bsd-3-clause |
garbled1/ansible | lib/ansible/playbook/conditional.py | 68 | 10410 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import re
from jinja2.compiler import generate
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import text_type
from ansible.module_utils._text import to_native
from ansible.playbook.attribute import FieldAttribute
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# Matches "<var> is [not] defined/undefined" tests so undefined-variable
# errors can be resolved without fully templating the conditional.
DEFINED_REGEX = re.compile(r'(hostvars\[.+\]|[\w_]+)\s+(not\s+is|is|is\s+not)\s+(defined|undefined)')
# Detects lookup() usage inside a conditional string.
LOOKUP_REGEX = re.compile(r'lookup\s*\(')
# A "bare" variable name that may be used directly as a conditional.
VALID_VAR_REGEX = re.compile("^[_A-Za-z][_a-zA-Z0-9]*$")
class Conditional:

    '''
    This is a mix-in class, to be used with Base to allow the object
    to be run conditionally when a condition is met or skipped.
    '''

    _when = FieldAttribute(isa='list', default=[])

    def __init__(self, loader=None):
        # when used directly, this class needs a loader, but we want to
        # make sure we don't trample on the existing one if this class
        # is used as a mix-in with a playbook base class
        if not hasattr(self, '_loader'):
            if loader is None:
                raise AnsibleError("a loader must be specified when using Conditional() directly")
            else:
                self._loader = loader
        super(Conditional, self).__init__()

    def _validate_when(self, attr, name, value):
        # Normalize a scalar `when:` value into a single-element list.
        if not isinstance(value, list):
            setattr(self, name, [value])

    def _get_attr_when(self):
        '''
        Override for the 'tags' getattr fetcher, used from Base.
        '''
        when = self._attributes['when']
        if when is None:
            when = []
        if hasattr(self, '_get_parent_attribute'):
            when = self._get_parent_attribute('when', extend=True, prepend=True)
        return when

    def extract_defined_undefined(self, conditional):
        # Collect every "<var> is [not] [un]defined" triple from the string,
        # scanning left to right.
        results = []
        cond = conditional
        m = DEFINED_REGEX.search(cond)
        while m:
            results.append(m.groups())
            cond = cond[m.end():]
            m = DEFINED_REGEX.search(cond)
        return results

    def evaluate_conditional(self, templar, all_vars):
        '''
        Loops through the conditionals set on this object, returning
        False if any of them evaluate as such.
        '''

        # since this is a mix-in, it may not have an underlying datastructure
        # associated with it, so we pull it out now in case we need it for
        # error reporting below
        ds = None
        if hasattr(self, '_ds'):
            ds = getattr(self, '_ds')

        try:
            # this allows for direct boolean assignments to conditionals "when: False"
            if isinstance(self.when, bool):
                return self.when

            for conditional in self.when:
                if not self._check_conditional(conditional, templar, all_vars):
                    return False
        except Exception as e:
            raise AnsibleError(
                "The conditional check '%s' failed. The error was: %s" % (to_native(conditional), to_native(e)), obj=ds
            )

        return True

    def _check_conditional(self, conditional, templar, all_vars):
        '''
        This method does the low-level evaluation of each conditional
        set on this object, using jinja2 to wrap the conditionals for
        evaluation.
        '''

        original = conditional
        # An absent conditional always passes.
        if conditional is None or conditional == '':
            return True

        if templar.is_template(conditional):
            display.warning('when statements should not include jinja2 '
                            'templating delimiters such as {{ }} or {%% %%}. '
                            'Found: %s' % conditional)

        # pull the "bare" var out, which allows for nested conditionals
        # and things like:
        # - assert:
        #     that:
        #     - item
        #   with_items:
        #   - 1 == 1
        if conditional in all_vars and VALID_VAR_REGEX.match(conditional):
            conditional = all_vars[conditional]

        # make sure the templar is using the variables specified with this method
        templar.set_available_variables(variables=all_vars)

        try:
            # if the conditional is "unsafe", disable lookups
            disable_lookups = hasattr(conditional, '__UNSAFE__')
            conditional = templar.template(conditional, disable_lookups=disable_lookups)
            if not isinstance(conditional, text_type) or conditional == "":
                return conditional

            # update the lookups flag, as the string returned above may now be unsafe
            # and we don't want future templating calls to do unsafe things
            disable_lookups |= hasattr(conditional, '__UNSAFE__')

            # First, we do some low-level jinja2 parsing involving the AST format of the
            # statement to ensure we don't do anything unsafe (using the disable_lookup flag above)
            class CleansingNodeVisitor(ast.NodeVisitor):
                def generic_visit(self, node, inside_call=False, inside_yield=False):
                    if isinstance(node, ast.Call):
                        inside_call = True
                    elif isinstance(node, ast.Yield):
                        inside_yield = True
                    elif isinstance(node, ast.Str):
                        if disable_lookups:
                            if inside_call and node.s.startswith("__"):
                                # calling things with a dunder is generally bad at this point...
                                raise AnsibleError(
                                    "Invalid access found in the conditional: '%s'" % conditional
                                )
                            elif inside_yield:
                                # we're inside a yield, so recursively parse and traverse the AST
                                # of the result to catch forbidden syntax from executing
                                parsed = ast.parse(node.s, mode='exec')
                                cnv = CleansingNodeVisitor()
                                cnv.visit(parsed)
                    # iterate over all child nodes
                    for child_node in ast.iter_child_nodes(node):
                        self.generic_visit(
                            child_node,
                            inside_call=inside_call,
                            inside_yield=inside_yield
                        )
            try:
                # Compile the conditional with the templar's filters/tests and
                # run the sanitizer over the generated module's AST.
                e = templar.environment.overlay()
                e.filters.update(templar._get_filters())
                e.tests.update(templar._get_tests())

                res = e._parse(conditional, None, None)
                res = generate(res, e, None, None)
                parsed = ast.parse(res, mode='exec')

                cnv = CleansingNodeVisitor()
                cnv.visit(parsed)
            except Exception as e:
                raise AnsibleError("Invalid conditional detected: %s" % to_native(e))

            # and finally we generate and template the presented string and look at the resulting string
            presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
            val = templar.template(presented, disable_lookups=disable_lookups).strip()
            if val == "True":
                return True
            elif val == "False":
                return False
            else:
                raise AnsibleError("unable to evaluate conditional: %s" % original)
        except (AnsibleUndefinedVariable, UndefinedError) as e:
            # the templating failed, meaning most likely a variable was undefined. If we happened
            # to be looking for an undefined variable, return True, otherwise fail
            try:
                # first we extract the variable name from the error message
                var_name = re.compile(r"'(hostvars\[.+\]|[\w_]+)' is undefined").search(str(e)).groups()[0]
                # next we extract all defined/undefined tests from the conditional string
                def_undef = self.extract_defined_undefined(conditional)
                # then we loop through these, comparing the error variable name against
                # each def/undef test we found above. If there is a match, we determine
                # whether the logic/state mean the variable should exist or not and return
                # the corresponding True/False
                for (du_var, logic, state) in def_undef:
                    # when we compare the var names, normalize quotes because something
                    # like hostvars['foo'] may be tested against hostvars["foo"]
                    if var_name.replace("'", '"') == du_var.replace("'", '"'):
                        # the should exist is a xor test between a negation in the logic portion
                        # against the state (defined or undefined)
                        should_exist = ('not' in logic) != (state == 'defined')
                        if should_exist:
                            return False
                        else:
                            return True
                # as nothing above matched the failed var name, re-raise here to
                # trigger the AnsibleUndefinedVariable exception again below
                raise
            except Exception as new_e:
                # NOTE(review): new_e is intentionally discarded; the original
                # undefined-variable error `e` is the one reported.
                raise AnsibleUndefinedVariable("error while evaluating conditional (%s): %s" % (original, e))
| gpl-3.0 |
r-kitaev/lucid-python-cerberus | pycerberus/validators/email.py | 2 | 3354 | # -*- coding: UTF-8 -*-
#
# The MIT License
#
# Copyright (c) 2010 Felix Schwarz <felix.schwarz@oss.schwarz.eu>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
from pycerberus.i18n import _
from pycerberus.validators.domain import DomainNameValidator
__all__ = ['EmailAddressValidator']
class EmailAddressValidator(DomainNameValidator):
    """A validator to check if an email address is syntactically correct.

    Please note that there is no clear definition of an 'email address'. Some
    parts are defined in consecutive RFCs, there is a notion of 'string that is
    accepted by a MTA' and last but not least a fuzzy 'general expectation' what
    an email address should be about.

    Therefore this validator is currently extremly simple and does not handle
    internationalized local parts/domains.

    For the future I envision some extensions here:
     - support internationalized domain names (possibly also encode to/
       decode from idna) if specified by flag
     - More flexible structure if there must be a second-level domain

    Something that should not happen in this validator:
     - Open SMTP connections to check if an account exists
     - specify default domains if missing

    These things can be implemented in derived validators
    """

    def messages(self):
        # Message catalog used by self.error(); keys are referenced in
        # validate() and _validate_localpart() below.
        return {
            'single_at': _(u"An email address must contain a single '@'."),
            'invalid_email_character': _(u'Invalid character %(invalid_character)s in email address %(emailaddress)s.'),
        }

    def validate(self, emailaddress, context):
        """Split on '@', validate the domain (via the superclass) and the local part."""
        parts = emailaddress.split('@')
        if len(parts) != 2:
            self.error('single_at', emailaddress, context)
        localpart, domain = parts
        self.super(domain, context)
        self._validate_localpart(localpart, emailaddress, context)

    # --------------------------------------------------------------------------
    # private helpers

    def _validate_localpart(self, localpart, emailaddress, context):
        # Reject the first character outside [a-zA-Z0-9._]. The pattern is now
        # a raw string: the original "([^a-zA-Z0-9\.\_])" relied on the invalid
        # string-literal escapes '\.' and '\_' (a DeprecationWarning since
        # Python 3.6, scheduled to become a SyntaxError). Inside a character
        # class '\.' and '\_' match the same literals, so behavior is unchanged.
        match = re.search(r'([^a-zA-Z0-9._])', localpart)
        if match is not None:
            values = dict(invalid_character=repr(match.group(1)), emailaddress=repr(emailaddress))
            self.error('invalid_email_character', localpart, context, **values)
| mit |
dbckz/ansible | test/units/modules/network/nxos/test_nxos_command.py | 51 | 4103 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_command
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosCommandModule(TestNxosModule):
    """Unit tests for the nxos_command module with run_commands mocked out."""

    module = nxos_command

    def setUp(self):
        # Patch run_commands so no device connection is needed; fixture files
        # stand in for real device output.
        self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_command.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()

            for item in commands:
                try:
                    # Commands may arrive JSON-encoded (structured-output
                    # requests); fall back to the raw string otherwise.
                    obj = json.loads(item['command'])
                    command = obj['command']
                except ValueError:
                    command = item['command']
                # Fixture file name mirrors the command with spaces replaced.
                filename = str(command).replace(' ', '_')
                filename = 'nxos_command/%s.txt' % filename
                output.append(load_fixture(filename))
            return output

        self.run_commands.side_effect = load_from_file

    def test_nxos_command_simple(self):
        set_module_args(dict(commands=['show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue(result['stdout'][0].startswith('Cisco'))

    def test_nxos_command_multiple(self):
        set_module_args(dict(commands=['show version', 'show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 2)
        self.assertTrue(result['stdout'][0].startswith('Cisco'))

    def test_nxos_command_wait_for(self):
        wait_for = 'result[0] contains "Cisco NX-OS"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module()

    def test_nxos_command_wait_for_fails(self):
        # Default retry count is 10, so the command is attempted 10 times.
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 10)

    def test_nxos_command_retries(self):
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 2)

    def test_nxos_command_match_any(self):
        wait_for = ['result[0] contains "Cisco"',
                    'result[0] contains "test string"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
        self.execute_module()

    def test_nxos_command_match_all(self):
        wait_for = ['result[0] contains "Cisco"',
                    'result[0] contains "system image file"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
        self.execute_module()

    def test_nxos_command_match_all_failure(self):
        wait_for = ['result[0] contains "Cisco"',
                    'result[0] contains "test string"']
        commands = ['show version', 'show version']
        set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
        self.execute_module(failed=True)
| gpl-3.0 |
googleapis/googleapis-gen | google/cloud/dialogflow/v2beta1/dialogflow-v2beta1-py/google/cloud/dialogflow_v2beta1/types/version.py | 1 | 7871 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
# Module-level proto manifest: registers the message classes defined below
# under the 'google.cloud.dialogflow.v2beta1' protobuf package.
__protobuf__ = proto.module(
    package='google.cloud.dialogflow.v2beta1',
    manifest={
        'Version',
        'ListVersionsRequest',
        'ListVersionsResponse',
        'GetVersionRequest',
        'CreateVersionRequest',
        'UpdateVersionRequest',
        'DeleteVersionRequest',
    },
)
class Version(proto.Message):
    r"""You can create multiple versions of your agent and publish them to
    separate environments.

    When you edit an agent, you are editing the draft agent. At any
    point, you can save the draft agent as an agent version, which is an
    immutable snapshot of your agent.

    When you save the draft agent, it is published to the default
    environment. When you create agent versions, you can publish them to
    custom environments. You can create a variety of custom environments
    for:

    - testing
    - development
    - production
    - etc.

    For more information, see the `versions and environments
    guide <https://cloud.google.com/dialogflow/docs/agents-versions>`__.

    Attributes:
        name (str):
            Output only. The unique identifier of this agent version.
            Supported formats:

            - ``projects/<Project ID>/agent/versions/<Version ID>``
            - ``projects/<Project ID>/locations/<Location ID>/agent/versions/<Version ID>``
        description (str):
            Optional. The developer-provided description
            of this version.
        version_number (int):
            Output only. The sequential number of this
            version. This field is read-only which means it
            cannot be set by create and update methods.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The creation time of this
            version. This field is read-only, i.e., it
            cannot be set by create and update methods.
        status (google.cloud.dialogflow_v2beta1.types.Version.VersionStatus):
            Output only. The status of this version. This
            field is read-only and cannot be set by create
            and update methods.
    """
    class VersionStatus(proto.Enum):
        r"""The status of a version."""
        VERSION_STATUS_UNSPECIFIED = 0
        IN_PROGRESS = 1
        READY = 2
        FAILED = 3

    # NOTE: the ``number`` arguments are protobuf wire tags and must not be
    # changed; tag 5 is intentionally unused here (tags jump from 4 to 6).
    name = proto.Field(
        proto.STRING,
        number=1,
    )
    description = proto.Field(
        proto.STRING,
        number=2,
    )
    version_number = proto.Field(
        proto.INT32,
        number=3,
    )
    create_time = proto.Field(
        proto.MESSAGE,
        number=4,
        message=timestamp_pb2.Timestamp,
    )
    status = proto.Field(
        proto.ENUM,
        number=6,
        enum=VersionStatus,
    )
class ListVersionsRequest(proto.Message):
    r"""The request message for
    [Versions.ListVersions][google.cloud.dialogflow.v2beta1.Versions.ListVersions].

    Attributes:
        parent (str):
            Required. The agent to list all versions from. Supported
            formats:

            - ``projects/<Project ID>/agent``
            - ``projects/<Project ID>/locations/<Location ID>/agent``
        page_size (int):
            Optional. The maximum number of items to
            return in a single page. By default 100 and at
            most 1000.
        page_token (str):
            Optional. The next_page_token value returned from a previous
            list request.
    """
    # Protobuf wire tags; do not renumber.
    parent = proto.Field(
        proto.STRING,
        number=1,
    )
    page_size = proto.Field(
        proto.INT32,
        number=2,
    )
    page_token = proto.Field(
        proto.STRING,
        number=3,
    )
class ListVersionsResponse(proto.Message):
    r"""The response message for
    [Versions.ListVersions][google.cloud.dialogflow.v2beta1.Versions.ListVersions].

    Attributes:
        versions (Sequence[google.cloud.dialogflow_v2beta1.types.Version]):
            The list of agent versions. There will be a maximum number
            of items returned based on the page_size field in the
            request.
        next_page_token (str):
            Token to retrieve the next page of results,
            or empty if there are no more results in the
            list.
    """
    @property
    def raw_page(self):
        # Used by the client library's paging machinery to treat this
        # response itself as a page of results.
        return self

    versions = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message='Version',
    )
    next_page_token = proto.Field(
        proto.STRING,
        number=2,
    )
class GetVersionRequest(proto.Message):
    r"""The request message for
    [Versions.GetVersion][google.cloud.dialogflow.v2beta1.Versions.GetVersion].

    Attributes:
        name (str):
            Required. The name of the version. Supported formats:

            - ``projects/<Project ID>/agent/versions/<Version ID>``
            - ``projects/<Project ID>/locations/<Location ID>/agent/versions/<Version ID>``
    """
    name = proto.Field(
        proto.STRING,
        number=1,
    )
class CreateVersionRequest(proto.Message):
    r"""The request message for
    [Versions.CreateVersion][google.cloud.dialogflow.v2beta1.Versions.CreateVersion].

    Attributes:
        parent (str):
            Required. The agent to create a version for. Supported
            formats:

            - ``projects/<Project ID>/agent``
            - ``projects/<Project ID>/locations/<Location ID>/agent``
        version (google.cloud.dialogflow_v2beta1.types.Version):
            Required. The version to create.
    """
    parent = proto.Field(
        proto.STRING,
        number=1,
    )
    version = proto.Field(
        proto.MESSAGE,
        number=2,
        message='Version',
    )
class UpdateVersionRequest(proto.Message):
    r"""The request message for
    [Versions.UpdateVersion][google.cloud.dialogflow.v2beta1.Versions.UpdateVersion].

    Attributes:
        version (google.cloud.dialogflow_v2beta1.types.Version):
            Required. The version to update. Supported formats:

            - ``projects/<Project ID>/agent/versions/<Version ID>``
            - ``projects/<Project ID>/locations/<Location ID>/agent/versions/<Version ID>``
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. The mask to control which fields
            get updated.
    """
    version = proto.Field(
        proto.MESSAGE,
        number=1,
        message='Version',
    )
    update_mask = proto.Field(
        proto.MESSAGE,
        number=2,
        message=field_mask_pb2.FieldMask,
    )
class DeleteVersionRequest(proto.Message):
    r"""The request message for
    [Versions.DeleteVersion][google.cloud.dialogflow.v2beta1.Versions.DeleteVersion].

    Attributes:
        name (str):
            Required. The name of the version to delete. Supported
            formats:

            - ``projects/<Project ID>/agent/versions/<Version ID>``
            - ``projects/<Project ID>/locations/<Location ID>/agent/versions/<Version ID>``
    """
    name = proto.Field(
        proto.STRING,
        number=1,
    )
# Public API is exactly the set of names registered in the proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
ObsidianBlk/GemRB--Unofficial- | gemrb/GUIScripts/bg1/LoadScreen.py | 7 | 1748 | # -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003-2004 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# LoadScreen.py - display Loading screen
###################################################
import GemRB
from GUIDefines import *
LoadScreen = None
def SetLoadScreen ():
    # Intentionally a no-op for this game; presumably kept so the engine can
    # call a uniform LoadScreen API across game scripts -- confirm against the
    # other GUIScripts variants.
    return
def StartLoadScreen ():
    """Build and show the loading-screen window with a progress bar."""
    global LoadScreen

    GemRB.LoadWindowPack ("guils", 640, 480)
    LoadScreen = GemRB.LoadWindow (0)
    LoadScreen.SetFrame ()
    Middle = LoadScreen.GetControl (4)
    # Original string-based lookup kept for reference; a random picture is
    # used instead.
    #LoadPic = GemRB.GetGameString (STR_LOADMOS)
    #if LoadPic=="":
    # Pick one of five background images at random (1d5 roll).
    LoadPic = "GTRSK00"+str(GemRB.Roll(1,5,0))
    Middle.SetMOS (LoadPic)
    Bar = LoadScreen.GetControl (0)
    Progress = 0
    # Bind the progress bar to the engine variable "Progress"; the engine
    # updates it as loading advances.
    GemRB.SetVar ("Progress", Progress)
    Bar.SetVarAssoc ("Progress", Progress)
    # Tear the screen down once the bar reaches the end.
    Bar.SetEvent (IE_GUI_PROGRESS_END_REACHED, EndLoadScreen)
    Skull = LoadScreen.GetControl (3)
    Skull.SetMOS ("GTRBPSK")
    LoadScreen.SetVisible (WINDOW_VISIBLE)
    return
def EndLoadScreen ():
    """Swap in the completed-skull image, repaint once, then unload the window."""
    Skull = LoadScreen.GetControl (3)
    Skull.SetMOS ("GTRBPSK2")
    LoadScreen.SetVisible (WINDOW_VISIBLE)
    LoadScreen.Unload()
    return
| gpl-2.0 |
dipeshbh/nodebbdb | node_modules/sitemap/env/lib/python2.7/site-packages/pip/pep425tags.py | 469 | 2969 | """Generate and work with PEP 425 Compatibility Tags."""
import sys
import warnings
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
import distutils.util
def get_abbr_impl():
    """Return abbreviated implementation name.

    'pp' for PyPy, 'jy' for Jython, 'ip' for IronPython and 'cp' for
    everything else (i.e. CPython).
    """
    if hasattr(sys, 'pypy_version_info'):
        return 'pp'
    if sys.platform.startswith('java'):
        return 'jy'
    if sys.platform == 'cli':
        return 'ip'
    return 'cp'
def get_impl_ver():
    """Return implementation version as a digit string, e.g. '27' or '311'."""
    major, minor = sys.version_info[:2]
    return '%d%d' % (major, minor)
def get_platform():
    """Return our platform name 'win32', 'linux_x86_64'"""
    # XXX remove distutils dependency
    # Normalise distutils' platform string (e.g. 'linux-x86_64',
    # 'macosx-10.9-x86_64') into wheel-tag form by mapping '.' and '-' to '_'.
    return distutils.util.get_platform().replace('.', '_').replace('-', '_')
def get_supported(versions=None, noarch=False):
    """Return a list of supported tags for each version specified in
    `versions`.

    Each tag is an (implementation, abi, platform) triple, e.g.
    ('cp27', 'none', 'linux_x86_64'), ordered from most to least specific.

    :param versions: a list of string versions, of the form ["33", "32"],
        or None. The first version will be assumed to support our ABI.
    :param noarch: when True, omit the platform-specific tags entirely.
    """
    supported = []

    # Versions must be given with respect to the preference
    if versions is None:
        versions = []
        major = sys.version_info[0]
        # Support all previous minor Python versions.
        for minor in range(sys.version_info[1], -1, -1):
            versions.append(''.join(map(str, (major, minor))))

    impl = get_abbr_impl()

    abis = []

    try:
        soabi = sysconfig.get_config_var('SOABI')
    except IOError as e:  # Issue #1074
        warnings.warn("{0}".format(e), RuntimeWarning)
        soabi = None

    if soabi and soabi.startswith('cpython-'):
        # Prepend the concrete CPython ABI (e.g. 'cp27mu') so it is preferred
        # over the generic abi3 / 'none' ABIs appended below.
        abis[0:0] = ['cp' + soabi.split('-', 1)[-1]]

    abi3s = set()
    import imp
    # NOTE(review): the 'imp' module is removed in Python 3.12; this code
    # predates that and would need importlib.machinery there.
    for suffix in imp.get_suffixes():
        if suffix[0].startswith('.abi'):
            abi3s.add(suffix[0].split('.', 2)[1])

    abis.extend(sorted(list(abi3s)))

    abis.append('none')

    if not noarch:
        arch = get_platform()
        # Current version, current API (built specifically for our Python):
        for abi in abis:
            supported.append(('%s%s' % (impl, versions[0]), abi, arch))

    # No abi / arch, but requires our implementation:
    for i, version in enumerate(versions):
        supported.append(('%s%s' % (impl, version), 'none', 'any'))
        if i == 0:
            # Tagged specifically as being cross-version compatible
            # (with just the major version specified)
            supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))

    # No abi / arch, generic Python
    for i, version in enumerate(versions):
        supported.append(('py%s' % (version,), 'none', 'any'))
        if i == 0:
            supported.append(('py%s' % (version[0]), 'none', 'any'))

    return supported
# Module-level caches computed once at import time.
supported_tags = get_supported()
supported_tags_noarch = get_supported(noarch=True)
| gpl-3.0 |
gangadharkadam/saloon_frappe_install | frappe/email/email_body.py | 8 | 8491 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.pdf import get_pdf
from frappe.email.smtp import get_outgoing_email_account
from frappe.utils import get_url, scrub_urls, strip, expand_relative_urls, cint, split_emails
import email.utils
from markdown2 import markdown
def get_email(recipients, sender='', msg='', subject='[No Subject]',
    text_content = None, footer=None, print_html=None, formatted=None, attachments=None,
    content=None, reply_to=None, cc=()):
    """Build an EMail object: HTML multipart body plus optional attachments.

    `content` takes precedence over the legacy `msg` argument; plain-text
    (non-HTML) content is rendered through markdown first.
    """
    email_message = EMail(sender, recipients, subject, reply_to=reply_to, cc=cc)

    body = content or msg
    if not body.strip().startswith("<"):
        # Not HTML yet - treat as markdown source.
        body = markdown(body)
    email_message.set_html(body, text_content, footer=footer,
        print_html=print_html, formatted=formatted)

    # Allow a single attachment dict as a convenience.
    attachment_list = [attachments] if isinstance(attachments, dict) else (attachments or [])
    for attachment in attachment_list:
        email_message.add_attachment(**attachment)

    return email_message
class EMail:
    """
    Wrapper on the email module. Email object represents emails to be sent to the client.
    Also provides a clean way to add binary `FileData` attachments
    Also sets all messages as multipart/alternative for cleaner reading in text-only clients

    MIME layout: a multipart/'mixed' root (holds attachments) containing a
    multipart/'alternative' part (text and HTML renderings of the body).
    """
    def __init__(self, sender='', recipients=(), subject='', alternative=0, reply_to=None, cc=()):
        # NOTE: `alternative` is accepted but never read; retained for
        # backward compatibility with existing callers.
        from email.mime.multipart import MIMEMultipart
        from email import Charset
        # Prefer quoted-printable over base64 for utf-8 bodies and headers.
        Charset.add_charset('utf-8', Charset.QP, Charset.QP, 'utf-8')

        if isinstance(recipients, basestring):
            # Accept ';'- and newline-separated address strings as well as lists.
            recipients = recipients.replace(';', ',').replace('\n', '')
            recipients = split_emails(recipients)

        # remove null
        recipients = filter(None, (strip(r) for r in recipients))

        self.sender = sender
        self.reply_to = reply_to or sender
        self.recipients = recipients
        self.subject = subject

        self.msg_root = MIMEMultipart('mixed')
        self.msg_multipart = MIMEMultipart('alternative')
        self.msg_root.attach(self.msg_multipart)
        self.cc = cc or []
        # Tracks whether set_html() has already populated the alternative part.
        self.html_set = False

    def set_html(self, message, text_content = None, footer=None, print_html=None, formatted=None):
        """Attach message in the html portion of multipart/alternative"""
        if not formatted:
            formatted = get_formatted_html(self.subject, message, footer, print_html)

        # this is the first html part of a multi-part message,
        # convert to text well
        if not self.html_set:
            if text_content:
                self.set_text(expand_relative_urls(text_content))
            else:
                self.set_html_as_text(expand_relative_urls(formatted))

        self.set_part_html(formatted)
        self.html_set = True

    def set_text(self, message):
        """
        Attach message in the text portion of multipart/alternative
        """
        from email.mime.text import MIMEText
        part = MIMEText(message, 'plain', 'utf-8')
        self.msg_multipart.attach(part)

    def set_part_html(self, message):
        # Attach the HTML rendering to the alternative part.
        from email.mime.text import MIMEText
        part = MIMEText(message, 'html', 'utf-8')
        self.msg_multipart.attach(part)

    def set_html_as_text(self, html):
        """return html2text"""
        # Derive the plain-text alternative from the HTML body; parse errors
        # are ignored (the message then simply has no text part).
        import HTMLParser
        from html2text import html2text
        try:
            self.set_text(html2text(html))
        except HTMLParser.HTMLParseError:
            pass

    def set_message(self, message, mime_type='text/html', as_attachment=0, filename='attachment.html'):
        """Append the message with MIME content to the root node (as attachment)"""
        from email.mime.text import MIMEText

        maintype, subtype = mime_type.split('/')
        part = MIMEText(message, _subtype = subtype)

        if as_attachment:
            part.add_header('Content-Disposition', 'attachment', filename=filename)

        self.msg_root.attach(part)

    def attach_file(self, n):
        """attach a file from the `FileData` table"""
        from frappe.utils.file_manager import get_file
        res = get_file(n)
        if not res:
            return

        # res is (filename, content).
        self.add_attachment(res[0], res[1])

    def add_attachment(self, fname, fcontent, content_type=None):
        """add attachment"""
        from email.mime.audio import MIMEAudio
        from email.mime.base import MIMEBase
        from email.mime.image import MIMEImage
        from email.mime.text import MIMEText

        import mimetypes
        if not content_type:
            content_type, encoding = mimetypes.guess_type(fname)

        if content_type is None:
            # No guess could be made, or the file is encoded (compressed), so
            # use a generic bag-of-bits type.
            content_type = 'application/octet-stream'

        maintype, subtype = content_type.split('/', 1)
        if maintype == 'text':
            # Note: we should handle calculating the charset
            if isinstance(fcontent, unicode):
                fcontent = fcontent.encode("utf-8")
            part = MIMEText(fcontent, _subtype=subtype, _charset="utf-8")
        elif maintype == 'image':
            part = MIMEImage(fcontent, _subtype=subtype)
        elif maintype == 'audio':
            part = MIMEAudio(fcontent, _subtype=subtype)
        else:
            part = MIMEBase(maintype, subtype)
            part.set_payload(fcontent)
            # Encode the payload using Base64
            from email import encoders
            encoders.encode_base64(part)

        # Set the filename parameter
        if fname:
            part.add_header(b'Content-Disposition',
                ("attachment; filename=\"%s\"" % fname).encode('utf-8'))

        self.msg_root.attach(part)

    def add_pdf_attachment(self, name, html, options=None):
        # Render the HTML to PDF and attach it under `name`.
        self.add_attachment(name, get_pdf(html, options), 'application/octet-stream')

    def get_default_sender(self):
        # Fall back to the configured outgoing account's display name/address.
        email_account = get_outgoing_email_account()
        return email.utils.formataddr((email_account.name, email_account.get("sender") or email_account.get("email_id")))

    def validate(self):
        """validate the email ids"""
        from frappe.utils import validate_email_add
        if not self.sender:
            self.sender = self.get_default_sender()

        validate_email_add(strip(self.sender), True)
        self.reply_to = validate_email_add(strip(self.reply_to) or self.sender, True)

        self.recipients = [strip(r) for r in self.recipients]
        self.cc = [strip(r) for r in self.cc]

        for e in self.recipients + (self.cc or []):
            validate_email_add(e, True)

    def set_message_id(self, message_id):
        # RFC 2822 Message-Id scoped to the current site's domain.
        self.msg_root["Message-Id"] = "<{0}@{1}>".format(message_id, frappe.local.site)

    def make(self):
        """build into msg_root"""
        headers = {
            "Subject": strip(self.subject).encode("utf-8"),
            "From": self.sender.encode("utf-8"),
            "To": ', '.join(self.recipients).encode("utf-8"),
            "Date": email.utils.formatdate(),
            "Reply-To": self.reply_to.encode("utf-8") if self.reply_to else None,
            "CC": ', '.join(self.cc).encode("utf-8") if self.cc else None,
            b'X-Frappe-Site': get_url().encode('utf-8'),
        }

        # reset headers as values may be changed.
        for key, val in headers.iteritems():
            if self.msg_root.has_key(key):
                del self.msg_root[key]
            self.msg_root[key] = val

        # call hook to enable apps to modify msg_root before sending
        for hook in frappe.get_hooks("make_email_body_message"):
            frappe.get_attr(hook)(self)

    def as_string(self):
        """validate, build message and convert to string"""
        self.validate()
        self.make()
        return self.msg_root.as_string()
def get_formatted_html(subject, message, footer=None, print_html=None):
    """Render the standard email template around `message` and return HTML."""
    # imported here to avoid cyclic import
    message = scrub_urls(message)
    # raise_error=False: a missing outgoing account yields None and the
    # signature/footer helpers below handle that gracefully.
    email_account = get_outgoing_email_account(False)

    rendered_email = frappe.get_template("templates/emails/standard.html").render({
        "content": message,
        "signature": get_signature(email_account),
        "footer": get_footer(email_account, footer),
        "title": subject,
        "print_html": print_html,
        "subject": subject
    })

    return rendered_email
def get_signature(email_account):
    """Return the account's signature prefixed with a spacer, or ''."""
    wants_signature = bool(email_account and email_account.add_signature
        and email_account.signature)
    return "<br><br>" + email_account.signature if wants_signature else ""
def get_footer(email_account, footer=None):
    """append a footer (signature)

    Order: caller-supplied footer, account footer, unsubscribe placeholder,
    company address default, then any hook-provided standard footers.
    """
    footer = footer or ""

    if email_account and email_account.footer:
        footer += '<div style="margin: 15px auto;">{0}</div>'.format(email_account.footer)

    # Placeholder replaced downstream with a real unsubscribe link.
    footer += "<!--unsubscribe link here-->"

    company_address = frappe.db.get_default("email_footer_address")

    if company_address:
        footer += '<div style="margin: 15px auto; text-align: center; color: #8d99a6">{0}</div>'\
            .format(company_address.replace("\n", "<br>"))

    if not cint(frappe.db.get_default("disable_standard_email_footer")):
        for default_mail_footer in frappe.get_hooks("default_mail_footer"):
            footer += '<div style="margin: 15px auto;">{0}</div>'.format(default_mail_footer)

    return footer
| mit |
madflojo/cloudroutes-service | src/monitors/checks/http-get-statuscode/__init__.py | 5 | 2298 | #!/usr/bin/python
######################################################################
# Cloud Routes Availability Manager: http-get-statuscode module
# ------------------------------------------------------------------
# This is a moduel for performing http get based health checks.
# This will return true if no errors or false if there are errors
# ------------------------------------------------------------------
# Version: Alpha.20140618
# Original Author: Benjamin J. Cane - madflojo@cloudrout.es
# Contributors:
# - your name here
######################################################################
import requests
# TODO: There should be a common lib where these utility functions can be
# stored. For now, we duplicate code :-(
def ParseHeaders(headers_str):
    """Parse a newline-separated "Key: Value" block into a header dict.

    Blank lines are skipped. Keys and values are whitespace-stripped.
    Raises AssertionError if a non-empty line has an empty key or value,
    and ValueError if a line contains no ':' separator at all.
    """
    headers = {}
    for header in str(headers_str).splitlines():
        header = header.strip()
        # Ignore empty lines
        if not header:
            continue
        # BUG FIX: split on the first ':' only, so values that themselves
        # contain colons (e.g. "Host: example.com:8080") parse correctly
        # instead of raising "too many values to unpack".
        key, value = header.split(':', 1)
        key = key.strip()
        value = value.strip()
        assert key
        assert value
        headers[key] = value
    return headers
def check(**kwargs):
    """ Perform a http get request and validate the return code

    Expects kwargs['jdata'] (monitor definition dict) and kwargs['logger'].
    Returns True when the response status code is in jdata['data']['codes'],
    False on any request failure or non-matching code.
    """
    jdata = kwargs['jdata']
    logger = kwargs['logger']
    headers = {}
    if 'extra_headers' in jdata['data']:
        headers = ParseHeaders(jdata['data']['extra_headers'])
    # Override the Host header so virtual hosts resolve correctly.
    headers['host'] = jdata['data']['host']
    timeout = 3.00
    url = jdata['data']['url']
    try:
        # verify=False: TLS certificate validation is deliberately skipped;
        # stream=True defers downloading the body (only the status is needed).
        result = requests.get(
            url, timeout=timeout, headers=headers, verify=False, stream=True)
    except Exception as e:
        # NOTE(review): "Reqeust" is a typo in the emitted log text; left
        # untouched here since log consumers may match on it.
        line = 'http-get-statuscode: Reqeust to {0} sent for monitor {1} - ' \
               'had an exception: {2}'.format(url, jdata['cid'], e)
        logger.error(line)
        return False
    rcode = str(result.status_code)
    result.close()
    # NOTE(review): assumes jdata['data']['codes'] is a list of status-code
    # strings; if it were a plain string this membership test would do
    # substring matching instead -- verify against the monitor schema.
    if rcode in jdata['data']['codes']:
        line = 'http-get-statuscode: Reqeust to {0} sent for monitor {1} - ' \
               'Successful'.format(url, jdata['cid'])
        logger.info(line)
        return True
    else:
        line = 'http-get-statuscode: Reqeust to {0} sent for monitor {1} - ' \
               'Failure'.format(url, jdata['cid'])
        logger.info(line)
        return False
| apache-2.0 |
dragondjf/QMusic | src/controllers/utilworker.py | 1 | 1618 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
from PyQt5.QtCore import (QObject, pyqtSignal, pyqtSlot, pyqtProperty, QUrl)
from PyQt5.QtGui import QCursor, QDesktopServices
from .utils import registerContext, duration_to_string
class UtilWorker(QObject):
    """QML-facing helper exposing formatting/conversion slots.

    Registered into the QML context under the name '__contextName__' by the
    @registerContext decorator on __init__.
    """

    __contextName__ = 'UtilWorker'

    @registerContext
    def __init__(self, parent=None):
        super(UtilWorker, self).__init__(parent)

    @pyqtSlot(int, result='QString')
    def int_to_string(self, value):
        return str(value)

    @pyqtSlot(int, result='QString')
    def duration_to_string(self, duration):
        # Delegates to the shared utils helper.
        return duration_to_string(duration)

    @pyqtSlot(int, result='QString')
    def size_to_string(self, size):
        ''' convert file size byte to MB. '''
        return '%.2f MB' % (float(size) / (1024 * 1024))

    @pyqtSlot(int, result='QString')
    def bitrate_to_string(self, bitrate):
        ''' convert file size byte to K bit. '''
        return '%.2f K' % (float(bitrate) / 1000)

    @pyqtSlot(int, result='QString')
    def sampleRate_to_string(self, sampleRate):
        ''' convert file size byte to KHz. '''
        return '%.2f KHz' % (float(sampleRate) / 1000)

    @pyqtSlot(int, result='QString')
    def progress_to_string(self, progress):
        ''' convert progress to string like 99%. '''
        return '%d%%' % float(progress)

    @pyqtSlot('QString', result='QString')
    def basename(self, path):
        return os.path.basename(path)

    @pyqtSlot('QString')
    def openUrl(self, path):
        # Open a local file with the desktop's default handler.
        QDesktopServices.openUrl(QUrl.fromLocalFile(path))


# Module-level singleton used by the application.
utilWorker = UtilWorker()
| lgpl-2.1 |
geekboxzone/lollipop_external_chromium_org | chrome/installer/tools/setup_timer.py | 121 | 5080 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script reports time spent by setup.exe in each install/update phase.
It does so by probing for InstallerExtraCode1 changes in the registry and can
run besides any setup.exe. It's best to launch it before setup.exe itself
starts, but can also time remaining stages if launched half-way through.
Refer to InstallerStage in chrome/installer/util/util_constants.h for a brief
description of each stage.
Note that the stages are numbered in the order they were added to setup's
implementation, not in the order they are meant to occur.
This script never ends, it will endlessly report stage timings until killed.
"""
import _winreg
import json
import optparse
import sys
import time
def TimeSetupStages(hive_str, state_key, product_guid, observed_code):
  """Observes setup.exe and reports about timings for each install/update stage.

  Does so by observing the registry value |observed_code| in the key at:
  |hive_str_|\\|state_key|\\|product_guid|.

  Runs forever: polls the registry every 10ms, timing each stage transition,
  and prints a JSON summary whenever the stage value disappears (stage 0).
  Python 2 / Windows only (_winreg, print statement, time.clock).
  """
  hive = (_winreg.HKEY_LOCAL_MACHINE if hive_str == 'HKLM' else
          _winreg.HKEY_CURRENT_USER)
  key = 0
  try:
    key = _winreg.OpenKey(hive, state_key + product_guid, 0, _winreg.KEY_READ)
  except WindowsError as e:
    print 'Error opening %s\\%s\\%s: %s' % (hive_str, state_key, product_guid,
                                            e)
    return

  timings = []
  start_time = 0
  # saw_start distinguishes fully-timed stages from one already in progress
  # when this script attached.
  saw_start = False

  current_stage = 0
  try:
    current_stage, value_type = _winreg.QueryValueEx(key, observed_code)
    assert value_type == _winreg.REG_DWORD
    print 'Starting in already ongoing stage %u' % current_stage
    start_time = time.clock()
  except WindowsError:
    print 'No ongoing stage, waiting for next install/update cycle...'

  while True:
    new_stage = 0
    try:
      new_stage, value_type = _winreg.QueryValueEx(key, observed_code)
      assert value_type == _winreg.REG_DWORD
    except WindowsError:
      # Handle the non-existant case by simply leaving |new_stage == 0|.
      pass
    if current_stage == new_stage:
      # Keep probing until a change is seen.
      time.sleep(0.01)
      continue

    if current_stage != 0:
      # Round elapsed time to 2 digits precision; anything beyond that would be
      # bogus given the above polling loop's precision.
      elapsed_time = round(time.clock() - start_time, 2)
      if saw_start:
        print '%s: Stage %u took %.2f seconds.' % (
            time.strftime("%x %X", time.localtime()), current_stage,
            elapsed_time)
        timings.append({'stage': current_stage, 'time': elapsed_time})
      else:
        print '%s: The remainder of stage %u took %.2f seconds.' % (
            time.strftime("%x %X", time.localtime()), current_stage,
            elapsed_time)
        # Log this timing, but mark that it was already ongoing when this script
        # started timing it.
        timings.append({'stage': current_stage, 'time': elapsed_time,
                        'status': 'missed_start'})

    if new_stage != 0:
      print '%s: Starting stage %u...' % (
          time.strftime("%x %X", time.localtime()), new_stage)
      saw_start = True
    else:
      # Stage value removed: the install/update cycle finished; dump the
      # collected timings and reset for the next cycle.
      print '%s: Install/update complete, stages timings:' % (
          time.strftime("%x %X", time.localtime()))
      print json.dumps(timings, indent=2, sort_keys=True)
      timings = []
      print '%s: No more stages, waiting for next install/update cycle...' % (
          time.strftime("%x %X", time.localtime()))

    current_stage = new_stage
    start_time = time.clock()
def main():
  """Parse command-line flags and hand off to the stage timer (never returns)."""
  usage = 'usage: %prog [options]'
  flag_parser = optparse.OptionParser(
      usage, description="Times Chrome's installer stages.")
  # Registry location to watch.
  flag_parser.add_option(
      '--hive', default='HKLM',
      help='The hive to observe: "HKLM" for system-level installs, "HKCU" for user-level installs, defaults to HKLM.')
  flag_parser.add_option(
      '--state-key', default='Software\\Google\\Update\\ClientState\\',
      help="The client state key to observe, defaults to Google Update's.")
  flag_parser.add_option(
      '--product-guid', default='{4DC8B4CA-1BDA-483e-B5FA-D3C12E15B62D}',
      help="The GUID of the product to observe: defaults to the GUID for the Google Chrome Binaries which is the one being written to on updates.")
  # Which installer code value to poll.
  flag_parser.add_option(
      '--observed-code', default='InstallerExtraCode1',
      help='The installer code to observe under |state_key|\\|product_guid|, defaults to InstallerExtraCode1.')
  flags, _unused_args = flag_parser.parse_args()
  TimeSetupStages(flags.hive, flags.state_key, flags.product_guid,
                  flags.observed_code)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
ndardenne/pymatgen | pymatgen/io/abinit/scheduler_error_handlers.py | 11 | 3632 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
"""
Error handlers for errors originating from the Submission systems.
"""
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "mjvansetten@gmail.com"
__date__ = "May 2014"
from pymatgen.io.abinit.scheduler_error_parsers import get_parser
try:
from custodian.custodian import ErrorHandler
except ImportError:
ErrorHandler = object
# TODO (from SP): Pls move this somewhere else. Custodian and Workflow stuff
# really shouldn't be in pymatgen.
class SchedulerErrorHandler(ErrorHandler):
    """
    Custodian error handler for scheduler related errors
    scheduler_adapter takes the scheduler, it should at least provide a .name attribute indentifying the scheduler,
    currently 'slurm' is supported.
    If the scheduler adapter also provides the methods defined in CorrectorProtocolScheduler, problems can also be
    fixed by .apply_corrections.
    If a application_adapter is also provided and it provides the methods defined in CorrectorProtocolApplication
    problems can also be fixed a the level of the application, e.g. making the application require less memory.
    """
    def __init__(self, scheduler_adapter, application_adapter=None, err_file='queue.err', out_file='queue.out',
                 run_err_file='run.err', batch_err_file='batch.err'):
        # Adapters for fixing problems at the scheduler / application level.
        self.scheduler_adapter = scheduler_adapter
        self.application_adapter = application_adapter
        # Log files scanned by the scheduler-specific parser.
        self.err_file = err_file
        self.out_file = out_file
        self.run_err_file = run_err_file
        self.batch_err_file = batch_err_file
        self.errors = []
        self.corrections = {}

    def check(self):
        """
        Check for the defined errors, put all found errors in self.errors, return True if any were found False if no
        errors were found
        """
        parser = get_parser(self.scheduler_adapter.name, err_file=self.err_file, out_file=self.out_file,
                            run_err_file=self.run_err_file, batch_err_file=self.batch_err_file)
        parser.parse()
        self.errors = parser.errors
        return len(self.errors) > 0

    def correct(self):
        """
        For custodian compatibility
        """
        self.return_corrections()

    def return_corrections(self):
        """Collect the candidate solutions for every detected error.

        Returns a dict mapping each error to its scheduler-level and
        application-level solution lists.
        """
        for error in self.errors:
            # BUG FIX: the second key was previously misspelled
            # 'aplication_adapter_solutions', so the append below raised
            # KeyError for every error.
            self.corrections.update({error: {'scheduler_adapter_solutions': [],
                                             'application_adapter_solutions': []}})
            self.corrections[error]['scheduler_adapter_solutions'].append(error.scheduler_adapter_solutions)
            self.corrections[error]['application_adapter_solutions'].append(error.application_adapter_solutions)
        return self.corrections

    def apply_corrections(self):
        """
        Method to directly apply the corrections.

        Tries scheduler-level solutions first, then application-level ones;
        returns True as soon as one solution method reports success.
        """
        for error in self.errors:
            for solution in error.scheduler_adapter_solutions:
                if self.scheduler_adapter is not None:
                    # BUG FIX: was self.scheduler_adapter.__getattribut__(...),
                    # a misspelling of __getattribute__ that always raised
                    # AttributeError; use getattr() instead.
                    if getattr(self.scheduler_adapter, solution[0].__name__)(solution[1]):
                        return True
            for solution in error.application_adapter_solutions:
                if self.application_adapter is not None:
                    if getattr(self.application_adapter, solution[0].__name__)(solution[1]):
                        return True
        return False
| mit |
DinoCow/airflow | airflow/operators/mysql_operator.py | 7 | 1146 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.mysql.operators.mysql`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.mysql.operators.mysql import MySqlOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.mysql.operators.mysql`.",
DeprecationWarning,
stacklevel=2,
)
| apache-2.0 |
golismero/golismero-devel | thirdparty_libs/netaddr/ip/__init__.py | 9 | 70876 | #-----------------------------------------------------------------------------
# Copyright (c) 2008-2013, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
"""Routines for IPv4 and IPv6 addresses, subnets and ranges."""
import sys as _sys
import re as _re
from netaddr.core import AddrFormatError, AddrConversionError, num_bits, \
DictDotLookup, NOHOST, N, INET_PTON, P, ZEROFILL, Z
from netaddr.strategy import ipv4 as _ipv4, ipv6 as _ipv6
from netaddr.compat import _sys_maxint, _iter_range, _is_str, _int_type, \
_str_type
#-----------------------------------------------------------------------------
# Pre-compiled regexen used by cidr_merge() function.
# Two space-separated binary prefixes that are adjacent siblings: identical
# except the first ends in 0 and the second in 1 (mergeable into the parent).
RE_CIDR_ADJACENT = _re.compile(r'^([01]+)0 \1[1]$')
# Second binary prefix starts with the first, i.e. lies within its subnet.
RE_CIDR_WITHIN = _re.compile(r'^([01]+) \1[10]+$')
# Sanity check: a CIDR bit string must consist solely of binary digits.
RE_VALID_CIDR_BITS = _re.compile('^[01]+$')
#-----------------------------------------------------------------------------
class BaseIP(object):
    """
    An abstract base class for common operations shared between various IP
    related subclasses.
    """
    # _value: unsigned integer value of the address.
    # _module: strategy module (_ipv4 or _ipv6) interpreting that value.
    __slots__ = ('_value', '_module')

    def __init__(self):
        """Constructor."""
        self._value = None
        self._module = None

    def _set_value(self, value):
        # Setter behind the ``value`` property: accepts only integers that
        # fit within the current IP version's address space.
        if not isinstance(value, _int_type):
            raise TypeError('int argument expected, not %s' % type(value))
        if not 0 <= value <= self._module.max_int:
            raise AddrFormatError('value out of bounds for an %s address!'
                % self._module.family_name)
        self._value = value

    value = property(lambda self: self._value, _set_value,
        doc='a positive integer representing the value of IP address/subnet.')

    def key(self):
        """
        :return: a key tuple that uniquely identifies this IP address.
        """
        # Abstract: subclasses must override.
        return NotImplemented

    def sort_key(self):
        """
        :return: A key tuple used to compare and sort this `IPAddress`
            correctly.
        """
        # Abstract: subclasses must override.
        return NotImplemented

    def __hash__(self):
        """
        :return: A hash value uniquely identifying this IP object.
        """
        return hash(self.key())

    def __eq__(self, other):
        """
        :param other: an `IPAddress` or `IPNetwork` object.
        :return: ``True`` if this `IPAddress` or `IPNetwork` object is
            equivalent to ``other``, ``False`` otherwise.
        """
        try:
            return self.key() == other.key()
        except (AttributeError, TypeError):
            # ``other`` has no key() - let Python try the reflected op.
            return NotImplemented

    def __ne__(self, other):
        """
        :param other: an `IPAddress` or `IPNetwork` object.
        :return: ``True`` if this `IPAddress` or `IPNetwork` object is
            not equivalent to ``other``, ``False`` otherwise.
        """
        try:
            return self.key() != other.key()
        except (AttributeError, TypeError):
            return NotImplemented

    def __lt__(self, other):
        """
        :param other: an `IPAddress` or `IPNetwork` object.
        :return: ``True`` if this `IPAddress` or `IPNetwork` object is
            less than ``other``, ``False`` otherwise.
        """
        try:
            return self.sort_key() < other.sort_key()
        except (AttributeError, TypeError):
            return NotImplemented

    def __le__(self, other):
        """
        :param other: an `IPAddress` or `IPNetwork` object.
        :return: ``True`` if this `IPAddress` or `IPNetwork` object is
            less than or equal to ``other``, ``False`` otherwise.
        """
        try:
            return self.sort_key() <= other.sort_key()
        except (AttributeError, TypeError):
            return NotImplemented

    def __gt__(self, other):
        """
        :param other: an `IPAddress` or `IPNetwork` object.
        :return: ``True`` if this `IPAddress` or `IPNetwork` object is
            greater than ``other``, ``False`` otherwise.
        """
        try:
            return self.sort_key() > other.sort_key()
        except (AttributeError, TypeError):
            return NotImplemented

    def __ge__(self, other):
        """
        :param other: an `IPAddress` or `IPNetwork` object.
        :return: ``True`` if this `IPAddress` or `IPNetwork` object is
            greater than or equal to ``other``, ``False`` otherwise.
        """
        try:
            return self.sort_key() >= other.sort_key()
        except (AttributeError, TypeError):
            return NotImplemented

    def is_unicast(self):
        """:return: ``True`` if this IP is unicast, ``False`` otherwise"""
        return not self.is_multicast()

    def is_multicast(self):
        """:return: ``True`` if this IP is multicast, ``False`` otherwise"""
        # IPV4_MULTICAST / IPV6_MULTICAST are module-level constants defined
        # elsewhere in this module.  NOTE(review): implicitly returns None
        # if _module is neither _ipv4 nor _ipv6.
        if self._module == _ipv4:
            return self in IPV4_MULTICAST
        elif self._module == _ipv6:
            return self in IPV6_MULTICAST

    def is_loopback(self):
        """
        :return: ``True`` if this IP is loopback address (not for network
            transmission), ``False`` otherwise.
            References: RFC 3330 and 4291.
        """
        if self._module.version == 4:
            return self in IPV4_LOOPBACK
        elif self._module.version == 6:
            # IPv6 has a single loopback address (::1), hence equality.
            return self == IPV6_LOOPBACK

    def is_private(self):
        """
        :return: ``True`` if this IP is for internal/private use only
            (i.e. non-public), ``False`` otherwise. Reference: RFCs 1918,
            3330, 4193, 3879 and 2365.
        """
        if self._module.version == 4:
            for cidr in IPV4_PRIVATE:
                if self in cidr:
                    return True
        elif self._module.version == 6:
            for cidr in IPV6_PRIVATE:
                if self in cidr:
                    return True
        # Link-local addresses are treated as private too.
        if self.is_link_local():
            return True
        return False

    def is_link_local(self):
        """
        :return: ``True`` if this IP is link-local address ``False`` otherwise.
            Reference: RFCs 3927 and 4291.
        """
        if self._module.version == 4:
            return self in IPV4_LINK_LOCAL
        elif self._module.version == 6:
            return self in IPV6_LINK_LOCAL

    def is_reserved(self):
        """
        :return: ``True`` if this IP is in IANA reserved range, ``False``
            otherwise. Reference: RFCs 3330 and 3171.
        """
        if self._module.version == 4:
            for cidr in IPV4_RESERVED:
                if self in cidr:
                    return True
        elif self._module.version == 6:
            for cidr in IPV6_RESERVED:
                if self in cidr:
                    return True
        return False

    def is_ipv4_mapped(self):
        """
        :return: ``True`` if this IP is an IPv4-mapped IPv6 address
            (``::ffff:x.x.x.x``), ``False`` otherwise.
        """
        # NOTE(review): docstring previously said "IPv4-compatible", but the
        # check below matches the ::ffff:0:0/96 *mapped* prefix (RFC 4291).
        return self._module.version == 6 and (self._value >> 32) == 0xffff

    def is_ipv4_compat(self):
        """
        :return: ``True`` if this IP is an IPv4-compatible IPv6 address
            (``::x.x.x.x``), ``False`` otherwise.
        """
        # NOTE(review): docstring previously said "IPv4-mapped", but the
        # check below matches the ::/96 *compatible* prefix (RFC 4291).
        return self._module.version == 6 and (self._value >> 32) == 0

    @property
    def info(self):
        """
        A record dict containing IANA registration details for this IP address
        if available, None otherwise.
        """
        # Lazy loading of IANA data structures.
        from netaddr.ip.iana import query
        return DictDotLookup(query(self))

    @property
    def version(self):
        """the IP protocol version represented by this IP object."""
        return self._module.version
#-----------------------------------------------------------------------------
class IPAddress(BaseIP):
    """
    An individual IPv4 or IPv6 address without a net mask or subnet prefix.
    To support these and other network based operations, see `IPNetwork`.
    """
    __slots__ = ()

    def __init__(self, addr, version=None, flags=0):
        """
        Constructor.
        :param addr: an IPv4 or IPv6 address which may be represented in an
            accepted string format, as an unsigned integer or as another
            IPAddress object (copy construction).
        :param version: (optional) optimizes version detection if specified
            and distinguishes between IPv4 and IPv6 for addresses with an
            equivalent integer value.
        :param flags: (optional) decides which rules are applied to the
            interpretation of the addr value. Supported constants are
            INET_PTON and ZEROFILL. See the netaddr.core docs for further
            details.
        """
        super(IPAddress, self).__init__()
        if isinstance(addr, BaseIP):
            # Copy constructor.
            if version is not None and version != addr._module.version:
                raise ValueError('cannot switch IP versions using '
                                 'copy constructor!')
            self._value = addr._value
            self._module = addr._module
        else:
            # Explicit IP address version.
            if version is not None:
                if version == 4:
                    self._module = _ipv4
                elif version == 6:
                    self._module = _ipv6
                else:
                    raise ValueError('%r is an invalid IP version!' % version)
            # Duck-typed "is this a string?" check (strings have .upper).
            has_upper = hasattr(addr, 'upper')
            if has_upper and '/' in addr:
                raise ValueError('%s() does not support netmasks or subnet'
                    ' prefixes! See documentation for details.'
                    % self.__class__.__name__)
            if self._module is None:
                # IP version is implicit, detect it from addr.
                if isinstance(addr, _int_type):
                    # Integers within IPv4 range are IPv4; larger values
                    # (up to the IPv6 maximum) are IPv6.
                    try:
                        if 0 <= int(addr) <= _ipv4.max_int:
                            self._value = int(addr)
                            self._module = _ipv4
                        elif _ipv4.max_int < int(addr) <= _ipv6.max_int:
                            self._value = int(addr)
                            self._module = _ipv6
                    except ValueError:
                        pass
                else:
                    # Try each strategy module in turn; first successful
                    # parse wins (IPv4 takes precedence over IPv6).
                    for module in _ipv4, _ipv6:
                        try:
                            self._value = module.str_to_int(addr, flags)
                        except:
                            # Deliberately broad: any parse failure simply
                            # means "try the next module".
                            continue
                        else:
                            self._module = module
                            break
                if self._module is None:
                    raise AddrFormatError('failed to detect a valid IP '
                        'address from %r' % addr)
            else:
                # IP version is explicit.
                if has_upper:
                    try:
                        self._value = self._module.str_to_int(addr, flags)
                    except AddrFormatError:
                        raise AddrFormatError('base address %r is not IPv%d'
                            % (addr, self._module.version))
                else:
                    if 0 <= int(addr) <= self._module.max_int:
                        self._value = int(addr)
                    else:
                        raise AddrFormatError('bad address format: %r' % addr)

    def __getstate__(self):
        """:returns: Pickled state of an `IPAddress` object."""
        return self._value, self._module.version

    def __setstate__(self, state):
        """
        :param state: data used to unpickle a pickled `IPAddress` object.
        """
        value, version = state
        self._value = value
        if version == 4:
            self._module = _ipv4
        elif version == 6:
            self._module = _ipv6
        else:
            raise ValueError('unpickling failed for object state: %s'
                % str(state))

    def netmask_bits(self):
        """
        @return: If this IP is a valid netmask, the number of non-zero
            bits are returned, otherwise it returns the width in bits for
            the IP address version.
        """
        if not self.is_netmask():
            return self._module.width
        # Count trailing zero bits; the mask length is width minus that.
        i_val = self._value
        numbits = 0
        while i_val > 0:
            if i_val & 1 == 1:
                break
            numbits += 1
            i_val >>= 1
        mask_length = self._module.width - numbits
        if not 0 <= mask_length <= self._module.width:
            raise ValueError('Unexpected mask length %d for address type!'
                % mask_length)
        return mask_length

    def is_hostmask(self):
        """
        :return: ``True`` if this IP address host mask, ``False`` otherwise.
        """
        # A hostmask is all-ones in its low bits, so value + 1 must be a
        # power of two (classic n & (n - 1) test).
        int_val = self._value + 1
        return (int_val & (int_val - 1) == 0)

    def is_netmask(self):
        """
        :return: ``True`` if this IP address network mask, ``False`` otherwise.
        """
        # Inverting a netmask yields a hostmask; apply the same
        # power-of-two test to the inverted value.
        int_val = (self._value ^ self._module.max_int) + 1
        return (int_val & (int_val - 1) == 0)

    def __iadd__(self, num):
        """
        Increases the numerical value of this IPAddress by num.
        An IndexError is raised if result exceeds maximum IP address value or
        is less than zero.
        :param num: size of IP address increment.
        """
        new_value = self._value + num
        if 0 <= new_value <= self._module.max_int:
            self._value = new_value
            return self
        raise IndexError('result outside valid IP address boundary!')

    def __isub__(self, num):
        """
        Decreases the numerical value of this IPAddress by num.
        An IndexError is raised if result is less than zero or exceeds maximum
        IP address value.
        :param num: size of IP address decrement.
        """
        new_value = self._value - num
        if 0 <= new_value <= self._module.max_int:
            self._value = new_value
            return self
        raise IndexError('result outside valid IP address boundary!')

    def __add__(self, num):
        """
        Add the numerical value of this IP address to num and provide the
        result as a new IPAddress object.
        :param num: size of IP address increase.
        :return: a new IPAddress object with its numerical value increased by num.
        """
        new_value = self._value + num
        if 0 <= new_value <= self._module.max_int:
            return self.__class__(new_value, self._module.version)
        raise IndexError('result outside valid IP address boundary!')

    __radd__ = __add__

    def __sub__(self, num):
        """
        Subtract the numerical value of this IP address from num providing
        the result as a new IPAddress object.
        :param num: size of IP address decrease.
        :return: a new IPAddress object with its numerical value decreased by num.
        """
        new_value = self._value - num
        if 0 <= new_value <= self._module.max_int:
            return self.__class__(new_value, self._module.version)
        raise IndexError('result outside valid IP address boundary!')

    def __rsub__(self, num):
        """
        Subtract num (lvalue) from the numerical value of this IP address
        (rvalue) providing the result as a new IPAddress object.
        :param num: size of IP address decrease.
        :return: a new IPAddress object with its numerical value decreased by num.
        """
        new_value = num - self._value
        if 0 <= new_value <= self._module.max_int:
            return self.__class__(new_value, self._module.version)
        raise IndexError('result outside valid IP address boundary!')

    def key(self):
        """
        :return: a key tuple that uniquely identifies this IP address.
        """
        # NB - we return the value here twice because this IP Address may
        # be sorted with a list of networks and it should still end up
        # in the expected order.
        return self._module.version, self._value

    def sort_key(self):
        """:return: A key tuple used to compare and sort this `IPAddress` correctly."""
        return self._module.version, self._value, self._module.width

    def __int__(self):
        """:return: the value of this IP address as an unsigned integer"""
        return self._value

    def __long__(self):
        """:return: the value of this IP address as an unsigned integer"""
        return self._value

    def __oct__(self):
        """:return: an octal string representation of this IP address."""
        # Python 2.x
        if self._value == 0:
            return '0'
        return '0%o' % self._value

    def __hex__(self):
        """:return: a hexadecimal string representation of this IP address."""
        # Python 2.x
        return '0x%x' % self._value

    def __index__(self):
        """
        :return: return the integer value of this IP address when called by \
            hex(), oct() or bin().
        """
        # Python 3.x
        return self._value

    def bits(self, word_sep=None):
        """
        :param word_sep: (optional) the separator to insert between words.
            Default: None - use default separator for address type.
        :return: the value of this IP address as a binary digit string."""
        return self._module.int_to_bits(self._value, word_sep)

    @property
    def packed(self):
        """The value of this IP address as a packed binary string."""
        return self._module.int_to_packed(self._value)

    @property
    def words(self):
        """
        A list of unsigned integer words (octets for IPv4, hextets for IPv6)
        found in this IP address.
        """
        return self._module.int_to_words(self._value)

    @property
    def bin(self):
        """
        The value of this IP address in standard Python binary
        representational form (0bxxx). A back port of the format provided by
        the builtin bin() function found in Python 2.6.x and higher.
        """
        return self._module.int_to_bin(self._value)

    @property
    def reverse_dns(self):
        """The reverse DNS lookup record for this IP address"""
        return self._module.int_to_arpa(self._value)

    def ipv4(self):
        """
        Raises an `AddrConversionError` if IPv6 address cannot be converted
        to IPv4.
        :return: A numerically equivalent version 4 `IPAddress` object.
        """
        ip = None
        klass = self.__class__
        if self._module.version == 4:
            ip = klass(self._value, 4)
        elif self._module.version == 6:
            if 0 <= self._value <= _ipv4.max_int:
                # IPv4-compatible range (::/96).
                ip = klass(self._value, 4)
            elif _ipv4.max_int <= self._value <= 0xffffffffffff:
                # IPv4-mapped range: strip the ::ffff:0:0/96 prefix base.
                # NOTE(review): the lower bound admits values below
                # 0xffff00000000, for which the subtraction goes negative
                # and the constructor raises AddrFormatError rather than
                # AddrConversionError - confirm this is intended.
                ip = klass(self._value - 0xffff00000000, 4)
            else:
                raise AddrConversionError('IPv6 address %s unsuitable for '
                    'conversion to IPv4!' % self)
        return ip

    def ipv6(self, ipv4_compatible=False):
        """
        .. note:: The IPv4-mapped IPv6 address format is now considered \
            deprecated. See RFC 4291 or later for details.
        :param ipv4_compatible: If ``True`` returns an IPv4-mapped address
            (::ffff:x.x.x.x), an IPv4-compatible (::x.x.x.x) address
            otherwise. Default: False (IPv4-mapped).
        :return: A numerically equivalent version 6 `IPAddress` object.
        """
        ip = None
        klass = self.__class__
        if self._module.version == 6:
            if ipv4_compatible and \
                (0xffff00000000 <= self._value <= 0xffffffffffff):
                # Convert a mapped address back to compatible form.
                ip = klass(self._value - 0xffff00000000, 6)
            else:
                ip = klass(self._value, 6)
        elif self._module.version == 4:
            # IPv4-Compatible IPv6 address
            ip = klass(self._value, 6)
            if not ipv4_compatible:
                # IPv4-Mapped IPv6 address
                ip = klass(0xffff00000000 + self._value, 6)
        return ip

    def format(self, dialect=None):
        """
        Only relevant for IPv6 addresses. Has no effect for IPv4.
        :param dialect: An ipv6_* dialect class.
        :return: an alternate string representation for this IP address.
        """
        if dialect is not None:
            # Duck-typed dialect check: any class providing word_fmt works.
            if not hasattr(dialect, 'word_fmt'):
                raise TypeError(
                    'custom dialects should subclass ipv6_verbose!')
        return self._module.int_to_str(self._value, dialect=dialect)

    def __or__(self, other):
        """
        :param other: An `IPAddress` object (or other int-like object).
        :return: bitwise OR (x | y) between the integer value of this IP
            address and ``other``.
        """
        return self.__class__(self._value | int(other), self._module.version)

    def __and__(self, other):
        """
        :param other: An `IPAddress` object (or other int-like object).
        :return: bitwise AND (x & y) between the integer value of this IP
            address and ``other``.
        """
        return self.__class__(self._value & int(other), self._module.version)

    def __xor__(self, other):
        """
        :param other: An `IPAddress` object (or other int-like object).
        :return: bitwise exclusive OR (x ^ y) between the integer value of
            this IP address and ``other``.
        """
        return self.__class__(self._value ^ int(other), self._module.version)

    def __lshift__(self, numbits):
        """
        :param numbits: size of bitwise shift.
        :return: an `IPAddress` object based on this one with its integer
            value left shifted by ``numbits``.
        """
        return self.__class__(self._value << numbits, self._module.version)

    def __rshift__(self, numbits):
        """
        :param numbits: size of bitwise shift.
        :return: an `IPAddress` object based on this one with its integer
            value right shifted by ``numbits``.
        """
        return self.__class__(self._value >> numbits, self._module.version)

    def __nonzero__(self):
        """:return: ``True`` if the numerical value of this IP address is not \
            zero, ``False`` otherwise."""
        # Python 2.x.
        return bool(self._value)

    __bool__ = __nonzero__  # Python 3.x.

    def __str__(self):
        """:return: IP address in presentational format"""
        return self._module.int_to_str(self._value)

    def __repr__(self):
        """:return: Python statement to create an equivalent object"""
        return "%s('%s')" % (self.__class__.__name__, self)
#-----------------------------------------------------------------------------
class IPListMixin(object):
    """
    A mixin class providing shared list-like functionality to classes
    representing groups of IP addresses.

    Host classes are expected to provide ``first``, ``last`` (integer
    bounds) and ``_module`` - see the ranged IP classes in this module.
    """
    def __iter__(self):
        """
        :return: An iterator providing access to all `IPAddress` objects
            within range represented by this ranged IP object.
        """
        start_ip = IPAddress(self.first, self._module.version)
        end_ip = IPAddress(self.last, self._module.version)
        return iter_iprange(start_ip, end_ip)

    @property
    def size(self):
        """
        The total number of IP addresses within this ranged IP object.
        """
        # Bounds are inclusive, hence the +1.
        return int(self.last - self.first + 1)

    def __len__(self):
        """
        :return: the number of IP addresses in this ranged IP object. Raises
            an `IndexError` if size > system max int (a Python 2.x
            limitation). Use the .size property for subnets of any size.
        """
        size = self.size
        if size > _sys_maxint:
            raise IndexError(("range contains more than %d (index size max) "
                "IP addresses! Use the .size property instead." % _sys_maxint))
        return size

    def __getitem__(self, index):
        """
        :return: The IP address(es) in this `IPNetwork` object referenced by
            index or slice. As slicing can produce large sequences of objects
            an iterator is returned instead of the more usual `list`.
        """
        item = None
        # Duck-typed slice check: slice objects provide .indices().
        if hasattr(index, 'indices'):
            if self._module.version == 6:
                raise TypeError('IPv6 slices are not supported!')
            (start, stop, step) = index.indices(self.size)
            if (start + step < 0) or (step > stop):
                # step value exceeds start and stop boundaries.
                item = iter([IPAddress(self.first, self._module.version)])
            else:
                start_ip = IPAddress(self.first + start, self._module.version)
                end_ip = IPAddress(self.first + stop - step, self._module.version)
                item = iter_iprange(start_ip, end_ip, step)
        else:
            try:
                index = int(index)
                if (- self.size) <= index < 0:
                    # negative index.
                    item = IPAddress(self.last + index + 1, self._module.version)
                elif 0 <= index <= (self.size - 1):
                    # Positive index or zero index.
                    item = IPAddress(self.first + index, self._module.version)
                else:
                    raise IndexError('index out range for address range size!')
            except ValueError:
                raise TypeError('unsupported index type %r!' % index)
        return item

    def __contains__(self, other):
        """
        :param other: an `IPAddress` or ranged IP object.
        :return: ``True`` if other falls within the boundary of this one,
            ``False`` otherwise.
        """
        if isinstance(other, BaseIP):
            # Different IP versions never contain each other.
            if self._module.version != other._module.version:
                return False
            if isinstance(other, IPAddress):
                return other._value >= self.first and other._value <= self.last
            # Assume that we (and the other) provide .first and .last.
            return other.first >= self.first and other.last <= self.last
        # Whatever it is, try to interpret it as IPAddress.
        return IPAddress(other) in self

    def __nonzero__(self):
        """
        Ranged IP objects always represent a sequence of at least one IP
        address and are therefore always True in the boolean context.
        """
        # Python 2.x.
        return True

    __bool__ = __nonzero__  # Python 3.x.
#-----------------------------------------------------------------------------
def parse_ip_network(module, addr, implicit_prefix=False, flags=0):
    """
    Parse an IP network specification into an ``(int value, int prefixlen)``
    pair using the supplied strategy module.

    :param module: the _ipv4 or _ipv6 strategy module to parse with.
    :param addr: either an ``(int value, int prefix)`` tuple or a CIDR-like
        string (address with optional ``/prefix``, netmask or hostmask).
    :param implicit_prefix: (optional) if True, expand abbreviated CIDR
        strings via cidr_abbrev_to_verbose() before parsing.
    :param flags: (optional) parsing flags; NOHOST strips the host bits
        from the returned value.
    :return: tuple of (integer address value, CIDR prefix length).
    :raises AddrFormatError: for malformed addresses or prefixes.
    :raises TypeError: if addr is neither a tuple nor a string.
    """
    if isinstance(addr, tuple):
        # CIDR integer tuple
        try:
            val1, val2 = addr
        except ValueError:
            raise AddrFormatError('invalid %s tuple!' % module.family_name)
        if 0 <= val1 <= module.max_int:
            value = val1
            if 0 <= val2 <= module.width:
                prefixlen = val2
            else:
                raise AddrFormatError('invalid prefix for %s tuple!'
                    % module.family_name)
        else:
            raise AddrFormatError('invalid address value for %s tuple!'
                % module.family_name)
    elif isinstance(addr, _str_type):
        # CIDR-like string subnet
        if implicit_prefix:
            #TODO: deprecate this option in netaddr 0.8.x
            addr = cidr_abbrev_to_verbose(addr)
        try:
            if '/' in addr:
                val1, val2 = addr.split('/', 1)
            else:
                # No prefix separator: val2 stays None and is resolved to
                # the full address width further below.
                val1 = addr
                val2 = None
        except ValueError:
            raise AddrFormatError('invalid IPNetwork address %s!' % addr)
        try:
            ip = IPAddress(val1, module.version, flags=INET_PTON)
        except AddrFormatError:
            if module.version == 4:
                # Try a partial IPv4 network address...
                expanded_addr = _ipv4.expand_partial_address(val1)
                ip = IPAddress(expanded_addr, module.version, flags=INET_PTON)
            else:
                raise AddrFormatError('invalid IPNetwork address %s!' % addr)
        value = ip._value
        # The prefix may be an integer, absent (None), or a
        # netmask/hostmask string - int() distinguishes the three cases
        # via the exception type it raises.
        try:
            # Integer CIDR prefix.
            prefixlen = int(val2)
        except TypeError:
            if val2 is None:
                # No prefix was specified.
                prefixlen = module.width
        except ValueError:
            # Not an integer prefix, try a netmask/hostmask prefix.
            mask = IPAddress(val2, module.version, flags=INET_PTON)
            if mask.is_netmask():
                prefixlen = module.netmask_to_prefix[mask._value]
            elif mask.is_hostmask():
                prefixlen = module.hostmask_to_prefix[mask._value]
            else:
                raise AddrFormatError('addr %r is not a valid IPNetwork!'
                    % addr)
        if not 0 <= prefixlen <= module.width:
            raise AddrFormatError('invalid prefix for %s address!'
                % module.family_name)
    else:
        raise TypeError('unexpected type %s for addr arg' % type(addr))
    if flags & NOHOST:
        # Remove host bits.
        netmask = module.prefix_to_netmask[prefixlen]
        value = value & netmask
    return value, prefixlen
#-----------------------------------------------------------------------------
class IPNetwork(BaseIP, IPListMixin):
"""
An IPv4 or IPv6 network or subnet.
A combination of an IP address and a network mask.
Accepts CIDR and several related variants :
a) Standard CIDR::
x.x.x.x/y -> 192.0.2.0/24
x::/y -> fe80::/10
b) Hybrid CIDR format (netmask address instead of prefix), where 'y' \
address represent a valid netmask::
x.x.x.x/y.y.y.y -> 192.0.2.0/255.255.255.0
x::/y:: -> fe80::/ffc0::
c) ACL hybrid CIDR format (hostmask address instead of prefix like \
Cisco's ACL bitmasks), where 'y' address represent a valid netmask::
x.x.x.x/y.y.y.y -> 192.0.2.0/0.0.0.255
x::/y:: -> fe80::/3f:ffff:ffff:ffff:ffff:ffff:ffff:ffff
d) Abbreviated CIDR format (as of netaddr 0.7.x this requires the \
optional constructor argument ``implicit_prefix=True``)::
x -> 192
x/y -> 10/8
x.x/y -> 192.168/16
x.x.x/y -> 192.168.0/24
which are equivalent to::
x.0.0.0/y -> 192.0.0.0/24
x.0.0.0/y -> 10.0.0.0/8
x.x.0.0/y -> 192.168.0.0/16
x.x.x.0/y -> 192.168.0.0/24
"""
__slots__ = ('_prefixlen',)
def __init__(self, addr, implicit_prefix=False, version=None, flags=0):
"""
Constructor.
:param addr: an IPv4 or IPv6 address with optional CIDR prefix,
netmask or hostmask. May be an IP address in presentation
(string) format, an tuple containing and integer address and a
network prefix, or another IPAddress/IPNetwork object (copy
construction).
:param implicit_prefix: (optional) if True, the constructor uses
classful IPv4 rules to select a default prefix when one is not
provided. If False it uses the length of the IP address version.
(default: False)
:param version: (optional) optimizes version detection if specified
and distinguishes between IPv4 and IPv6 for addresses with an
equivalent integer value.
:param flags: (optional) decides which rules are applied to the
interpretation of the addr value. Currently only supports the
NOHOST option. See the netaddr.core docs for further details.
"""
super(IPNetwork, self).__init__()
value, prefixlen, module = None, None, None
if hasattr(addr, '_prefixlen'):
# IPNetwork object copy constructor
value = addr._value
module = addr._module
prefixlen = addr._prefixlen
elif hasattr(addr, '_value'):
# IPAddress object copy constructor
value = addr._value
module = addr._module
prefixlen = module.width
elif version == 4:
value, prefixlen = parse_ip_network(_ipv4, addr,
implicit_prefix=implicit_prefix, flags=flags)
module = _ipv4
elif version == 6:
value, prefixlen = parse_ip_network(_ipv6, addr,
implicit_prefix=implicit_prefix, flags=flags)
module = _ipv6
else:
if version is not None:
raise ValueError('%r is an invalid IP version!' % version)
try:
module = _ipv4
value, prefixlen = parse_ip_network(module, addr,
implicit_prefix, flags)
except AddrFormatError:
try:
module = _ipv6
value, prefixlen = parse_ip_network(module, addr,
implicit_prefix, flags)
except AddrFormatError:
pass
if value is None:
raise AddrFormatError('invalid IPNetwork %s' % addr)
self._value = value
self._prefixlen = prefixlen
self._module = module
def __getstate__(self):
""":return: Pickled state of an `IPNetwork` object."""
return self._value, self._prefixlen, self._module.version
def __setstate__(self, state):
"""
:param state: data used to unpickle a pickled `IPNetwork` object.
"""
value, prefixlen, version = state
self._value = value
if version == 4:
self._module = _ipv4
elif version == 6:
self._module = _ipv6
else:
raise ValueError('unpickling failed for object state %s' \
% str(state))
if 0 <= prefixlen <= self._module.width:
self._prefixlen = prefixlen
else:
raise ValueError('unpickling failed for object state %s' \
% str(state))
def _set_prefixlen(self, value):
if not isinstance(value, _int_type):
raise TypeError('int argument expected, not %s' % type(value))
if not 0 <= value <= self._module.width:
raise AddrFormatError('invalid prefix for an %s address!' \
% self._module.family_name)
self._prefixlen = value
prefixlen = property(lambda self: self._prefixlen, _set_prefixlen,
doc='size of the bitmask used to separate the network from the host bits')
@property
def ip(self):
"""
The IP address of this `IPNetwork` object. This is may or may not be
the same as the network IP address which varies according to the value
of the CIDR subnet prefix.
"""
return IPAddress(self._value, self._module.version)
@property
def network(self):
"""The network address of this `IPNetwork` object."""
return IPAddress(self._value & self._netmask_int, self._module.version)
@property
def broadcast(self):
"""The broadcast address of this `IPNetwork` object"""
return IPAddress(self._value | self._hostmask_int, self._module.version)
@property
def first(self):
"""
The integer value of first IP address found within this `IPNetwork`
object.
"""
return self._value & (self._module.max_int ^ self._hostmask_int)
@property
def last(self):
"""
The integer value of last IP address found within this `IPNetwork`
object.
"""
hostmask = (1 << (self._module.width - self._prefixlen)) - 1
return self._value | hostmask
@property
def netmask(self):
"""The subnet mask of this `IPNetwork` object."""
netmask = self._module.max_int ^ self._hostmask_int
return IPAddress(netmask, self._module.version)
@property
def _netmask_int(self):
"""Same as self.netmask, but in integer format"""
return self._module.max_int ^ self._hostmask_int
@property
def hostmask(self):
"""The host mask of this `IPNetwork` object."""
hostmask = (1 << (self._module.width - self._prefixlen)) - 1
return IPAddress(hostmask, self._module.version)
@property
def _hostmask_int(self):
"""Same as self.hostmask, but in integer format"""
return (1 << (self._module.width - self._prefixlen)) - 1
@property
def cidr(self):
"""
The true CIDR address for this `IPNetwork` object which omits any
host bits to the right of the CIDR subnet prefix.
"""
return IPNetwork(
(self._value & self._netmask_int, self._prefixlen),
version=self._module.version)
def __iadd__(self, num):
"""
Increases the value of this `IPNetwork` object by the current size
multiplied by ``num``.
An `IndexError` is raised if result exceeds maximum IP address value
or is less than zero.
:param num: (optional) number of `IPNetwork` blocks to increment \
this IPNetwork's value by.
"""
new_value = int(self.network) + (self.size * num)
if (new_value + (self.size - 1)) > self._module.max_int:
raise IndexError('increment exceeds address boundary!')
if new_value < 0:
raise IndexError('increment is less than zero!')
self._value = new_value
return self
def __isub__(self, num):
"""
Decreases the value of this `IPNetwork` object by the current size
multiplied by ``num``.
An `IndexError` is raised if result is less than zero or exceeds
maximum IP address value.
:param num: (optional) number of `IPNetwork` blocks to decrement \
this IPNetwork's value by.
"""
new_value = int(self.network) - (self.size * num)
if new_value < 0:
raise IndexError('decrement is less than zero!')
if (new_value + (self.size - 1)) > self._module.max_int:
raise IndexError('decrement exceeds address boundary!')
self._value = new_value
return self
def __contains__(self, other):
"""
:param other: an `IPAddress` or ranged IP object.
:return: ``True`` if other falls within the boundary of this one,
``False`` otherwise.
"""
if isinstance(other, BaseIP):
if self._module.version != other._module.version:
return False
# self_net will contain only the network bits.
shiftwidth = self._module.width - self._prefixlen
self_net = self._value >> shiftwidth
if isinstance(other, IPRange):
# IPRange has no _value.
# (self_net+1)<<shiftwidth is not our last address, but the one
# after the last one.
return ((self_net << shiftwidth) <= other._start._value and
(((self_net + 1) << shiftwidth) > other._end._value))
other_net = other._value >> shiftwidth
if isinstance(other, IPAddress):
return other_net == self_net
if isinstance(other, IPNetwork):
return self_net == other_net and self._prefixlen <= other._prefixlen
# Whatever it is, try to interpret it as IPAddress.
return IPAddress(other) in self
def key(self):
"""
:return: A key tuple used to uniquely identify this `IPNetwork`.
"""
return self._module.version, self.first, self.last
def sort_key(self):
"""
:return: A key tuple used to compare and sort this `IPNetwork` correctly.
"""
net_size_bits = self._prefixlen - 1
first = self._value & (self._module.max_int ^ self._hostmask_int)
host_bits = self._value - first
return self._module.version, first, net_size_bits, host_bits
def ipv4(self):
"""
:return: A numerically equivalent version 4 `IPNetwork` object. \
Raises an `AddrConversionError` if IPv6 address cannot be \
converted to IPv4.
"""
ip = None
klass = self.__class__
if self._module.version == 4:
ip = klass('%s/%d' % (self.ip, self.prefixlen))
elif self._module.version == 6:
if 0 <= self._value <= _ipv4.max_int:
addr = _ipv4.int_to_str(self._value)
ip = klass('%s/%d' % (addr, self.prefixlen - 96))
elif _ipv4.max_int <= self._value <= 0xffffffffffff:
addr = _ipv4.int_to_str(self._value - 0xffff00000000)
ip = klass('%s/%d' % (addr, self.prefixlen - 96))
else:
raise AddrConversionError('IPv6 address %s unsuitable for ' \
'conversion to IPv4!' % self)
return ip
    def ipv6(self, ipv4_compatible=False):
        """
        .. note:: the IPv4-mapped IPv6 address format is now considered \
            deprecated. See RFC 4291 or later for details.

        :param ipv4_compatible: If ``True`` returns an IPv4-compatible
            (::x.x.x.x) address, an IPv4-mapped address (::ffff:x.x.x.x)
            otherwise. Default: False (IPv4-mapped).
            (Docstring corrected: the original described the two formats
            swapped relative to the code below.)

        :return: A numerically equivalent version 6 `IPNetwork` object.
        """
        ip = None
        klass = self.__class__
        if self._module.version == 6:
            if ipv4_compatible and \
                (0xffff00000000 <= self._value <= 0xffffffffffff):
                # Strip the ::ffff: embedding prefix off an IPv4-mapped
                # address, yielding its IPv4-compatible form.
                ip = klass((self._value - 0xffff00000000, self._prefixlen),
                    version=6)
            else:
                # Already IPv6 - return an equivalent copy unchanged.
                ip = klass((self._value, self._prefixlen), version=6)
        elif self._module.version == 4:
            if ipv4_compatible:
                # IPv4-Compatible IPv6 address (::x.x.x.x).
                ip = klass((self._value, self._prefixlen + 96), version=6)
            else:
                # IPv4-Mapped IPv6 address (::ffff:x.x.x.x).
                ip = klass((0xffff00000000 + self._value,
                    self._prefixlen + 96), version=6)
        return ip
def previous(self, step=1):
"""
:param step: the number of IP subnets between this `IPNetwork` object
and the expected subnet. Default: 1 (the previous IP subnet).
:return: The adjacent subnet preceding this `IPNetwork` object.
"""
ip_copy = self.__class__('%s/%d' % (self.network, self.prefixlen),
self._module.version)
ip_copy -= step
return ip_copy
def next(self, step=1):
"""
:param step: the number of IP subnets between this `IPNetwork` object
and the expected subnet. Default: 1 (the next IP subnet).
:return: The adjacent subnet succeeding this `IPNetwork` object.
"""
ip_copy = self.__class__('%s/%d' % (self.network, self.prefixlen),
self._module.version)
ip_copy += step
return ip_copy
    def supernet(self, prefixlen=0):
        """
        Provides a list of supernets for this `IPNetwork` object between the
        size of the current prefix and (if specified) an endpoint prefix.

        :param prefixlen: (optional) a CIDR prefix for the maximum supernet.
            Default: 0 - returns all possible supernets.

        :return: a list of supernet `IPNetwork` objects, from the widest
            (smallest prefixlen) down to the immediate parent of this
            network. (Docstring corrected: the return value is a list, not
            a tuple.)

        :raises ValueError: if prefixlen is out of range for this IP version.
        """
        if not 0 <= prefixlen <= self._module.width:
            raise ValueError('CIDR prefix /%d invalid for IPv%d!' \
                % (prefixlen, self._module.version))
        supernets = []
        # Use a copy of self as we'll be editing it.
        supernet = self.cidr
        supernet._prefixlen = prefixlen
        # Walk prefix lengths from `prefixlen` up to (but excluding) our
        # own, snapshotting the truncated network at each step via .cidr.
        while supernet._prefixlen != self._prefixlen:
            supernets.append(supernet.cidr)
            supernet._prefixlen += 1
        return supernets
def subnet(self, prefixlen, count=None, fmt=None):
"""
A generator that divides up this IPNetwork's subnet into smaller
subnets based on a specified CIDR prefix.
:param prefixlen: a CIDR prefix indicating size of subnets to be
returned.
:param count: (optional) number of consecutive IP subnets to be
returned.
:return: an iterator containing IPNetwork subnet objects.
"""
if not 0 <= self.prefixlen <= self._module.width:
raise ValueError('CIDR prefix /%d invalid for IPv%d!' \
% (prefixlen, self._module.version))
if not self.prefixlen <= prefixlen:
# Don't return anything.
raise StopIteration
# Calculate number of subnets to be returned.
width = self._module.width
max_subnets = 2 ** (width - self.prefixlen) // 2 ** (width - prefixlen)
if count is None:
count = max_subnets
if not 1 <= count <= max_subnets:
raise ValueError('count outside of current IP subnet boundary!')
base_subnet = self._module.int_to_str(self.first)
i = 0
while(i < count):
subnet = self.__class__('%s/%d' % (base_subnet, prefixlen),
self._module.version)
subnet.value += (subnet.size * i)
subnet.prefixlen = prefixlen
i += 1
yield subnet
def iter_hosts(self):
"""
An generator that provides all the IP addresses that can be assigned
to hosts within the range of this IP object's subnet.
- for IPv4, the network and broadcast addresses are always excluded. \
Any subnet that contains less than 4 IP addresses yields an empty list.
- for IPv6, only the unspecified address '::' is excluded from any \
yielded IP addresses.
:return: an IPAddress iterator
"""
it_hosts = iter([])
if self._module.version == 4:
# IPv4 logic.
if self.size >= 4:
it_hosts = iter_iprange(
IPAddress(self.first+1, self._module.version),
IPAddress(self.last-1, self._module.version))
else:
# IPv6 logic.
if self.first == 0:
if self.size != 1:
# Don't return '::'.
it_hosts = iter_iprange(
IPAddress(self.first + 1, self._module.version),
IPAddress(self.last, self._module.version))
else:
it_hosts = iter(self)
return it_hosts
def __str__(self):
""":return: this IPNetwork in CIDR format"""
addr = self._module.int_to_str(self._value)
return "%s/%s" % (addr, self.prefixlen)
def __repr__(self):
""":return: Python statement to create an equivalent object"""
return "%s('%s')" % (self.__class__.__name__, self)
#-----------------------------------------------------------------------------
class IPRange(BaseIP, IPListMixin):
    """
    An arbitrary IPv4 or IPv6 address range.

    Formed from a lower and upper bound IP address. The upper bound IP cannot
    be numerically smaller than the lower bound and the IP version of both
    must match.
    """
    __slots__ = ('_start', '_end')

    def __init__(self, start, end, flags=0):
        """
        Constructor.

        :param start: an IPv4 or IPv6 address that forms the lower
            boundary of this IP range.

        :param end: an IPv4 or IPv6 address that forms the upper
            boundary of this IP range.

        :param flags: (optional) decides which rules are applied to the
            interpretation of the start and end values. Supported constants
            are INET_PTON and ZEROFILL. See the netaddr.core docs for further
            details.

        :raises AddrFormatError: if end is numerically below start.
        """
        self._start = IPAddress(start, flags=flags)
        self._module = self._start._module
        # Parse the upper bound with the version implied by the lower bound.
        self._end = IPAddress(end, self._module.version, flags=flags)
        if int(self._start) > int(self._end):
            raise AddrFormatError('lower bound IP greater than upper bound!')

    def __getstate__(self):
        """:return: Pickled state of an `IPRange` object."""
        return self._start.value, self._end.value, self._module.version

    def __setstate__(self, state):
        """
        :param state: data used to unpickle a pickled `IPRange` object.
        """
        start, end, version = state
        self._start = IPAddress(start, version)
        self._module = self._start._module
        self._end = IPAddress(end, version)

    def __contains__(self, other):
        """
        :param other: an `IPAddress`, `IPRange` or `IPNetwork` object (or
            anything that `IPAddress` can parse).

        :return: ``True`` if other falls entirely within this range's
            boundaries, ``False`` otherwise.
        """
        if isinstance(other, BaseIP):
            if self._module.version != other._module.version:
                return False
            if isinstance(other, IPAddress):
                return (self._start._value <= other._value and
                        self._end._value >= other._value)
            if isinstance(other, IPRange):
                return (self._start._value <= other._start._value and
                        self._end._value >= other._end._value)
            if isinstance(other, IPNetwork):
                shiftwidth = other._module.width - other._prefixlen
                other_start = (other._value >> shiftwidth) << shiftwidth
                # Start of the next network after other.
                other_next_start = other_start + (1 << shiftwidth)
                # BUG FIX: other's last address is other_next_start - 1, so
                # containment requires self._end >= other_next_start - 1.
                # The previous check (self._end._value > other_next_start)
                # wrongly rejected ranges ending exactly on, or one past,
                # other's last address (e.g. the range 10.0.0.0-10.0.0.255
                # did not contain the network 10.0.0.0/24).
                return (self._start._value <= other_start and
                        self._end._value >= other_next_start - 1)

        # Whatever it is, try to interpret it as IPAddress.
        return IPAddress(other) in self

    @property
    def first(self):
        """The integer value of first IP address in this `IPRange` object."""
        return int(self._start)

    @property
    def last(self):
        """The integer value of last IP address in this `IPRange` object."""
        return int(self._end)

    def key(self):
        """
        :return: A key tuple used to uniquely identify this `IPRange`.
        """
        return self._module.version, self.first, self.last

    def sort_key(self):
        """
        :return: A key tuple used to compare and sort this `IPRange` correctly.
        """
        skey = self._module.width - num_bits(self.size)
        return self._module.version, self._start._value, skey

    def cidrs(self):
        """
        The list of CIDR addresses found within the lower and upper bound
        addresses of this `IPRange`.
        """
        return iprange_to_cidrs(self._start, self._end)

    def __str__(self):
        """:return: this `IPRange` in a common representational format."""
        return "%s-%s" % (self._start, self._end)

    def __repr__(self):
        """:return: Python statement to create an equivalent object"""
        return "%s('%s', '%s')" % (self.__class__.__name__,
            self._start, self._end)
#-----------------------------------------------------------------------------
def iter_unique_ips(*args):
    """
    :param args: A list of IP addresses and subnets passed in as arguments.

    :return: A generator that flattens out IP subnets, yielding unique
        individual IP addresses (no duplicates).
    """
    # cidr_merge de-duplicates and collapses overlaps, so simply walking
    # each merged subnet yields every address exactly once.
    for subnet in cidr_merge(args):
        for address in subnet:
            yield address
#-----------------------------------------------------------------------------
def cidr_abbrev_to_verbose(abbrev_cidr):
    """
    A function that converts abbreviated IPv4 CIDRs to their more verbose
    equivalent.

    :param abbrev_cidr: an abbreviated CIDR.
        Uses the old-style classful IP address rules to decide on a default
        subnet prefix if one is not explicitly provided.
        Only supports IPv4 addresses.

        Examples ::

            10                  - 10.0.0.0/8
            10/16               - 10.0.0.0/16
            128                 - 128.0.0.0/16
            128/8               - 128.0.0.0/8
            192.168             - 192.168.0.0/16

    :return: A verbose CIDR from an abbreviated CIDR or old-style classful \
        network address, The original value if it was not recognised as a \
        supported abbreviation.
    """
    # Internal function that returns a prefix value based on the old IPv4
    # classful network scheme that has been superseded (almost) by CIDR.
    def classful_prefix(octet):
        octet = int(octet)
        # Raises IndexError (caught by the outer handler below) rather than
        # ValueError so that an invalid octet bails out of the whole parse.
        if not 0 <= octet <= 255:
            raise IndexError('Invalid octet: %r!' % octet)
        if 0 <= octet <= 127:  # Legacy class 'A' classification.
            return 8
        elif 128 <= octet <= 191:  # Legacy class 'B' classification.
            return 16
        elif 192 <= octet <= 223:  # Legacy class 'C' classification.
            return 24
        elif 224 <= octet <= 239:  # Multicast address range.
            return 4
        return 32  # Default.
    start = ''
    tokens = []
    prefix = None
    if _is_str(abbrev_cidr):
        # IPv6 is not supported by this function - hand back unchanged.
        if ':' in abbrev_cidr:
            return abbrev_cidr
    try:
        # Single octet partial integer or string address.
        i = int(abbrev_cidr)
        tokens = [str(i), '0', '0', '0']
        return "%s%s/%s" % (start, '.'.join(tokens), classful_prefix(i))
    except ValueError:
        # Multi octet partial string address with optional prefix.
        part_addr = abbrev_cidr
        tokens = []
        if part_addr == '':
            # Not a recognisable format.
            return abbrev_cidr
        if '/' in part_addr:
            (part_addr, prefix) = part_addr.split('/', 1)
        # Check prefix for validity.
        if prefix is not None:
            try:
                # Note: both a non-numeric prefix (int() raises) and an
                # out-of-range one (explicit raise) land in the same
                # handler, returning the input unchanged.
                if not 0 <= int(prefix) <= 32:
                    raise ValueError('prefixlen in address %r out of range' \
                        ' for IPv4!' % abbrev_cidr)
            except ValueError:
                return abbrev_cidr
        if '.' in part_addr:
            tokens = part_addr.split('.')
        else:
            tokens = [part_addr]
        if 1 <= len(tokens) <= 4:
            # Pad missing octets with zeroes, e.g. '192.168' -> 192.168.0.0.
            for i in range(4 - len(tokens)):
                tokens.append('0')
        else:
            # Not a recognisable format.
            return abbrev_cidr
        if prefix is None:
            try:
                prefix = classful_prefix(tokens[0])
            except ValueError:
                return abbrev_cidr
        return "%s%s/%s" % (start, '.'.join(tokens), prefix)
    except TypeError:
        pass
    except IndexError:
        # Raised by classful_prefix for an out-of-range first octet.
        pass
    # Not a recognisable format.
    return abbrev_cidr
#-----------------------------------------------------------------------------
def cidr_merge(ip_addrs):
    """
    A function that accepts an iterable sequence of IP addresses and subnets
    merging them into the smallest possible list of CIDRs. It merges adjacent
    subnets where possible, those contained within others and also removes
    any duplicates.

    :param ip_addrs: an iterable sequence of IP addresses and subnets.

    :return: a summarized list of `IPNetwork` objects.
    """
    if not hasattr(ip_addrs, '__iter__'):
        raise ValueError('A sequence or iterator is expected!')
    # Start off using set as we'll remove any duplicates at the start.
    ipv4_bit_cidrs = set()
    ipv6_bit_cidrs = set()
    # Convert IP addresses and subnets into their CIDR bit strings.
    ipv4_match_all_found = False
    ipv6_match_all_found = False
    for ip in ip_addrs:
        cidr = IPNetwork(ip)
        # Represent each CIDR by just its network-prefix bits as a '0'/'1'
        # string, e.g. a /24 network becomes its first 24 address bits.
        bits = cidr.network.bits(word_sep='')[0:cidr.prefixlen]
        if cidr.version == 4:
            if bits == '':
                # This is the /0 network, that includes all IPv4 addresses.
                ipv4_match_all_found = True
                ipv4_bit_cidrs = set([''])  # Clear all other IPv4 values.
            if not ipv4_match_all_found:
                ipv4_bit_cidrs.add(bits)
        else:
            if bits == '':
                # This is the /0 network, that includes all IPv6 addresses.
                ipv6_match_all_found = True
                ipv6_bit_cidrs = set([''])  # Clear all other IPv6 values.
            if not ipv6_match_all_found:
                ipv6_bit_cidrs.add(bits)
    # Merge binary CIDR addresses where possible.
    def _reduce_bit_cidrs(cidrs):
        new_cidrs = []
        cidrs.sort()
        # Multiple passes are required to obtain precise results.
        while 1:
            finished = True
            while (cidrs):
                if not new_cidrs:
                    new_cidrs.append(cidrs.pop(0))
                if not cidrs:
                    break
                # lhs and rhs are same size and adjacent.
                (new_cidr, subs) = RE_CIDR_ADJACENT.subn(
                    r'\1', '%s %s' % (new_cidrs[-1], cidrs[0]))
                if subs:
                    # merge lhs with rhs.
                    new_cidrs[-1] = new_cidr
                    cidrs.pop(0)
                    finished = False
                else:
                    # lhs contains rhs.
                    (new_cidr, subs) = RE_CIDR_WITHIN.subn(
                        r'\1', '%s %s' % (new_cidrs[-1], cidrs[0]))
                    if subs:
                        # keep lhs, discard rhs.
                        new_cidrs[-1] = new_cidr
                        cidrs.pop(0)
                        finished = False
                    else:
                        # no matches - accept rhs.
                        new_cidrs.append(cidrs.pop(0))
            if finished:
                break
            else:
                # still seeing matches, reset.
                cidrs = new_cidrs
                new_cidrs = []
        if new_cidrs == ['0', '1']:
            # Special case where summary CIDR result is '0.0.0.0/0' or
            # '::/0' i.e. the whole IPv4 or IPv6 address space.
            new_cidrs = ['']
        return new_cidrs
    new_cidrs = []
    def _bits_to_cidr(bits, module):
        if bits == '':
            if module.version == 4:
                return IPNetwork('0.0.0.0/0', 4)
            else:
                return IPNetwork('::/0', 6)
        if RE_VALID_CIDR_BITS.match(bits) is None:
            raise ValueError('%r is an invalid bit string!' % bits)
        num_bits = len(bits)
        # NOTE(review): this branch is unreachable - the empty-string case
        # was already handled (and returned) at the top of this function.
        if bits == '':
            return IPAddress(module.int_to_str(0), module.version)
        else:
            # Zero-pad the prefix bits out to a full-width address.
            bits = bits + '0' * (module.width - num_bits)
            return IPNetwork((module.bits_to_int(bits), num_bits),
                version=module.version)
    # Reduce and format lists of reduced CIDRs.
    for bits in _reduce_bit_cidrs(list(ipv4_bit_cidrs)):
        new_cidrs.append(_bits_to_cidr(bits, _ipv4))
    for bits in _reduce_bit_cidrs(list(ipv6_bit_cidrs)):
        new_cidrs.append(_bits_to_cidr(bits, _ipv6))
    return new_cidrs
#-----------------------------------------------------------------------------
def cidr_exclude(target, exclude):
    """
    Removes an exclude IP address or subnet from target IP subnet.

    :param target: the target IP address or subnet to be divided up.

    :param exclude: the IP address or subnet to be removed from target.

    :return: list of `IPNetwork` objects remaining after exclusion.
    """
    target = IPNetwork(target)
    exclude = IPNetwork(exclude)
    if exclude.last < target.first:
        # Exclude subnet's upper bound address less than target
        # subnet's lower bound.
        return [target.cidr]
    elif target.last < exclude.first:
        # Exclude subnet's lower bound address greater than target
        # subnet's upper bound.
        return [target.cidr]
    cidrs = []
    new_prefixlen = target.prefixlen + 1
    # Some @properties that are expensive to get and don't change below.
    target_module_width = target._module.width
    if new_prefixlen <= target_module_width:
        target_first = target.first
        version = exclude.version
        i_lower = target_first
        i_upper = target_first + (2 ** (target_module_width - new_prefixlen))
        lower = IPNetwork((i_lower, new_prefixlen), version=version)
        upper = IPNetwork((i_upper, new_prefixlen), version=version)
        # Repeatedly bisect target: keep the half that does NOT contain
        # exclude, descend into the half that does, until reaching
        # exclude's own prefix length.
        while exclude.prefixlen >= new_prefixlen:
            if exclude in lower:
                matched = i_lower
                unmatched = i_upper
            elif exclude in upper:
                matched = i_upper
                unmatched = i_lower
            else:
                # Exclude subnet not within target subnet.
                cidrs.append(target.cidr)
                break
            ip = IPNetwork((unmatched, new_prefixlen), version=version)
            cidrs.append(ip)
            new_prefixlen += 1
            if new_prefixlen > target_module_width:
                # Reached single-address granularity - cannot split further.
                break
            i_lower = matched
            i_upper = matched + (2 ** (target_module_width - new_prefixlen))
            lower = IPNetwork((i_lower, new_prefixlen), version=version)
            upper = IPNetwork((i_upper, new_prefixlen), version=version)
    cidrs.sort()
    return cidrs
#-----------------------------------------------------------------------------
def spanning_cidr(ip_addrs):
    """
    Function that accepts a sequence of IP addresses and subnets returning
    a single `IPNetwork` subnet that is large enough to span the lower and
    upper bound IP addresses with a possible overlap on either end.

    :param ip_addrs: sequence of IP addresses and subnets.

    :return: a single spanning `IPNetwork` subnet.
    """
    networks = sorted(IPNetwork(ip) for ip in ip_addrs)
    if len(networks) < 2:
        raise ValueError('IP sequence must contain at least 2 elements!')
    lowest, highest = networks[0], networks[-1]
    if lowest.version != highest.version:
        raise TypeError('IP sequence cannot contain both IPv4 and IPv6!')
    # Starting from the highest network, widen the prefix one bit at a
    # time until the candidate also covers the lowest network.
    span = highest.cidr
    while span.prefixlen > 0:
        if highest in span and lowest not in span:
            span.prefixlen -= 1
        else:
            break
    return span.cidr
#-----------------------------------------------------------------------------
def iter_iprange(start, end, step=1):
    """
    A generator that produces IPAddress objects between an arbitrary start
    and stop IP address with intervals of step between them. Sequences
    produce are inclusive of boundary IPs.

    :param start: start IP address.

    :param end: end IP address.

    :param step: (optional) size of step between IP addresses. Default: 1

    :return: an iterator of one or more `IPAddress` objects.
    """
    first = IPAddress(start)
    last = IPAddress(end)
    if first.version != last.version:
        raise TypeError('start and stop IP versions do not match!')
    version = first.version
    step = int(step)
    if step == 0:
        raise ValueError('step argument cannot be zero')
    # We don't need objects from here, just integers.
    index = int(first)
    stop = int(last)
    if step < 0:
        # Counting down: keep yielding while we have not passed below stop.
        while index >= stop:
            yield IPAddress(index, version)
            index += step
    else:
        # Counting up: keep yielding while we have not passed above stop.
        while index <= stop:
            yield IPAddress(index, version)
            index += step
#-----------------------------------------------------------------------------
def iprange_to_cidrs(start, end):
    """
    A function that accepts an arbitrary start and end IP address or subnet
    and returns a list of CIDR subnets that fit exactly between the boundaries
    of the two with no overlap.

    :param start: the start IP address or subnet.

    :param end: the end IP address or subnet.

    :return: a list of one or more IP addresses and subnets.
    """
    cidr_list = []
    start = IPNetwork(start)
    end = IPNetwork(end)
    iprange = [start.first, end.last]
    # Get spanning CIDR covering both addresses.
    cidr_span = spanning_cidr([start, end])
    if cidr_span.first == iprange[0] and cidr_span.last == iprange[-1]:
        # Spanning CIDR matches start and end exactly.
        cidr_list = [cidr_span]
    elif cidr_span.last == iprange[-1]:
        # Spanning CIDR matches end exactly; carve off everything below
        # start and keep only the CIDRs from start onwards.
        ip = IPAddress(start)
        first_int_val = int(ip)
        ip -= 1
        cidr_remainder = cidr_exclude(cidr_span, ip)
        first_found = False
        for cidr in cidr_remainder:
            if cidr.first == first_int_val:
                first_found = True
            if first_found:
                cidr_list.append(cidr)
    elif cidr_span.first == iprange[0]:
        # Spanning CIDR matches start exactly; carve off everything above
        # end and keep only the CIDRs up to and including end.
        ip = IPAddress(end)
        last_int_val = int(ip)
        ip += 1
        cidr_remainder = cidr_exclude(cidr_span, ip)
        last_found = False
        for cidr in cidr_remainder:
            cidr_list.append(cidr)
            if cidr.last == last_int_val:
                break
    elif cidr_span.first <= iprange[0] and cidr_span.last >= iprange[-1]:
        # Spanning CIDR overlaps start and end; trim both sides.
        ip = IPAddress(start)
        first_int_val = int(ip)
        ip -= 1
        cidr_remainder = cidr_exclude(cidr_span, ip)
        # Fix start.
        first_found = False
        for cidr in cidr_remainder:
            if cidr.first == first_int_val:
                first_found = True
            if first_found:
                cidr_list.append(cidr)
        # Fix end: re-split the last kept CIDR so nothing past end remains.
        ip = IPAddress(end)
        last_int_val = int(ip)
        ip += 1
        cidr_remainder = cidr_exclude(cidr_list.pop(), ip)
        last_found = False
        for cidr in cidr_remainder:
            cidr_list.append(cidr)
            if cidr.last == last_int_val:
                break
    return cidr_list
#-----------------------------------------------------------------------------
def smallest_matching_cidr(ip, cidrs):
    """
    Matches an IP address or subnet against a given sequence of IP addresses
    and subnets.

    :param ip: a single IP address or subnet.

    :param cidrs: a sequence of IP addresses and/or subnets.

    :return: the smallest (most specific) matching IPAddress or IPNetwork
        object from the provided sequence, None if there was no match.
    """
    if not hasattr(cidrs, '__iter__'):
        raise TypeError('IP address/subnet sequence expected, not %r!'
            % cidrs)
    address = IPAddress(ip)
    best = None
    for candidate in sorted(IPNetwork(c) for c in cidrs):
        if address in candidate:
            # Later matches in sorted order are more specific - keep going.
            best = candidate
        elif best is not None:
            # Sorted order guarantees no further match can follow a miss.
            break
    return best
#-----------------------------------------------------------------------------
def largest_matching_cidr(ip, cidrs):
    """
    Matches an IP address or subnet against a given sequence of IP addresses
    and subnets.

    :param ip: a single IP address or subnet.

    :param cidrs: a sequence of IP addresses and/or subnets.

    :return: the largest (least specific) matching IPAddress or IPNetwork
        object from the provided sequence, None if there was no match.
    """
    if not hasattr(cidrs, '__iter__'):
        raise TypeError('IP address/subnet sequence expected, not %r!'
            % cidrs)
    address = IPAddress(ip)
    # The first hit in sorted order is the least specific match.
    for candidate in sorted(IPNetwork(c) for c in cidrs):
        if address in candidate:
            return candidate
    return None
#-----------------------------------------------------------------------------
def all_matching_cidrs(ip, cidrs):
    """
    Matches an IP address or subnet against a given sequence of IP addresses
    and subnets.

    :param ip: a single IP address.

    :param cidrs: a sequence of IP addresses and/or subnets.

    :return: all matching IPAddress and/or IPNetwork objects from the provided
        sequence, an empty list if there was no match.
    """
    if not hasattr(cidrs, '__iter__'):
        raise TypeError('IP address/subnet sequence expected, not %r!'
            % cidrs)
    address = IPAddress(ip)
    matches = []
    for candidate in sorted(IPNetwork(c) for c in cidrs):
        if address in candidate:
            matches.append(candidate)
        elif matches:
            # Sorted order guarantees no further match can follow a miss.
            break
    return matches
#-----------------------------------------------------------------------------
# Cached IPv4 address range lookups.
#-----------------------------------------------------------------------------
# Pre-built lookup objects; modules import these rather than re-parsing
# the CIDR strings on every classification call.
IPV4_LOOPBACK = IPNetwork('127.0.0.0/8')
IPV4_PRIVATE = (
    IPNetwork('10.0.0.0/8'),  # Private-Use Networks
    IPNetwork('172.16.0.0/12'),  # Private-Use Networks
    # NOTE(review): 192.0.2.0/24 is TEST-NET (documentation range), not a
    # private-use block - presumably grouped here deliberately; confirm.
    IPNetwork('192.0.2.0/24'),  # Test-Net
    IPNetwork('192.168.0.0/16'),  # Private-Use Networks
    IPRange('239.0.0.0', '239.255.255.255'),  # Administrative Multicast
)
IPV4_LINK_LOCAL = IPNetwork('169.254.0.0/16')
IPV4_MULTICAST = IPNetwork('224.0.0.0/4')
IPV4_6TO4 = IPNetwork('192.88.99.0/24')  # 6to4 Relay Anycast
IPV4_RESERVED = (
    IPNetwork('128.0.0.0/16'),  # Reserved but subject to allocation
    IPNetwork('191.255.0.0/16'),  # Reserved but subject to allocation
    IPNetwork('192.0.0.0/24'),  # Reserved but subject to allocation
    IPNetwork('223.255.255.0/24'),  # Reserved but subject to allocation
    IPNetwork('240.0.0.0/4'),  # Reserved for Future Use
    # Reserved multicast
    IPRange('234.0.0.0', '238.255.255.255'),
    IPRange('225.0.0.0', '231.255.255.255'),
)
#-----------------------------------------------------------------------------
#   Cached IPv6 address range lookups.
#-----------------------------------------------------------------------------
IPV6_LOOPBACK = IPAddress('::1')
IPV6_PRIVATE = (
    IPNetwork('fc00::/7'),  # Unique Local Addresses (ULA)
    IPNetwork('fec0::/10'),  # Site Local Addresses (deprecated - RFC 3879)
)
IPV6_LINK_LOCAL = IPNetwork('fe80::/10')
IPV6_MULTICAST = IPNetwork('ff00::/8')
IPV6_RESERVED = (
    IPNetwork('ff00::/12'), IPNetwork('::/8'),
    IPNetwork('0100::/8'), IPNetwork('0200::/7'),
    IPNetwork('0400::/6'), IPNetwork('0800::/5'),
    IPNetwork('1000::/4'), IPNetwork('4000::/3'),
    IPNetwork('6000::/3'), IPNetwork('8000::/3'),
    IPNetwork('A000::/3'), IPNetwork('C000::/3'),
    IPNetwork('E000::/4'), IPNetwork('F000::/5'),
    IPNetwork('F800::/6'), IPNetwork('FE00::/9'),
)
| gpl-2.0 |
sarvex/tensorflow | tensorflow/compiler/tests/function_test.py | 25 | 4928 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for Tensorflow functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class FunctionTest(xla_test.XLATestCase):
  """Tests that @function.Defun-wrapped functions compile and run under XLA."""

  def testFunction(self):
    """Executes a simple TensorFlow function."""

    def APlus2B(a, b):
      return a + b * 2

    aval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
    bval = np.array([5, 6, 7, 8]).reshape([2, 2]).astype(np.float32)
    # Compute the reference result in NumPy before building the graph.
    expected = APlus2B(aval, bval)

    with self.session():

      @function.Defun(dtypes.float32, dtypes.float32)
      def Foo(a, b):
        return APlus2B(a, b)

      a = constant_op.constant(aval, name="a")
      b = constant_op.constant(bval, name="b")
      # test_scope() places the function call on the XLA device under test.
      with self.test_scope():
        call_f = Foo(a, b)
      result = self.evaluate(call_f)
    self.assertAllClose(result, expected, rtol=1e-3)

  def testNestedFunctions(self):
    """Executes two nested TensorFlow functions."""

    def TimesTwo(x):
      return x * 2

    def APlus2B(a, b):
      return a + TimesTwo(b)

    aval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
    bval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
    expected = APlus2B(aval, bval)

    with self.session():

      @function.Defun(dtypes.float32, dtypes.float32)
      def Foo(a, b):
        return APlus2B(a, b)

      a = constant_op.constant(aval, name="a")
      b = constant_op.constant(bval, name="b")
      with self.test_scope():
        call_g = Foo(a, b)
      result = self.evaluate(call_g)
    self.assertAllClose(result, expected, rtol=1e-3)

  def testFunctionMultipleRetvals(self):
    """Executes a function with multiple return values."""

    # This function will run on the XLA device
    def Func(a, b):
      return a + b, a - b

    aval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
    bval = np.array([5, 6, 7, 8]).reshape([2, 2]).astype(np.float32)
    expected = Func(aval, bval)

    with self.session():

      @function.Defun(dtypes.float32, dtypes.float32)
      def Foo(a, b):
        return Func(a, b)

      a = constant_op.constant(aval, name="a")
      b = constant_op.constant(bval, name="b")
      with self.test_scope():
        call_f = Foo(a, b)
      result = self.evaluate(call_f)
    self.assertAllClose(result, expected, rtol=1e-3)

  def testCompileTimeConstantsInDefun(self):
    """Tests that XLA handles compile-time constants in defuns."""
    with self.session() as sess:

      @function.Defun(dtypes.float32, dtypes.int32, dtypes.int32)
      def Foo(a, c, d):
        # c and d must be known at compile time
        x = array_ops.slice(a, c, d)
        return x

      a = array_ops.placeholder(dtypes.float32)
      c = array_ops.placeholder(dtypes.int32, shape=[4])
      d = array_ops.placeholder(dtypes.int32, shape=[4])
      with self.test_scope():
        call_f = Foo(a, c, d)
      # Feed concrete values so the slice bounds become XLA constants.
      result = sess.run(call_f, feed_dict={
          a: np.ones([1, 4, 4, 1]),
          c: [0, 0, 0, 0],
          d: [1, 2, 2, 1]})
    self.assertAllEqual(np.ones([1, 2, 2, 1]), result)

  # TODO(b/36139787): Re-enable this test when noinline works again.
  def DISABLED_testFunctionsNoInline(self):

    @function.Defun(dtypes.float32, noinline=True)
    def TimesTwo(x):
      return x * 2

    @function.Defun(dtypes.float32, dtypes.float32)
    def APlus2B(a, b):
      return a + TimesTwo(b)

    aval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
    bval = np.array([4, 3, 2, 1]).reshape([2, 2]).astype(np.float32)
    expected = aval + bval * 2

    with self.session() as sess:
      with self.test_scope():
        a = array_ops.placeholder(dtypes.float32, name="a")
        b = array_ops.placeholder(dtypes.float32, name="b")
        call = APlus2B(a, b)
      result = sess.run(call, {a: aval, b: bval})
    self.assertAllClose(result, expected, rtol=1e-3)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
lovehhf/django-social-auth | social_auth/backends/contrib/gae.py | 12 | 1796 | """
Google App Engine support using User API
"""
from __future__ import absolute_import
from google.appengine.api import users
from django.contrib.auth import authenticate
from django.core.urlresolvers import reverse
from social_auth.backends import SocialAuthBackend, BaseAuth, USERNAME
from social_auth.backends.exceptions import AuthException
class GAEBackend(SocialAuthBackend):
    """GoogleAppengine authentication backend.

    Pulls user identity from App Engine's Users API rather than from an
    OAuth/OpenID response, so the ``details``/``response`` arguments are
    ignored.
    """
    name = 'google-appengine'

    def get_user_id(self, details, response):
        """Return current user id, or None if nobody is logged in."""
        user = users.get_current_user()
        if user:
            return user.user_id()

    def get_user_details(self, response):
        """Return user basic information (id and email only).

        NOTE(review): unlike get_user_id this does not guard against
        get_current_user() returning None - if no user is logged in this
        raises AttributeError. Presumably only called after auth_complete
        has verified a user exists; confirm.
        """
        user = users.get_current_user()
        return {USERNAME: user.user_id(),
                'email': user.email(),
                'fullname': '',
                'first_name': '',
                'last_name': ''}
# Auth classes
class GAEAuth(BaseAuth):
    """GoogleAppengine authentication flow handler."""
    AUTH_BACKEND = GAEBackend

    def auth_url(self):
        """Build and return complete URL.

        Delegates to App Engine's Users API, which redirects back to this
        backend's socialauth_complete view after login.
        """
        return users.create_login_url(reverse('socialauth_complete',
                                              args=(self.AUTH_BACKEND.name,)))

    def auth_complete(self, *args, **kwargs):
        """Completes login process, must return user instance."""
        if not users.get_current_user():
            raise AuthException('Authentication error')

        # Setting these two are necessary for BaseAuth.authenticate to work:
        # 'response' is unused by GAEBackend and the backend-name flag routes
        # the call to this backend.
        kwargs.update({
            'response': '',
            self.AUTH_BACKEND.name: True
        })
        return authenticate(*args, **kwargs)
# Backend definition
BACKENDS = {
'gae': GAEAuth,
}
| bsd-3-clause |
elventear/ansible | test/units/executor/test_playbook_executor.py | 60 | 6304 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import MagicMock
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.playbook import Playbook
from ansible.template import Templar
from units.mock.loader import DictDataLoader
class TestPlaybookExecutor(unittest.TestCase):
    """Unit tests for PlaybookExecutor's serial batching logic."""

    def setUp(self):
        # No shared fixtures needed; playbooks are built per-test.
        pass

    def tearDown(self):
        pass

    def test_get_serialized_batches(self):
        # One in-memory playbook per 'serial' syntax variant under test.
        fake_loader = DictDataLoader({
            'no_serial.yml': '''
            - hosts: all
              gather_facts: no
              tasks:
              - debug: var=inventory_hostname
            ''',
            'serial_int.yml': '''
            - hosts: all
              gather_facts: no
              serial: 2
              tasks:
              - debug: var=inventory_hostname
            ''',
            'serial_pct.yml': '''
            - hosts: all
              gather_facts: no
              serial: 20%
              tasks:
              - debug: var=inventory_hostname
            ''',
            'serial_list.yml': '''
            - hosts: all
              gather_facts: no
              serial: [1, 2, 3]
              tasks:
              - debug: var=inventory_hostname
            ''',
            'serial_list_mixed.yml': '''
            - hosts: all
              gather_facts: no
              serial: [1, "20%", -1]
              tasks:
              - debug: var=inventory_hostname
            ''',
        })

        mock_inventory = MagicMock()
        mock_var_manager = MagicMock()

        # fake out options to use the syntax CLI switch, which will ensure
        # the PlaybookExecutor doesn't create a TaskQueueManager
        mock_options = MagicMock()
        mock_options.syntax.value = True

        templar = Templar(loader=fake_loader)

        pbe = PlaybookExecutor(
            playbooks=['no_serial.yml', 'serial_int.yml', 'serial_pct.yml', 'serial_list.yml', 'serial_list_mixed.yml'],
            inventory=mock_inventory,
            variable_manager=mock_var_manager,
            loader=fake_loader,
            options=mock_options,
            passwords=[],
        )

        # no serial -> one batch containing every host.
        playbook = Playbook.load(pbe._playbooks[0], variable_manager=mock_var_manager, loader=fake_loader)
        play = playbook.get_plays()[0]
        play.post_validate(templar)
        mock_inventory.get_hosts.return_value = ['host0','host1','host2','host3','host4','host5','host6','host7','host8','host9']
        self.assertEqual(pbe._get_serialized_batches(play), [['host0','host1','host2','host3','host4','host5','host6','host7','host8','host9']])

        # serial: 2 -> fixed-size batches of two.
        playbook = Playbook.load(pbe._playbooks[1], variable_manager=mock_var_manager, loader=fake_loader)
        play = playbook.get_plays()[0]
        play.post_validate(templar)
        mock_inventory.get_hosts.return_value = ['host0','host1','host2','host3','host4','host5','host6','host7','host8','host9']
        self.assertEqual(pbe._get_serialized_batches(play), [['host0','host1'],['host2','host3'],['host4','host5'],['host6','host7'],['host8','host9']])

        # serial: 20% of 10 hosts -> batches of two.
        playbook = Playbook.load(pbe._playbooks[2], variable_manager=mock_var_manager, loader=fake_loader)
        play = playbook.get_plays()[0]
        play.post_validate(templar)
        mock_inventory.get_hosts.return_value = ['host0','host1','host2','host3','host4','host5','host6','host7','host8','host9']
        self.assertEqual(pbe._get_serialized_batches(play), [['host0','host1'],['host2','host3'],['host4','host5'],['host6','host7'],['host8','host9']])

        # serial: [1, 2, 3] -> growing batches, last size repeats.
        playbook = Playbook.load(pbe._playbooks[3], variable_manager=mock_var_manager, loader=fake_loader)
        play = playbook.get_plays()[0]
        play.post_validate(templar)
        mock_inventory.get_hosts.return_value = ['host0','host1','host2','host3','host4','host5','host6','host7','host8','host9']
        self.assertEqual(pbe._get_serialized_batches(play), [['host0'],['host1','host2'],['host3','host4','host5'],['host6','host7','host8'],['host9']])

        # serial: [1, "20%", -1] -> mixed int/percent, -1 means "the rest".
        playbook = Playbook.load(pbe._playbooks[4], variable_manager=mock_var_manager, loader=fake_loader)
        play = playbook.get_plays()[0]
        play.post_validate(templar)
        mock_inventory.get_hosts.return_value = ['host0','host1','host2','host3','host4','host5','host6','host7','host8','host9']
        self.assertEqual(pbe._get_serialized_batches(play), [['host0'],['host1','host2'],['host3','host4','host5','host6','host7','host8','host9']])

        # Test when serial percent is under 1.0
        playbook = Playbook.load(pbe._playbooks[2], variable_manager=mock_var_manager, loader=fake_loader)
        play = playbook.get_plays()[0]
        play.post_validate(templar)
        mock_inventory.get_hosts.return_value = ['host0','host1','host2']
        self.assertEqual(pbe._get_serialized_batches(play), [['host0'],['host1'],['host2']])

        # Test when there is a remainder for serial as a percent
        playbook = Playbook.load(pbe._playbooks[2], variable_manager=mock_var_manager, loader=fake_loader)
        play = playbook.get_plays()[0]
        play.post_validate(templar)
        mock_inventory.get_hosts.return_value = ['host0','host1','host2','host3','host4','host5','host6','host7','host8','host9','host10']
        self.assertEqual(
            pbe._get_serialized_batches(play),
            [['host0','host1'],['host2','host3'],['host4','host5'],['host6','host7'],['host8','host9'],['host10']]
        )
| gpl-3.0 |
adamncasey/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/tests/test_sanitizer.py | 430 | 4645 | from __future__ import absolute_import, division, unicode_literals
try:
import json
except ImportError:
import simplejson as json
from html5lib import html5parser, sanitizer, constants, treebuilders
def toxmlFactory():
    """Build and return a serializer that renders an etree element to text.

    Returns a one-argument callable mapping an element to a unicode string.
    """
    builder = treebuilders.getTreeBuilder("etree")

    def serialize(element):
        # tostring() yields bytes; the encode/decode round trip keeps
        # Python 2.6 compatibility (always hand back unicode).
        raw = builder.implementation.tostring(element, encoding="utf-8")
        return raw.decode("utf-8")

    return serialize
def runSanitizerTest(name, expected, input, toxml=None):
    """Assert that sanitizing ``input`` produces ``expected``.

    ``expected`` is itself parsed and re-serialized with ``toxml`` so both
    sides of the comparison go through the same serializer.  ``name`` only
    labels the generated test case and is not used here.
    """
    if toxml is None:
        toxml = toxmlFactory()
    expected = ''.join([toxml(token) for token in html5parser.HTMLParser().
                        parseFragment(expected)])
    # JSON round trip — presumably normalizes the string type to unicode
    # across Python versions; kept as-is (TODO confirm it is still needed).
    expected = json.loads(json.dumps(expected))
    # Fix: pass the same serializer to sanitize_html().  Previously a custom
    # toxml was applied only to `expected`, while the actual output silently
    # rebuilt the default factory serializer, so the two sides could use
    # different serializers.
    assert expected == sanitize_html(input, toxml)
def sanitize_html(stream, toxml=None):
    """Parse ``stream`` through the sanitizing tokenizer and serialize it.

    A custom serializer may be supplied via ``toxml``; otherwise one is
    built with toxmlFactory().
    """
    if toxml is None:
        toxml = toxmlFactory()
    parser = html5parser.HTMLParser(tokenizer=sanitizer.HTMLSanitizer)
    pieces = [toxml(token) for token in parser.parseFragment(stream)]
    return ''.join(pieces)
def test_should_handle_astral_plane_characters():
    # Characters outside the Basic Multilingual Plane must survive
    # sanitization intact.
    expected = '<html:p xmlns:html="http://www.w3.org/1999/xhtml">\U0001d4b5 \U0001d538</html:p>'
    assert expected == sanitize_html("<p>𝒵 𝔸</p>")
def test_sanitizer():
    """Yield generated test cases for the HTMLSanitizer.

    Covers: whitelisted tags are kept, non-whitelisted (uppercase) tags and
    attributes are rejected, whitelisted attributes are kept, and
    whitelisted URI protocols are allowed in href.
    """
    toxml = toxmlFactory()

    unhandled_tags = ['caption', 'col', 'colgroup', 'optgroup', 'option',
                      'table', 'tbody', 'td', 'tfoot', 'th', 'thead', 'tr']
    for tag_name in sanitizer.HTMLSanitizer.allowed_elements:
        if tag_name in unhandled_tags:
            continue  # TODO
        if tag_name != tag_name.lower():
            continue  # TODO
        markup = "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name)
        if tag_name == 'image':
            # The expected output shows <image> serialized as <img/>.
            expected = "<img title=\"1\"/>foo <bad>bar</bad> baz"
        elif tag_name == 'br':
            # The stray </br> end tag is expected to yield a second <br/>.
            expected = "<br title=\"1\"/>foo <bad>bar</bad> baz<br/>"
        elif tag_name in constants.voidElements:
            expected = "<%s title=\"1\"/>foo <bad>bar</bad> baz" % tag_name
        else:
            expected = "<%s title=\"1\">foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name)
        yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
               expected, markup, toxml)

    for tag_name in sanitizer.HTMLSanitizer.allowed_elements:
        # Uppercase variants are not whitelisted and must be rejected.
        tag_name = tag_name.upper()
        yield (runSanitizerTest, "test_should_forbid_%s_tag" % tag_name,
               "<%s title=\"1\">foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
               "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
               toxml)

    for attribute_name in sanitizer.HTMLSanitizer.allowed_attributes:
        if attribute_name != attribute_name.lower():
            continue  # TODO
        if attribute_name == 'style':
            # style is skipped here (not exercised by this generator).
            continue
        yield (runSanitizerTest, "test_should_allow_%s_attribute" % attribute_name,
               "<p %s=\"foo\">foo <bad>bar</bad> baz</p>" % attribute_name,
               "<p %s='foo'>foo <bad>bar</bad> baz</p>" % attribute_name,
               toxml)

    for attribute_name in sanitizer.HTMLSanitizer.allowed_attributes:
        # Uppercase attribute names are not whitelisted and must be dropped.
        attribute_name = attribute_name.upper()
        yield (runSanitizerTest, "test_should_forbid_%s_attribute" % attribute_name,
               "<p>foo <bad>bar</bad> baz</p>",
               "<p %s='display: none;'>foo <bad>bar</bad> baz</p>" % attribute_name,
               toxml)

    # Two passes over the protocol whitelist, preserving the original
    # yield order: all "allow" cases first, then all "allow_uppercase" ones.
    for name_template in ("test_should_allow_%s_uris",
                          "test_should_allow_uppercase_%s_uris"):
        for protocol in sanitizer.HTMLSanitizer.allowed_protocols:
            yield (runSanitizerTest, name_template % protocol,
                   "<a href=\"%s\">foo</a>" % protocol,
                   "<a href=\"%s\">foo</a>" % protocol,
                   toxml)
| mpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.