repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
openthread/openthread | tests/scripts/thread-cert/test_diag.py | 3 | 4131 | #!/usr/bin/env python3
#
# Copyright (c) 2018, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import thread_cert
class TestDiag(thread_cert.TestCase):
    """Drives the OpenThread factory-diagnostics CLI on a single simulated
    node and checks the exact output of every command in sequence."""

    SUPPORT_NCP = False

    TOPOLOGY = {1: None}

    def test(self):
        dut = self.nodes[1]
        # (command, expected-output pattern) pairs, executed in order; the
        # sequence is stateful (start -> configure -> send -> stop).
        cases = [
            ('diag\n', 'diagnostics mode is disabled\r\n'),
            ('diag send 10 100\n', 'Error 13: InvalidState\r\n'),
            ('diag start\n', 'Done\r\n'),
            ('diag invalid test\n', 'diag feature \'invalid\' is not supported'),
            ('diag', 'diagnostics mode is enabled\r\n'),
            ('diag channel 10\n', 'failed\r\nstatus 0x7\r\n'),
            ('diag channel 11\n', 'set channel to 11\r\nstatus 0x00\r\n'),
            ('diag channel\n', 'channel: 11\r\n'),
            ('diag power -10\n', 'set tx power to -10 dBm\r\nstatus 0x00\r\n'),
            ('diag power\n', 'tx power: -10 dBm\r\n'),
            (
                'diag stats\n',
                'received packets: 0\r\nsent packets: 0\r\n'
                'first received packet: rssi=0, lqi=0\r\n'
                'last received packet: rssi=0, lqi=0\r\n',
            ),
            (
                'diag send 20 100\n',
                r'sending 0x14 packet\(s\), length 0x64\r\nstatus 0x00\r\n',
            ),
            (
                ' diag \t send \t 20\t100',
                r'sending 0x14 packet\(s\), length 0x64\r\nstatus 0x00\r\n',
            ),
            (
                'diag repeat 100 100\n',
                'sending packets of length 0x64 at the delay of 0x64 ms\r\nstatus 0x00\r\n',
            ),
            (
                'diag repeat stop\n',
                'repeated packet transmission is stopped\r\nstatus 0x00\r\n',
            ),
            (
                'diag stop\n',
                r'received packets: 0\r\nsent packets: ([1-9]\d*)\r\n'
                'first received packet: rssi=0, lqi=0\r\n'
                'last received packet: rssi=0, lqi=0\r\n\n'
                r'stop diagnostics mode\r\nstatus 0x00\r\n',
            ),
            ('diag', 'diagnostics mode is disabled\r\n'),
            (
                'diag 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32',
                r'Error 7: InvalidArgs\r\n',
            ),
        ]

        for command, expected in cases:
            dut.send_command(command)
            self.simulator.go(1)
            # The virtual-time simulator needs a short real pause so the
            # node's output is flushed before we try to match it.
            if type(self.simulator).__name__ == 'VirtualTime':
                time.sleep(0.1)
            dut._expect(expected)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
zhulin2609/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/irc/ircbot.py | 127 | 4228 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.config import irc as config_irc
from webkitpy.common.thread.messagepump import MessagePump, MessagePumpDelegate
from webkitpy.thirdparty.autoinstalled.irc import ircbot
from webkitpy.thirdparty.autoinstalled.irc import irclib
class IRCBotDelegate(object):
    """Interface that IRCBot clients implement to supply credentials and
    handle incoming channel messages.  All three methods must be overridden.
    """

    def irc_message_received(self, nick, message):
        # Called with the sender's nick and the request text that followed
        # the bot's name; may return a reply string to post to the channel.
        raise NotImplementedError("subclasses must implement")

    def irc_nickname(self):
        raise NotImplementedError("subclasses must implement")

    def irc_password(self):
        raise NotImplementedError("subclasses must implement")
class IRCBot(ircbot.SingleServerIRCBot, MessagePumpDelegate):
    """IRC bot that joins one channel and relays between it and a local
    message queue, forwarding addressed requests to a delegate."""

    # FIXME: We should get this information from a config file.
    def __init__(self,
                 message_queue,
                 delegate):
        self._message_queue = message_queue
        self._delegate = delegate
        server_spec = (
            config_irc.server,
            config_irc.port,
            self._delegate.irc_password(),
        )
        ircbot.SingleServerIRCBot.__init__(
            self,
            [server_spec],
            self._delegate.irc_nickname(),
            self._delegate.irc_nickname())
        self._channel = config_irc.channel

    # ircbot.SingleServerIRCBot methods

    def on_nicknameinuse(self, connection, event):
        # Keep appending underscores until a free nickname is found.
        connection.nick(connection.get_nickname() + "_")

    def on_welcome(self, connection, event):
        connection.join(self._channel)
        # Only start draining the queue once we are connected and joined.
        self._message_pump = MessagePump(self, self._message_queue)

    def on_pubmsg(self, connection, event):
        sender = irclib.nm_to_n(event.source())
        text = event.arguments()[0]
        bot_name = connection.get_nickname()
        # Ignore messages that are not addressed to us ("botname: ...").
        if not irclib.irc_lower(text).startswith(irclib.irc_lower(bot_name)):
            return
        if len(text) <= len(bot_name):
            return
        # Some IRC clients, like xchat-gnome, default to using a comma
        # when addressing someone.
        separator = text[len(bot_name)]
        if separator != ':' and separator != ',':
            return
        parts = text.split(separator, 1)
        if len(parts) > 1:
            reply = self._delegate.irc_message_received(sender, parts[1])
            if reply:
                connection.privmsg(self._channel, reply)

    # MessagePumpDelegate methods

    def schedule(self, interval, callback):
        self.connection.execute_delayed(interval, callback)

    def message_available(self, message):
        self.connection.privmsg(self._channel, message)

    def final_message_delivered(self):
        self.die()
| bsd-3-clause |
iAMr00t/opencog | opencog/python/web/api/apiatomcollection.py | 14 | 21847 | __author__ = 'Cosmo Harrigan'
from flask import abort, json, current_app, jsonify
from flask.ext.restful import Resource, reqparse, marshal
import opencog.cogserver
from opencog.atomspace import Handle, Atom
from mappers import *
from flask.ext.restful.utils import cors
from flask_restful_swagger import swagger
# If the system doesn't have these dependencies installed, display a warning
# but allow the API to load
try:
from graph_description import dot
except ImportError:
print "DOT graph description format option not enabled in REST API. To " \
"enable, install the dependencies listed here:\n" \
"https://github.com/opencog/opencog/tree/master/opencog/python/graph_description#prerequisites"
"AtomSpace management functionality"
class AtomCollectionAPI(Resource):
# This is because of https://github.com/twilio/flask-restful/issues/134
@classmethod
def new(cls, atomspace):
cls.atomspace = atomspace
return cls
    def __init__(self):
        """Build the request parser that validates every supported GET
        query-string parameter for this resource."""
        self.reqparse = reqparse.RequestParser()
        # Atom type must be one of the registered OpenCog atom type names.
        self.reqparse.add_argument(
            'type', type=str, location='args', choices=types.__dict__.keys())
        self.reqparse.add_argument('name', type=str, location='args')
        # JSONP callback function name.
        self.reqparse.add_argument('callback', type=str, location='args')
        # Predefined server-side filters; 'stirange' additionally reads
        # stimin/stimax below.
        self.reqparse.add_argument('filterby', type=str, location='args',
                                   choices=['stirange', 'attentionalfocus'])
        self.reqparse.add_argument('stimin', type=int, location='args')
        self.reqparse.add_argument('stimax', type=int, location='args')
        # Minimum TruthValue thresholds.
        self.reqparse.add_argument('tvStrengthMin', type=float, location='args')
        self.reqparse.add_argument(
            'tvConfidenceMin', type=float, location='args')
        self.reqparse.add_argument('tvCountMin', type=float, location='args')
        # Boolean flags arrive as strings; accepted spellings are validated
        # here and interpreted in _get().
        self.reqparse.add_argument(
            'includeIncoming', type=str, location='args',
            choices=['true', 'false', 'True', 'False', '0', '1'])
        self.reqparse.add_argument(
            'includeOutgoing', type=str, location='args',
            choices=['true', 'false', 'True', 'False', '0', '1'])
        self.reqparse.add_argument(
            'dot', type=str, location='args',
            choices=['true', 'false', 'True', 'False', '0', '1'])
        self.reqparse.add_argument('limit', type=int, location='args')
        super(AtomCollectionAPI, self).__init__()
        # NOTE(review): this overwrites the atomspace bound via new();
        # presumably both refer to the same server atomspace -- confirm.
        self.atomspace = opencog.cogserver.get_server_atomspace()
# Set CORS headers to allow cross-origin access
# (https://github.com/twilio/flask-restful/pull/131):
@cors.crossdomain(origin='*')
@swagger.operation(
notes='''
<p>URI: <code>atoms/[id]</code>
<p>(or)
<code>atoms?type=[type]&name=[name]&filterby=[filterby]
&tvStrengthMin=[tvStrengthMin]&tvConfidenceMin=[tvConfidenceMin]
&tvCountMin=[tvCountMin]&includeIncoming=[includeIncoming]
&includeOutgoing=[includeOutgoing]&limit=[limit]&callback=[callback]</code>
<p>Example:
<pre>{
'result':
{
'complete': 'true',
'skipped': 'false',
'total': 10,
'atoms':
[
{ 'handle': 6,
'name': '',
'type': 'InheritanceLink',
'outgoing': [2, 1],
'incoming': [],
'truthvalue':
{
'type': 'simple',
'details':
{
'count': '0.4000000059604645',
'confidence': '0.0004997501382604241',
'strength': '0.5'
}
}
'attentionvalue':
{
'lti': 0,
'sti': 0,
'vlti': false
}
},
...
]
}
}
</pre>
<p>Examples using optional predefined filters:
<dl>
<dt>Get all atoms in the AttentionalFocus</dt>
<dd>URI: <code>atoms?filterby=attentionalfocus</dd>
<dt>Get all atoms in the STI range between 5 (inclusive) and 10 (inclusive)</dt>
<dd>URI: <code>atoms?filterby=stirange&stimin=5&stimax=10</code></dd>
<dt>Get all atoms with STI greater than or equal to 5</dt>
<dd>URI: <code>atoms?filterby=stirange&stimin=5</code></dd>
</dl>
''',
responseClass=Atom,
nickname='get',
parameters=[
{
'name': 'id',
'description': '''to specifically request an atom by handle,
can be combined with <code>includeIncoming</code> or <code>includeOutgoing</code> only;
if specified, other query parameters will have no effect) Atom handle''',
'required': False,
'allowMultiple': False,
'dataType': 'int',
'paramType': 'path'
},
{
'name': 'type',
'description': '<a href="http://wiki.opencog.org/w/OpenCog_Atom_types">OpenCog Atom type</a>',
'required': False,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'query'
},
{
'name': 'name',
'description': '''(not allowed for Link types). If neither
<code>type</code> or <code>name</code> are provided,
all atoms will be retrieved.''',
'required': False,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'query'
},
{
'name': 'filterby',
'description': '''(can't be combined with type or name)
Allows certain predefined filters
<dl>
<dt>stirange</dt>
<dd>The filter 'stirange' allows the additional parameters 'stimin'
(required, int) and 'stimax' (optional, int) and returns the atoms
in a given STI range</dd>
<dt>attentionalfocus</dt>
<dd>The filter 'attentionalfocus' (boolean) returns the atoms in the
AttentionalFocus</dd>
</dl>''',
'required': False,
'allowMultiple': False,
'dataType': 'stirange | attentionalfocus',
'paramType': 'query'
},
{
'name': 'stimin',
'description': '''Only return atoms with STI (Short Term Importance)
greater than or equal to this amount (only usable with <code>filterby=stirange</code>)''',
'required': False,
'allowMultiple': False,
'dataType': 'float',
'paramType': 'query'
},
{
'name': 'stimax',
'description': '''Only return atoms with STI (Short Term Importance)
less than or equal to this amount (only usable with <code>filterby=stirange</code>)''',
'required': False,
'allowMultiple': False,
'dataType': 'float',
'paramType': 'query'
},
{
'name': 'tvStrengthMin',
'description': '''Only return atoms with
TruthValue strength greater than or equal to this amount''',
'required': False,
'allowMultiple': False,
'dataType': 'float',
'paramType': 'query'
},
{
'name': 'tvConfidenceMin',
'description': '''Only return atoms with
TruthValue confidence greater than or equal to this amount''',
'required': False,
'allowMultiple': False,
'dataType': 'float',
'paramType': 'query'
},
{
'name': 'tvCountMin',
'description': '''Only return atoms with
TruthValue count greater than or equal to this amount''',
'required': False,
'allowMultiple': False,
'dataType': 'float',
'paramType': 'query'
},
{
'name': 'includeIncoming',
'description': '''Returns the conjunction of
the set of atoms and their incoming sets''',
'required': False,
'allowMultiple': False,
'dataType': 'boolean',
'paramType': 'query'
},
{
'name': 'includeOutgoing',
'description': '''Returns the conjunction of
the set of atoms and their outgoing sets. Useful in combination
with includeIncoming.''',
'required': False,
'allowMultiple': False,
'dataType': 'boolean',
'paramType': 'query'
},
{
'name': 'dot',
'description': '''Returns the atom set represented in
the DOT graph description language
(See <a href="https://github.com/opencog/opencog/blob/master/opencog/python/graph_description/README.md">opencog/python/graph_description/README.md</a> for details)''',
'required': False,
'allowMultiple': False,
'dataType': 'boolean',
'paramType': 'query'
},
{
'name': 'limit',
'description': '''To specify the maximum number of atoms to be returned.
If the query results are greater than the number specified by
<code>limit</code>, then the result set list is truncated to the
first <code>limit</code> number of atoms.''',
'required': False,
'allowMultiple': False,
'dataType': 'int',
'paramType': 'query'
},
{
'name': 'callback',
'description': '''JavaScript callback function for JSONP support''',
'required': False,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'query'
}
],
responseMessages=[
{'code': 200, 'message': 'Returned list of atoms matching specified criteria'},
{'code': 400, 'message': 'Invalid request: stirange filter requires stimin parameter'}
]
)
def get(self, id=""):
retval = jsonify({'error':'Internal error'})
try:
retval = self._get(id=id)
except Exception,e:
retval = jsonify({'error':str(e)})
return retval
    def _get(self, id=""):
        """
        Returns a list of atoms matching the specified criteria
        """
        args = self.reqparse.parse_args()
        type = args.get('type')
        name = args.get('name')
        callback = args.get('callback')
        filter_by = args.get('filterby')
        sti_min = args.get('stimin')
        sti_max = args.get('stimax')
        tv_strength_min = args.get('tvStrengthMin')
        tv_confidence_min = args.get('tvConfidenceMin')
        tv_count_min = args.get('tvCountMin')
        include_incoming = args.get('includeIncoming')
        include_outgoing = args.get('includeOutgoing')
        dot_format = args.get('dot')
        limit = args.get('limit')
        # An explicit handle takes precedence over all query parameters.
        if id != "":
            try:
                atom = self.atomspace[Handle(id)]
                atoms = [atom]
            except IndexError:
                # An unknown handle yields an empty result set, not a 404.
                atoms = []
                # abort(404, 'Handle not found')
        else:
            # First, check if there is a valid filter type, and give it
            # precedence if it exists
            valid_filter = False
            if filter_by is not None:
                if filter_by == 'stirange':
                    if sti_min is not None:
                        valid_filter = True
                        atoms = self.atomspace.get_atoms_by_av(sti_min, sti_max)
                    else:
                        abort(400, 'Invalid request: stirange filter requires '
                                   'stimin parameter')
                elif filter_by == 'attentionalfocus':
                    valid_filter = True
                    atoms = self.atomspace.get_atoms_in_attentional_focus()
            # If there is not a valid filter type, proceed to select by type
            # or name
            if not valid_filter:
                if type is None and name is None:
                    atoms = self.atomspace.get_atoms_by_type(types.Atom)
                elif name is None:
                    atoms = self.atomspace.get_atoms_by_type(
                        types.__dict__.get(type))
                else:
                    # Name lookups default to the generic Node type.
                    if type is None:
                        type = 'Node'
                    atoms = self.atomspace.get_atoms_by_name(
                        t=types.__dict__.get(type), name=name)
        # Optionally, filter by TruthValue
        if tv_strength_min is not None:
            atoms = [atom for atom in atoms if atom.tv.mean >=
                     tv_strength_min]
        if tv_confidence_min is not None:
            atoms = [atom for atom in atoms if atom.tv.confidence >=
                     tv_confidence_min]
        if tv_count_min is not None:
            atoms = [atom for atom in atoms if atom.tv.count >=
                     tv_count_min]
        # Optionally, include the incoming set
        if include_incoming in ['True', 'true', '1']:
            atoms = self.atomspace.include_incoming(atoms)
        # Optionally, include the outgoing set
        if include_outgoing in ['True', 'true', '1']:
            atoms = self.atomspace.include_outgoing(atoms)
        # Optionally, limit number of atoms returned
        if limit is not None:
            if len(atoms) > limit:
                atoms = atoms[0:limit]
        # The default is to return the atom set as JSON atoms. Optionally, a
        # DOT return format is also supported
        if dot_format not in ['True', 'true', '1']:
            atom_list = AtomListResponse(atoms)
            json_data = {'result': atom_list.format()}
            # if callback function supplied, pad the JSON data (i.e. JSONP):
            if callback is not None:
                response = str(callback) + '(' + json.dumps(json_data) + ');'
                return current_app.response_class(
                    response, mimetype='application/javascript')
            else:
                return current_app.response_class(
                    json.dumps(json_data), mimetype='application/json')
        else:
            dot_output = dot.get_dot_representation(atoms)
            return jsonify({'result': dot_output})
@swagger.operation(
notes='''
Include data with the POST request providing a JSON representation of
the atom.
<p>Examples:
<p>Node:
<pre>
{
'type': 'ConceptNode',
'name': 'Frog',
'truthvalue':
{
'type': 'simple',
'details':
{
'strength': 0.8,
'count': 0.2
}
}
}
</pre>
<p>Link:
<pre>
{
'type': 'InheritanceLink',
'outgoing': [1, 2],
'truthvalue':
{
'type': 'simple',
'details':
{
'strength': 0.5,
'count': 0.4
}
}
}
</pre>
<p>Returns a JSON representation of an atom list containing
the atom. Example:
<pre>
{
'atoms':
{
'handle': 6,
'name': '',
'type': 'InheritanceLink',
'outgoing': [2, 1],
'incoming': [],
'truthvalue':
{
'type': 'simple',
'details':
{
'count': '0.4000000059604645',
'confidence': '0.0004997501382604241',
'strength': '0.5'
}
},
'attentionvalue':
{
'lti': 0,
'sti': 0,
'vlti': false
}
}
}
</pre>''',
responseClass=Atom,
nickname='post',
parameters=[
{
'name': 'type',
'description': '<a href="http://wiki.opencog.org/w/OpenCog_Atom_types">OpenCog Atom type</a>',
'required': True,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'body'
},
{
'name': 'name',
'description': '''(required for Node types, not allowed for Link types) Atom name''',
'required': True,
'allowMultiple': False,
'dataType': 'string',
'paramType': 'body'
},
{
'name': 'truthvalue',
'description': '''<a href="http://wiki.opencog.org/w/TruthValue">TruthValue</a>, formatted as follows:
<dl>
<dt><code>type</code> (required)</dt>
<dd><a href="http://wiki.opencog.org/w/TruthValue">TruthValue type</a>
(only 'simple' is currently available)</dd>
<dt><code>details</code> (required)</dt>
<dd>TruthValue parameters, formatted as follows:
<ul>
<li>strength (required)</li>
<li>count (required)</li>
</ul>
</dd>
</dl>''',
'required': True,
'allowMultiple': False,
'dataType': 'TruthValue',
'paramType': 'body'
},
{
'name': 'outgoing',
'description': '''The set of arguments of the relation, formatted as
<a href="http://wiki.opencog.org/w/Link#Incoming_and_Outgoing_Sets">a list of Atom handles (only valid for Links, not nodes)</a>''',
'required': False,
'allowMultiple': False,
'dataType': 'list',
'paramType': 'body'
}
],
responseMessages=[
{'code': 200, 'message': 'Created specified list of atoms'},
{'code': 400, 'message': 'Invalid type or required parameter type missing'},
{'code': 500, 'message': 'Error processing request. Check your parameters'}
]
)
def post(self):
"""
Creates a new atom. If the atom already exists, it updates the atom.
"""
# Prepare the atom data and validate it
data = reqparse.request.get_json()
if 'type' in data:
if data['type'] in types.__dict__:
type = types.__dict__.get(data['type'])
else:
abort(400, 'Invalid request: type \'' + type + '\' is not a '
'valid type')
else:
abort(400, 'Invalid request: required parameter type is missing')
# TruthValue
tv = ParseTruthValue.parse(data)
# Outgoing set
if 'outgoing' in data:
if len(data['outgoing']) > 0:
outgoing = [Handle(h) for h in data['outgoing']]
else:
outgoing = None
# Name
name = data['name'] if 'name' in data else None
# Nodes must have names
if is_a(type, types.Node):
if name is None:
abort(400, 'Invalid request: node type specified and required '
'parameter name is missing')
# Links can't have names
else:
if name is not None:
abort(400, 'Invalid request: parameter name is not allowed for '
'link types')
try:
atom = self.atomspace.add(t=type, name=name, tv=tv, out=outgoing)
except TypeError:
abort(500, 'Error while processing your request. Check your '
'parameters.')
return {'atoms': marshal(atom, atom_fields)}
@swagger.operation(
notes='''
URI: <code>atoms/[id]</code>
<p>Include data with the PUT request providing a JSON representation of
the updated attributes.
<p>Example:
<pre>
{
'truthvalue':
{
'type': 'simple',
'details':
{
'strength': 0.005,
'count': 0.8
}
},
'attentionvalue':
{
'sti': 9,
'lti': 2,
'vlti': True
}
}
</pre>
<p>Returns a JSON representation of an atom list
containing the atom.
<p>Example:
<pre>
{ 'atoms':
{
'handle': 6,
'name': '',
'type': 'InheritanceLink',
'outgoing': [2, 1],
'incoming': [],
'truthvalue':
{
'type': 'simple',
'details':
{
'count': '0.4000000059604645',
'confidence': '0.0004997501382604241',
'strength': '0.5'
}
},
'attentionvalue':
{
'lti': 0,
'sti': 0,
'vlti': false
}
}
}
}
</pre>''',
responseClass=Atom,
nickname='put',
parameters=[
{
'name': 'id',
'description': '<a href="http://wiki.opencog.org/w/Handle">Atom handle</a>',
'required': True,
'allowMultiple': False,
'dataType': 'int',
'paramType': 'path'
},
{
'name': 'truthvalue',
'description': '''<a href="http://wiki.opencog.org/w/TruthValue">TruthValue</a>, formatted as follows:
<dl>
<dt><code>type</code> (required)</dt>
<dd><a href="http://wiki.opencog.org/w/TruthValue">TruthValue type</a>
(only 'simple' is currently available)</dd>
<dt><code>details</code> (required)</dt>
<dd>TruthValue parameters, formatted as follows:
<ul>
<li>strength (required)</li>
<li>count (required)</li>
</ul>
</dd>
</dl>''',
'required': False,
'allowMultiple': False,
'dataType': 'TruthValue',
'paramType': 'body'
},
{
'name': 'attentionvalue',
'description': '''<a href="http://wiki.opencog.org/w/AttentionValue">AttentionValue</a>, formatted as follows:
<dl>
<dt><code>sti</code> (optional)</dt>
<dd>Short-Term Importance</dd>
<dt><code>lti</code> (optional)</dt>
<dd>Long-Term Importance</dd>
<dt><code>vlti</code> (optional)</dt>
<dd>Very-Long-Term Importance</dd>
</dl>''',
'required': False,
'allowMultiple': False,
'dataType': 'AttentionValue',
'paramType': 'body'
}
],
responseMessages=[
{'code': 200, 'message': 'Atom truth and/or attention value updated'},
{'code': 400, 'message': 'Invalid type or required parameter type missing'},
{'code': 404, 'message': 'Handle not found'}
]
)
    def put(self, id):
        """
        Updates the AttentionValue (STI, LTI, VLTI) or TruthValue of an atom
        """
        # 404 if the handle does not resolve to an atom in this atomspace.
        if Handle(id) not in self.atomspace:
            abort(404, 'Handle not found')
        # Prepare the atom data
        data = reqparse.request.get_json()
        # At least one of the two updatable values must be supplied.
        if 'truthvalue' not in data and 'attentionvalue' not in data:
            abort(400, 'Invalid request: you must include a truthvalue or '
                       'attentionvalue parameter')
        # Update the TruthValue if supplied.
        if 'truthvalue' in data:
            tv = ParseTruthValue.parse(data)
            self.atomspace.set_tv(h=Handle(id), tv=tv)
        # Update the AttentionValue (STI/LTI/VLTI) if supplied.
        if 'attentionvalue' in data:
            (sti, lti, vlti) = ParseAttentionValue.parse(data)
            self.atomspace.set_av(h=Handle(id), sti=sti, lti=lti, vlti=vlti)
        # Re-fetch and return the updated atom in the standard JSON shape.
        atom = self.atomspace[Handle(id)]
        return {'atoms': marshal(atom, atom_fields)}
@swagger.operation(
notes='''
Returns a JSON representation of the result, indicating success or failure.
<p>Example:
<pre>
{
'result':
{
'handle': 2,
'success': 'true'
}
}
</pre>''',
responseClass='result',
nickname='delete',
parameters=[
{
'name': 'id',
'description': '<a href="http://wiki.opencog.org/w/Handle">Atom handle</a>',
'required': True,
'allowMultiple': False,
'dataType': 'int',
'paramType': 'path'
}
],
responseMessages=[
{'code': 200, 'message': 'Deleted the atom'},
{'code': 404, 'message': 'Handle not found'},
]
)
def delete(self, id):
"""
Removes an atom from the AtomSpace
"""
if Handle(id) not in self.atomspace:
abort(404, 'Handle not found')
else:
atom = self.atomspace[Handle(id)]
status = self.atomspace.remove(atom)
response = DeleteAtomResponse(id, status)
return {'result': response.format()}
| agpl-3.0 |
0jpq0/kbengine | kbe/res/scripts/common/Lib/test/test_exceptions.py | 75 | 34461 | # Python test set -- part 5, built-in exceptions
import os
import sys
import unittest
import pickle
import weakref
import errno
from test.support import (TESTFN, captured_output, check_impl_detail,
check_warnings, cpython_only, gc_collect, run_unittest,
no_tracing, unlink)
class NaiveException(Exception):
    # Stores its single argument as an attribute without forwarding it to
    # the Exception base constructor.
    def __init__(self, x):
        self.x = x
class SlottedNaiveException(Exception):
    # Same as NaiveException, but the attribute lives in __slots__.
    __slots__ = ('x',)

    def __init__(self, x):
        self.x = x
# XXX This is not really enough, each *operation* should be tested!
class ExceptionTests(unittest.TestCase):
def raise_catch(self, exc, excname):
try:
raise exc("spam")
except exc as err:
buf1 = str(err)
try:
raise exc("spam")
except exc as err:
buf2 = str(err)
self.assertEqual(buf1, buf2)
self.assertEqual(exc.__name__, excname)
    def testRaising(self):
        # Exercise raise/catch plus a concrete triggering operation for each
        # builtin exception type.
        self.raise_catch(AttributeError, "AttributeError")
        self.assertRaises(AttributeError, getattr, sys, "undefined_attribute")
        self.raise_catch(EOFError, "EOFError")
        fp = open(TESTFN, 'w')
        fp.close()
        fp = open(TESTFN, 'r')
        savestdin = sys.stdin
        try:
            try:
                import marshal
                # Empty input triggers EOFError from the marshal reader.
                marshal.loads(b'')
            except EOFError:
                pass
        finally:
            sys.stdin = savestdin
            fp.close()
            unlink(TESTFN)
        self.raise_catch(OSError, "OSError")
        self.assertRaises(OSError, open, 'this file does not exist', 'r')
        self.raise_catch(ImportError, "ImportError")
        self.assertRaises(ImportError, __import__, "undefined_module")
        self.raise_catch(IndexError, "IndexError")
        x = []
        self.assertRaises(IndexError, x.__getitem__, 10)
        self.raise_catch(KeyError, "KeyError")
        x = {}
        self.assertRaises(KeyError, x.__getitem__, 'key')
        self.raise_catch(KeyboardInterrupt, "KeyboardInterrupt")
        self.raise_catch(MemoryError, "MemoryError")
        self.raise_catch(NameError, "NameError")
        try: x = undefined_variable
        except NameError: pass
        self.raise_catch(OverflowError, "OverflowError")
        x = 1
        for dummy in range(128):
            x += x  # this simply shouldn't blow up
        self.raise_catch(RuntimeError, "RuntimeError")
        self.raise_catch(SyntaxError, "SyntaxError")
        try: exec('/\n')
        except SyntaxError: pass
        self.raise_catch(IndentationError, "IndentationError")
        self.raise_catch(TabError, "TabError")
        # Mixed tab/space indentation inside the compiled string.
        try: compile("try:\n\t1/0\n \t1/0\nfinally:\n pass\n",
                     '<string>', 'exec')
        except TabError: pass
        else: self.fail("TabError not raised")
        self.raise_catch(SystemError, "SystemError")
        self.raise_catch(SystemExit, "SystemExit")
        self.assertRaises(SystemExit, sys.exit, 0)
        self.raise_catch(TypeError, "TypeError")
        try: [] + ()
        except TypeError: pass
        self.raise_catch(ValueError, "ValueError")
        self.assertRaises(ValueError, chr, 17<<16)
        self.raise_catch(ZeroDivisionError, "ZeroDivisionError")
        try: x = 1/0
        except ZeroDivisionError: pass
        self.raise_catch(Exception, "Exception")
        try: x = 1/0
        except Exception as e: pass
    def testSyntaxErrorMessage(self):
        # make sure the right exception message is raised for each of
        # these code fragments
        def ckmsg(src, msg):
            # Compile *src* and require a SyntaxError carrying exactly *msg*.
            try:
                compile(src, '<fragment>', 'exec')
            except SyntaxError as e:
                if e.msg != msg:
                    self.fail("expected %s, got %s" % (msg, e.msg))
            else:
                self.fail("failed to get expected SyntaxError")
        # NOTE(review): the indentation inside these triple-quoted fragments
        # looks like it may have been lost in transit -- verify upstream.
        s = '''while 1:
try:
pass
finally:
continue'''
        if not sys.platform.startswith('java'):
            ckmsg(s, "'continue' not supported inside 'finally' clause")
        s = '''if 1:
try:
continue
except:
pass'''
        ckmsg(s, "'continue' not properly in loop")
        ckmsg("continue\n", "'continue' not properly in loop")
    def testSyntaxErrorOffset(self):
        # SyntaxError should report the line and column of the failure.
        def check(src, lineno, offset):
            with self.assertRaises(SyntaxError) as cm:
                compile(src, '<fragment>', 'exec')
            self.assertEqual(cm.exception.lineno, lineno)
            self.assertEqual(cm.exception.offset, offset)
        check('def fact(x):\n\treturn x!\n', 2, 10)
        check('1 +\n', 1, 4)
        # NOTE(review): the spacing inside this fragment may have been
        # altered in transit -- verify against upstream.
        check('def spam():\n print(1)\n print(2)', 3, 10)
        check('Python = "Python" +', 1, 20)
        check('Python = "\u1e54\xfd\u0163\u0125\xf2\xf1" +', 1, 20)
    @cpython_only
    def testSettingException(self):
        # test that setting an exception at the C level works even if the
        # exception object can't be constructed.
        class BadException(Exception):
            def __init__(self_):
                # Deliberately fails, so the C level must cope with a
                # constructor that raises.
                raise RuntimeError("can't instantiate BadException")
        class InvalidException:
            pass
        def test_capi1():
            import _testcapi
            try:
                _testcapi.raise_exception(BadException, 1)
            except TypeError as err:
                # The traceback should point back into this function.
                exc, err, tb = sys.exc_info()
                co = tb.tb_frame.f_code
                self.assertEqual(co.co_name, "test_capi1")
                self.assertTrue(co.co_filename.endswith('test_exceptions.py'))
            else:
                self.fail("Expected exception")
        def test_capi2():
            import _testcapi
            try:
                _testcapi.raise_exception(BadException, 0)
            except RuntimeError as err:
                # Innermost frame is BadException.__init__, its caller is us.
                exc, err, tb = sys.exc_info()
                co = tb.tb_frame.f_code
                self.assertEqual(co.co_name, "__init__")
                self.assertTrue(co.co_filename.endswith('test_exceptions.py'))
                co2 = tb.tb_frame.f_back.f_code
                self.assertEqual(co2.co_name, "test_capi2")
            else:
                self.fail("Expected exception")
        def test_capi3():
            import _testcapi
            self.assertRaises(SystemError, _testcapi.raise_exception,
                              InvalidException, 1)
        if not sys.platform.startswith('java'):
            test_capi1()
            test_capi2()
            test_capi3()
    def test_WindowsError(self):
        # If the WindowsError alias exists on this build, it must be OSError.
        try:
            WindowsError
        except NameError:
            pass
        else:
            self.assertIs(WindowsError, OSError)
        self.assertEqual(str(OSError(1001)), "1001")
        self.assertEqual(str(OSError(1001, "message")),
                         "[Errno 1001] message")
        # POSIX errno (9 aka EBADF) is untranslated
        w = OSError(9, 'foo', 'bar')
        self.assertEqual(w.errno, 9)
        self.assertEqual(w.winerror, None)
        self.assertEqual(str(w), "[Errno 9] foo: 'bar'")
        # NOTE(review): the four-argument winerror checks below presumably
        # only hold on Windows builds -- confirm how this test is gated.
        # ERROR_PATH_NOT_FOUND (win error 3) becomes ENOENT (2)
        w = OSError(0, 'foo', 'bar', 3)
        self.assertEqual(w.errno, 2)
        self.assertEqual(w.winerror, 3)
        self.assertEqual(w.strerror, 'foo')
        self.assertEqual(w.filename, 'bar')
        self.assertEqual(str(w), "[WinError 3] foo: 'bar'")
        # Unknown win error becomes EINVAL (22)
        w = OSError(0, 'foo', None, 1001)
        self.assertEqual(w.errno, 22)
        self.assertEqual(w.winerror, 1001)
        self.assertEqual(w.strerror, 'foo')
        self.assertEqual(w.filename, None)
        self.assertEqual(str(w), "[WinError 1001] foo")
        # Non-numeric "errno"
        w = OSError('bar', 'foo')
        self.assertEqual(w.errno, 'bar')
        self.assertEqual(w.winerror, None)
        self.assertEqual(w.strerror, 'foo')
        self.assertEqual(w.filename, None)
def testAttributes(self):
    """Construct each builtin exception with various argument shapes and
    check the expected attributes (args, errno, strerror, filename, msg,
    start/end/reason, ...), the module name, str() stability, and that a
    pickle round-trip preserves every attribute.
    """
    # NOTE(review): NaiveException / SlottedNaiveException are helper
    # classes defined elsewhere in this file — presumably user-defined
    # exceptions storing their first argument as 'x'; confirm there.
    # test that exception attributes are happy
    exceptionList = [
        (BaseException, (), {'args' : ()}),
        (BaseException, (1, ), {'args' : (1,)}),
        (BaseException, ('foo',),
            {'args' : ('foo',)}),
        (BaseException, ('foo', 1),
            {'args' : ('foo', 1)}),
        (SystemExit, ('foo',),
            {'args' : ('foo',), 'code' : 'foo'}),
        (OSError, ('foo',),
            {'args' : ('foo',), 'filename' : None,
             'errno' : None, 'strerror' : None}),
        (OSError, ('foo', 'bar'),
            {'args' : ('foo', 'bar'), 'filename' : None,
             'errno' : 'foo', 'strerror' : 'bar'}),
        (OSError, ('foo', 'bar', 'baz'),
            {'args' : ('foo', 'bar'), 'filename' : 'baz',
             'errno' : 'foo', 'strerror' : 'bar'}),
        (OSError, ('foo', 'bar', 'baz', None, 'quux'),
            {'args' : ('foo', 'bar'), 'filename' : 'baz', 'filename2': 'quux'}),
        (OSError, ('errnoStr', 'strErrorStr', 'filenameStr'),
            {'args' : ('errnoStr', 'strErrorStr'),
             'strerror' : 'strErrorStr', 'errno' : 'errnoStr',
             'filename' : 'filenameStr'}),
        (OSError, (1, 'strErrorStr', 'filenameStr'),
            {'args' : (1, 'strErrorStr'), 'errno' : 1,
             'strerror' : 'strErrorStr', 'filename' : 'filenameStr'}),
        (SyntaxError, (), {'msg' : None, 'text' : None,
            'filename' : None, 'lineno' : None, 'offset' : None,
            'print_file_and_line' : None}),
        (SyntaxError, ('msgStr',),
            {'args' : ('msgStr',), 'text' : None,
             'print_file_and_line' : None, 'msg' : 'msgStr',
             'filename' : None, 'lineno' : None, 'offset' : None}),
        (SyntaxError, ('msgStr', ('filenameStr', 'linenoStr', 'offsetStr',
                       'textStr')),
            {'offset' : 'offsetStr', 'text' : 'textStr',
             'args' : ('msgStr', ('filenameStr', 'linenoStr',
                                  'offsetStr', 'textStr')),
             'print_file_and_line' : None, 'msg' : 'msgStr',
             'filename' : 'filenameStr', 'lineno' : 'linenoStr'}),
        (SyntaxError, ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
                       'textStr', 'print_file_and_lineStr'),
            {'text' : None,
             'args' : ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
                       'textStr', 'print_file_and_lineStr'),
             'print_file_and_line' : None, 'msg' : 'msgStr',
             'filename' : None, 'lineno' : None, 'offset' : None}),
        (UnicodeError, (), {'args' : (),}),
        (UnicodeEncodeError, ('ascii', 'a', 0, 1,
                              'ordinal not in range'),
            {'args' : ('ascii', 'a', 0, 1,
                       'ordinal not in range'),
             'encoding' : 'ascii', 'object' : 'a',
             'start' : 0, 'reason' : 'ordinal not in range'}),
        # A bytearray 'object' argument is normalized to bytes.
        (UnicodeDecodeError, ('ascii', bytearray(b'\xff'), 0, 1,
                              'ordinal not in range'),
            {'args' : ('ascii', bytearray(b'\xff'), 0, 1,
                       'ordinal not in range'),
             'encoding' : 'ascii', 'object' : b'\xff',
             'start' : 0, 'reason' : 'ordinal not in range'}),
        (UnicodeDecodeError, ('ascii', b'\xff', 0, 1,
                              'ordinal not in range'),
            {'args' : ('ascii', b'\xff', 0, 1,
                       'ordinal not in range'),
             'encoding' : 'ascii', 'object' : b'\xff',
             'start' : 0, 'reason' : 'ordinal not in range'}),
        (UnicodeTranslateError, ("\u3042", 0, 1, "ouch"),
            {'args' : ('\u3042', 0, 1, 'ouch'),
             'object' : '\u3042', 'reason' : 'ouch',
             'start' : 0, 'end' : 1}),
        (NaiveException, ('foo',),
            {'args': ('foo',), 'x': 'foo'}),
        (SlottedNaiveException, ('foo',),
            {'args': ('foo',), 'x': 'foo'}),
    ]
    try:
        # WindowsError only exists on Windows; skip it elsewhere.
        # More tests are in test_WindowsError
        exceptionList.append(
            (WindowsError, (1, 'strErrorStr', 'filenameStr'),
             {'args' : (1, 'strErrorStr'),
              'strerror' : 'strErrorStr', 'winerror' : None,
              'errno' : 1, 'filename' : 'filenameStr'})
        )
    except NameError:
        pass

    for exc, args, expected in exceptionList:
        try:
            e = exc(*args)
        except:
            # Show which constructor call blew up before re-raising.
            print("\nexc=%r, args=%r" % (exc, args), file=sys.stderr)
            raise
        else:
            # Verify module name
            if not type(e).__name__.endswith('NaiveException'):
                self.assertEqual(type(e).__module__, 'builtins')
            # Verify no ref leaks in Exc_str()
            s = str(e)
            # Compare via repr() so e.g. bytes vs. str mismatches show up.
            for checkArgName in expected:
                value = getattr(e, checkArgName)
                self.assertEqual(repr(value),
                                 repr(expected[checkArgName]),
                                 '%r.%s == %r, expected %r' % (
                                 e, checkArgName,
                                 value, expected[checkArgName]))

            # test for pickling support
            for p in [pickle]:
                for protocol in range(p.HIGHEST_PROTOCOL + 1):
                    s = p.dumps(e, protocol)
                    new = p.loads(s)
                    for checkArgName in expected:
                        got = repr(getattr(new, checkArgName))
                        want = repr(expected[checkArgName])
                        self.assertEqual(got, want,
                                         'pickled "%r", attribute "%s' %
                                         (e, checkArgName))
def testWithTraceback(self):
try:
raise IndexError(4)
except:
tb = sys.exc_info()[2]
e = BaseException().with_traceback(tb)
self.assertIsInstance(e, BaseException)
self.assertEqual(e.__traceback__, tb)
e = IndexError(5).with_traceback(tb)
self.assertIsInstance(e, IndexError)
self.assertEqual(e.__traceback__, tb)
class MyException(Exception):
pass
e = MyException().with_traceback(tb)
self.assertIsInstance(e, MyException)
self.assertEqual(e.__traceback__, tb)
def testInvalidTraceback(self):
try:
Exception().__traceback__ = 5
except TypeError as e:
self.assertIn("__traceback__ must be a traceback", str(e))
else:
self.fail("No exception raised")
def testInvalidAttrs(self):
self.assertRaises(TypeError, setattr, Exception(), '__cause__', 1)
self.assertRaises(TypeError, delattr, Exception(), '__cause__')
self.assertRaises(TypeError, setattr, Exception(), '__context__', 1)
self.assertRaises(TypeError, delattr, Exception(), '__context__')
def testNoneClearsTracebackAttr(self):
try:
raise IndexError(4)
except:
tb = sys.exc_info()[2]
e = Exception()
e.__traceback__ = tb
e.__traceback__ = None
self.assertEqual(e.__traceback__, None)
def testChainingAttrs(self):
e = Exception()
self.assertIsNone(e.__context__)
self.assertIsNone(e.__cause__)
e = TypeError()
self.assertIsNone(e.__context__)
self.assertIsNone(e.__cause__)
class MyException(OSError):
pass
e = MyException()
self.assertIsNone(e.__context__)
self.assertIsNone(e.__cause__)
def testChainingDescriptors(self):
try:
raise Exception()
except Exception as exc:
e = exc
self.assertIsNone(e.__context__)
self.assertIsNone(e.__cause__)
self.assertFalse(e.__suppress_context__)
e.__context__ = NameError()
e.__cause__ = None
self.assertIsInstance(e.__context__, NameError)
self.assertIsNone(e.__cause__)
self.assertTrue(e.__suppress_context__)
e.__suppress_context__ = False
self.assertFalse(e.__suppress_context__)
def testKeywordArgs(self):
# test that builtin exception don't take keyword args,
# but user-defined subclasses can if they want
self.assertRaises(TypeError, BaseException, a=1)
class DerivedException(BaseException):
def __init__(self, fancy_arg):
BaseException.__init__(self)
self.fancy_arg = fancy_arg
x = DerivedException(fancy_arg=42)
self.assertEqual(x.fancy_arg, 42)
@no_tracing
def testInfiniteRecursion(self):
    """Runaway recursion must raise RuntimeError, even when a handler for
    an unrelated exception wraps the recursive call.

    NOTE(review): on Python >= 3.5 this presumably surfaces as
    RecursionError, a RuntimeError subclass, so the assertion still holds.
    """
    def f():
        return f()
    self.assertRaises(RuntimeError, f)

    def g():
        try:
            return g()
        except ValueError:
            # Unrelated handler: must not swallow the recursion error.
            return -1
    self.assertRaises(RuntimeError, g)
def test_str(self):
# Make sure both instances and classes have a str representation.
self.assertTrue(str(Exception))
self.assertTrue(str(Exception('a')))
self.assertTrue(str(Exception('a', 'b')))
def testExceptionCleanupNames(self):
    """The name bound by "except ... as e" must not outlive the handler."""
    # Make sure the local variable bound to the exception instance by
    # an "except" statement is only visible inside the except block.
    try:
        raise Exception()
    except Exception as e:
        # Inside the handler the binding exists and is truthy...
        self.assertTrue(e)
        del e
    # ...and after the handler Python's implicit cleanup removed it.
    self.assertNotIn('e', locals())
def testExceptionCleanupState(self):
    """Leaving an except block must drop every reference the exception
    state held, for all the ways a handler can be entered and left.

    Each section creates an object reachable only through the raised
    exception's value/traceback and checks (via weakref) that it dies as
    soon as the handler is exited.
    """
    # Make sure exception state is cleaned up as soon as the except
    # block is left. See #2507

    class MyException(Exception):
        def __init__(self, obj):
            self.obj = obj
    class MyObj:
        pass

    def inner_raising_func():
        # Create some references in exception value and traceback
        local_ref = obj
        raise MyException(obj)

    # Qualified "except" with "as"
    obj = MyObj()
    wr = weakref.ref(obj)
    try:
        inner_raising_func()
    except MyException as e:
        pass
    obj = None
    obj = wr()
    self.assertTrue(obj is None, "%s" % obj)

    # Qualified "except" without "as"
    obj = MyObj()
    wr = weakref.ref(obj)
    try:
        inner_raising_func()
    except MyException:
        pass
    obj = None
    obj = wr()
    self.assertTrue(obj is None, "%s" % obj)

    # Bare "except"
    obj = MyObj()
    wr = weakref.ref(obj)
    try:
        inner_raising_func()
    except:
        pass
    obj = None
    obj = wr()
    self.assertTrue(obj is None, "%s" % obj)

    # "except" with premature block leave
    obj = MyObj()
    wr = weakref.ref(obj)
    for i in [0]:
        try:
            inner_raising_func()
        except:
            break
    obj = None
    obj = wr()
    self.assertTrue(obj is None, "%s" % obj)

    # "except" block raising another exception
    obj = MyObj()
    wr = weakref.ref(obj)
    try:
        try:
            inner_raising_func()
        except:
            raise KeyError
    except KeyError as e:
        # We want to test that the except block above got rid of
        # the exception raised in inner_raising_func(), but it
        # also ends up in the __context__ of the KeyError, so we
        # must clear the latter manually for our test to succeed.
        e.__context__ = None
        obj = None
        obj = wr()
        # guarantee no ref cycles on CPython (don't gc_collect)
        if check_impl_detail(cpython=False):
            gc_collect()
        self.assertTrue(obj is None, "%s" % obj)

    # Some complicated construct
    obj = MyObj()
    wr = weakref.ref(obj)
    try:
        inner_raising_func()
    except MyException:
        try:
            try:
                raise
            finally:
                raise
        except MyException:
            pass
    obj = None
    if check_impl_detail(cpython=False):
        gc_collect()
    obj = wr()
    self.assertTrue(obj is None, "%s" % obj)

    # Inside an exception-silencing "with" block
    class Context:
        def __enter__(self):
            return self
        def __exit__ (self, exc_type, exc_value, exc_tb):
            # Returning True swallows the exception.
            return True
    obj = MyObj()
    wr = weakref.ref(obj)
    with Context():
        inner_raising_func()
    obj = None
    if check_impl_detail(cpython=False):
        gc_collect()
    obj = wr()
    self.assertTrue(obj is None, "%s" % obj)
def test_exception_target_in_nested_scope(self):
    """Compiling an "except ... as e" whose target is referenced from a
    nested function must not raise SyntaxError; merely running this
    method is the test."""
    # issue 4617: This used to raise a SyntaxError
    # "can not delete variable 'e' referenced in nested scope"
    def print_error():
        e
    try:
        something
    except Exception as e:
        print_error()
        # implicit "del e" here
def test_generator_leaking(self):
    """A generator suspended inside an except block must keep its own
    exception state private: resuming it must not leak that state into
    the caller, and the caller's state must be visible unchanged between
    resumptions."""
    # Test that generator exception state doesn't leak into the calling
    # frame
    def yield_raise():
        try:
            raise KeyError("caught")
        except KeyError:
            yield sys.exc_info()[0]
            yield sys.exc_info()[0]
        yield sys.exc_info()[0]
    g = yield_raise()
    self.assertEqual(next(g), KeyError)
    self.assertEqual(sys.exc_info()[0], None)
    self.assertEqual(next(g), KeyError)
    self.assertEqual(sys.exc_info()[0], None)
    self.assertEqual(next(g), None)

    # Same test, but inside an exception handler
    try:
        raise TypeError("foo")
    except TypeError:
        g = yield_raise()
        self.assertEqual(next(g), KeyError)
        self.assertEqual(sys.exc_info()[0], TypeError)
        self.assertEqual(next(g), KeyError)
        self.assertEqual(sys.exc_info()[0], TypeError)
        self.assertEqual(next(g), TypeError)
        del g
        self.assertEqual(sys.exc_info()[0], TypeError)
def test_generator_leaking2(self):
# See issue 12475.
def g():
yield
try:
raise RuntimeError
except RuntimeError:
it = g()
next(it)
try:
next(it)
except StopIteration:
pass
self.assertEqual(sys.exc_info(), (None, None, None))
def test_generator_doesnt_retain_old_exc(self):
def g():
self.assertIsInstance(sys.exc_info()[1], RuntimeError)
yield
self.assertEqual(sys.exc_info(), (None, None, None))
it = g()
try:
raise RuntimeError
except RuntimeError:
next(it)
self.assertRaises(StopIteration, next, it)
def test_generator_finalizing_and_exc_info(self):
    """Finalizing an abandoned generator must not leave stale exception
    state in sys.exc_info()."""
    # See #7173
    def simple_gen():
        yield 1
    def run_gen():
        gen = simple_gen()
        try:
            raise RuntimeError
        except RuntimeError:
            # 'gen' is abandoned here while an exception is active.
            return next(gen)
    run_gen()
    gc_collect()
    self.assertEqual(sys.exc_info(), (None, None, None))
def _check_generator_cleanup_exc_state(self, testfunc):
    """Shared driver: start a generator suspended inside an except block,
    hand it to *testfunc* (which throws/closes/advances/releases it), and
    verify via weakref that the exception state it held is released."""
    # Issue #12791: exception state is cleaned up as soon as a generator
    # is closed (reference cycles are broken).
    class MyException(Exception):
        def __init__(self, obj):
            self.obj = obj
    class MyObj:
        pass

    def raising_gen():
        try:
            raise MyException(obj)
        except MyException:
            yield

    obj = MyObj()
    wr = weakref.ref(obj)
    g = raising_gen()
    next(g)
    testfunc(g)
    g = obj = None
    # If cleanup worked, nothing references obj any more.
    obj = wr()
    self.assertIs(obj, None)
def test_generator_throw_cleanup_exc_state(self):
    """gen.throw() must release the generator's exception state."""
    def throw_runtime_error(gen):
        try:
            gen.throw(RuntimeError())
        except RuntimeError:
            pass
    self._check_generator_cleanup_exc_state(throw_runtime_error)
def test_generator_close_cleanup_exc_state(self):
    """gen.close() must release the generator's exception state."""
    self._check_generator_cleanup_exc_state(lambda gen: gen.close())
def test_generator_del_cleanup_exc_state(self):
    """Dropping the last reference must release the exception state."""
    def release(gen):
        gen = None  # only rebinds the local; the helper drops its ref later
    self._check_generator_cleanup_exc_state(release)
def test_generator_next_cleanup_exc_state(self):
    """Exhausting the generator via next() must release exception state."""
    def advance(gen):
        sentinel = object()
        # The generator has a single yield, so this call must finish it.
        if next(gen, sentinel) is not sentinel:
            self.fail("should have raised StopIteration")
    self._check_generator_cleanup_exc_state(advance)
def test_generator_send_cleanup_exc_state(self):
    """Finishing the generator via send() must release exception state."""
    def send_none(gen):
        try:
            gen.send(None)
        except StopIteration:
            pass
        else:
            self.fail("should have raised StopIteration")
    self._check_generator_cleanup_exc_state(send_none)
def test_3114(self):
# Bug #3114: in its destructor, MyObject retrieves a pointer to
# obsolete and/or deallocated objects.
class MyObject:
def __del__(self):
nonlocal e
e = sys.exc_info()
e = ()
try:
raise Exception(MyObject())
except:
pass
self.assertEqual(e, (None, None, None))
def test_unicode_change_attributes(self):
    """Mutating the attributes of Unicode{Encode,Decode,Translate}Error
    after construction must change str() accordingly and never crash,
    even for nonsensical values (huge reason, non-str encoding,
    start > end)."""
    # See issue 7309. This was a crasher.

    u = UnicodeEncodeError('baz', 'xxxxx', 1, 5, 'foo')
    self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: foo")
    u.end = 2
    self.assertEqual(str(u), "'baz' codec can't encode character '\\x78' in position 1: foo")
    u.end = 5
    u.reason = 0x345345345345345345
    self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: 965230951443685724997")
    u.encoding = 4000
    self.assertEqual(str(u), "'4000' codec can't encode characters in position 1-4: 965230951443685724997")
    u.start = 1000
    self.assertEqual(str(u), "'4000' codec can't encode characters in position 1000-4: 965230951443685724997")

    u = UnicodeDecodeError('baz', b'xxxxx', 1, 5, 'foo')
    self.assertEqual(str(u), "'baz' codec can't decode bytes in position 1-4: foo")
    u.end = 2
    self.assertEqual(str(u), "'baz' codec can't decode byte 0x78 in position 1: foo")
    u.end = 5
    u.reason = 0x345345345345345345
    self.assertEqual(str(u), "'baz' codec can't decode bytes in position 1-4: 965230951443685724997")
    u.encoding = 4000
    self.assertEqual(str(u), "'4000' codec can't decode bytes in position 1-4: 965230951443685724997")
    u.start = 1000
    self.assertEqual(str(u), "'4000' codec can't decode bytes in position 1000-4: 965230951443685724997")

    u = UnicodeTranslateError('xxxx', 1, 5, 'foo')
    self.assertEqual(str(u), "can't translate characters in position 1-4: foo")
    u.end = 2
    self.assertEqual(str(u), "can't translate character '\\x78' in position 1: foo")
    u.end = 5
    u.reason = 0x345345345345345345
    self.assertEqual(str(u), "can't translate characters in position 1-4: 965230951443685724997")
    u.start = 1000
    self.assertEqual(str(u), "can't translate characters in position 1000-4: 965230951443685724997")
def test_unicode_errors_no_object(self):
# See issue #21134.
klasses = UnicodeEncodeError, UnicodeDecodeError, UnicodeTranslateError
for klass in klasses:
self.assertEqual(str(klass.__new__(klass)), "")
@no_tracing
def test_badisinstance(self):
    """An exception raised by __subclasscheck__ during except-clause
    matching must be ignored, and exhausting the stack while matching
    must still produce a RuntimeError."""
    # Bug #2542: if issubclass(e, MyException) raises an exception,
    # it should be ignored
    class Meta(type):
        def __subclasscheck__(cls, subclass):
            raise ValueError()
    class MyException(Exception, metaclass=Meta):
        pass

    with captured_output("stderr") as stderr:
        try:
            raise KeyError()
        except MyException as e:
            # Matching MyException invokes the broken __subclasscheck__;
            # the ValueError it raises must not be treated as a match.
            self.fail("exception should not be a MyException")
        except KeyError:
            pass
        except:
            self.fail("Should have raised KeyError")
        else:
            self.fail("Should have raised KeyError")

    def g():
        try:
            return g()
        except RuntimeError:
            return sys.exc_info()
    e, v, tb = g()
    self.assertTrue(isinstance(v, RuntimeError), type(v))
    self.assertIn("maximum recursion depth exceeded", str(v))
@cpython_only
def test_MemoryError(self):
    """PyErr_NoMemory() reuses a single MemoryError instance; check that
    the traceback attached to it is rebuilt each time instead of being
    doubled by repeated raises."""
    # PyErr_NoMemory always raises the same exception instance.
    # Check that the traceback is not doubled.
    import traceback
    from _testcapi import raise_memoryerror

    def raiseMemError():
        # Trigger the C-level MemoryError and capture its traceback.
        try:
            raise_memoryerror()
        except MemoryError as e:
            tb = e.__traceback__
        else:
            # Fixed grammar of the failure message ("raises" -> "raised").
            self.fail("Should have raised a MemoryError")
        return traceback.format_tb(tb)

    # Two consecutive raises must yield identical, non-growing tracebacks.
    tb1 = raiseMemError()
    tb2 = raiseMemError()
    self.assertEqual(tb1, tb2)
@cpython_only
def test_exception_with_doc(self):
    """Exercise _testcapi.make_exception_with_doc (PyErr_NewExceptionWithDoc):
    docstring, base class / base tuple and class-dict arguments."""
    import _testcapi
    doc2 = "This is a test docstring."
    doc4 = "This is another test docstring."

    # A name without a dot is rejected at the C level.
    self.assertRaises(SystemError, _testcapi.make_exception_with_doc,
                      "error1")

    # test basic usage of PyErr_NewException
    error1 = _testcapi.make_exception_with_doc("_testcapi.error1")
    self.assertIs(type(error1), type)
    self.assertTrue(issubclass(error1, Exception))
    self.assertIsNone(error1.__doc__)

    # test with given docstring
    error2 = _testcapi.make_exception_with_doc("_testcapi.error2", doc2)
    self.assertEqual(error2.__doc__, doc2)

    # test with explicit base (without docstring)
    error3 = _testcapi.make_exception_with_doc("_testcapi.error3",
                                               base=error2)
    self.assertTrue(issubclass(error3, error2))

    # test with explicit base tuple
    class C(object):
        pass
    error4 = _testcapi.make_exception_with_doc("_testcapi.error4", doc4,
                                               (error3, C))
    self.assertTrue(issubclass(error4, error3))
    self.assertTrue(issubclass(error4, C))
    self.assertEqual(error4.__doc__, doc4)

    # test with explicit dictionary
    error5 = _testcapi.make_exception_with_doc("_testcapi.error5", "",
                                               error4, {'a': 1})
    self.assertTrue(issubclass(error5, error4))
    self.assertEqual(error5.a, 1)
    self.assertEqual(error5.__doc__, "")
@cpython_only
def test_memory_error_cleanup(self):
    """The preallocated MemoryError must drop its traceback once the
    handler exits, so frame locals (here: 'c') become collectable."""
    # Issue #5437: preallocated MemoryError instances should not keep
    # traceback objects alive.
    from _testcapi import raise_memoryerror
    class C:
        pass
    wr = None
    def inner():
        nonlocal wr
        c = C()
        wr = weakref.ref(c)
        raise_memoryerror()
    # We cannot use assertRaises since it manually deletes the traceback
    try:
        inner()
    except MemoryError as e:
        # While handling, the traceback still references inner()'s frame.
        self.assertNotEqual(wr(), None)
    else:
        self.fail("MemoryError not raised")
    # After the handler, the frame (and 'c') must have been released.
    self.assertEqual(wr(), None)
@no_tracing
def test_recursion_error_cleanup(self):
    """Like test_memory_error_cleanup, but for the "recursion exceeded"
    RuntimeError: its traceback must be released after handling."""
    # Same test as above, but with "recursion exceeded" errors
    class C:
        pass
    wr = None
    def inner():
        nonlocal wr
        c = C()
        wr = weakref.ref(c)
        inner()
    # We cannot use assertRaises since it manually deletes the traceback
    try:
        inner()
    except RuntimeError as e:
        # While handling, the traceback keeps inner()'s frames alive.
        self.assertNotEqual(wr(), None)
    else:
        self.fail("RuntimeError not raised")
    # After the handler, the frames (and 'c') must have been released.
    self.assertEqual(wr(), None)
def test_errno_ENOTDIR(self):
# Issue #12802: "not a directory" errors are ENOTDIR even on Windows
with self.assertRaises(OSError) as cm:
os.listdir(__file__)
self.assertEqual(cm.exception.errno, errno.ENOTDIR, cm.exception)
class ImportErrorTests(unittest.TestCase):
def test_attributes(self):
# Setting 'name' and 'path' should not be a problem.
exc = ImportError('test')
self.assertIsNone(exc.name)
self.assertIsNone(exc.path)
exc = ImportError('test', name='somemodule')
self.assertEqual(exc.name, 'somemodule')
self.assertIsNone(exc.path)
exc = ImportError('test', path='somepath')
self.assertEqual(exc.path, 'somepath')
self.assertIsNone(exc.name)
exc = ImportError('test', path='somepath', name='somename')
self.assertEqual(exc.name, 'somename')
self.assertEqual(exc.path, 'somepath')
def test_non_str_argument(self):
# Issue #15778
with check_warnings(('', BytesWarning), quiet=True):
arg = b'abc'
exc = ImportError(arg)
self.assertEqual(str(arg), str(exc))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| lgpl-3.0 |
citrix-openstack-build/python-cinderclient | cinderclient/tests/test_service_catalog.py | 3 | 10492 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cinderclient import exceptions
from cinderclient import service_catalog
from cinderclient.tests import utils
# Taken directly from keystone/content/common/samples/auth.json
# Do not edit this structure. Instead, grab the latest from there.
SERVICE_CATALOG = {
"access": {
"token": {
"id": "ab48a9efdfedb23ty3494",
"expires": "2010-11-01T03:32:15-05:00",
"tenant": {
"id": "345",
"name": "My Project"
}
},
"user": {
"id": "123",
"name": "jqsmith",
"roles": [
{
"id": "234",
"name": "compute:admin",
},
{
"id": "235",
"name": "object-store:admin",
"tenantId": "1",
}
],
"roles_links": [],
},
"serviceCatalog": [
{
"name": "Cloud Servers",
"type": "compute",
"endpoints": [
{
"tenantId": "1",
"publicURL": "https://compute1.host/v1/1234",
"internalURL": "https://compute1.host/v1/1234",
"region": "North",
"versionId": "1.0",
"versionInfo": "https://compute1.host/v1/",
"versionList": "https://compute1.host/"
},
{
"tenantId": "2",
"publicURL": "https://compute1.host/v1/3456",
"internalURL": "https://compute1.host/v1/3456",
"region": "North",
"versionId": "1.1",
"versionInfo": "https://compute1.host/v1/",
"versionList": "https://compute1.host/"
},
],
"endpoints_links": [],
},
{
"name": "Cinder Volume Service",
"type": "volume",
"endpoints": [
{
"tenantId": "1",
"publicURL": "https://volume1.host/v1/1234",
"internalURL": "https://volume1.host/v1/1234",
"region": "South",
"versionId": "1.0",
"versionInfo": "uri",
"versionList": "uri"
},
{
"tenantId": "2",
"publicURL": "https://volume1.host/v1/3456",
"internalURL": "https://volume1.host/v1/3456",
"region": "South",
"versionId": "1.1",
"versionInfo": "https://volume1.host/v1/",
"versionList": "https://volume1.host/"
},
],
"endpoints_links": [
{
"rel": "next",
"href": "https://identity1.host/v2.0/endpoints"
},
],
},
{
"name": "Cinder Volume Service V2",
"type": "volumev2",
"endpoints": [
{
"tenantId": "1",
"publicURL": "https://volume1.host/v2/1234",
"internalURL": "https://volume1.host/v2/1234",
"region": "South",
"versionId": "2.0",
"versionInfo": "uri",
"versionList": "uri"
},
{
"tenantId": "2",
"publicURL": "https://volume1.host/v2/3456",
"internalURL": "https://volume1.host/v2/3456",
"region": "South",
"versionId": "1.1",
"versionInfo": "https://volume1.host/v2/",
"versionList": "https://volume1.host/"
},
],
"endpoints_links": [
{
"rel": "next",
"href": "https://identity1.host/v2.0/endpoints"
},
],
},
],
"serviceCatalog_links": [
{
"rel": "next",
"href": "https://identity.host/v2.0/endpoints?session=2hfh8Ar",
},
],
},
}
SERVICE_COMPATIBILITY_CATALOG = {
"access": {
"token": {
"id": "ab48a9efdfedb23ty3494",
"expires": "2010-11-01T03:32:15-05:00",
"tenant": {
"id": "345",
"name": "My Project"
}
},
"user": {
"id": "123",
"name": "jqsmith",
"roles": [
{
"id": "234",
"name": "compute:admin",
},
{
"id": "235",
"name": "object-store:admin",
"tenantId": "1",
}
],
"roles_links": [],
},
"serviceCatalog": [
{
"name": "Cloud Servers",
"type": "compute",
"endpoints": [
{
"tenantId": "1",
"publicURL": "https://compute1.host/v1/1234",
"internalURL": "https://compute1.host/v1/1234",
"region": "North",
"versionId": "1.0",
"versionInfo": "https://compute1.host/v1/",
"versionList": "https://compute1.host/"
},
{
"tenantId": "2",
"publicURL": "https://compute1.host/v1/3456",
"internalURL": "https://compute1.host/v1/3456",
"region": "North",
"versionId": "1.1",
"versionInfo": "https://compute1.host/v1/",
"versionList": "https://compute1.host/"
},
],
"endpoints_links": [],
},
{
"name": "Cinder Volume Service V2",
"type": "volume",
"endpoints": [
{
"tenantId": "1",
"publicURL": "https://volume1.host/v2/1234",
"internalURL": "https://volume1.host/v2/1234",
"region": "South",
"versionId": "2.0",
"versionInfo": "uri",
"versionList": "uri"
},
{
"tenantId": "2",
"publicURL": "https://volume1.host/v2/3456",
"internalURL": "https://volume1.host/v2/3456",
"region": "South",
"versionId": "1.1",
"versionInfo": "https://volume1.host/v2/",
"versionList": "https://volume1.host/"
},
],
"endpoints_links": [
{
"rel": "next",
"href": "https://identity1.host/v2.0/endpoints"
},
],
},
],
"serviceCatalog_links": [
{
"rel": "next",
"href": "https://identity.host/v2.0/endpoints?session=2hfh8Ar",
},
],
},
}
class ServiceCatalogTest(utils.TestCase):
    """Tests for ServiceCatalog.url_for() endpoint resolution.

    Uses the deprecated-free ``assertEqual`` (``assertEquals`` is a
    deprecated alias removed in Python 3.12).
    """

    def test_building_a_service_catalog(self):
        """url_for() needs a filter when several endpoints match, and
        raises EndpointNotFound when the filter matches nothing."""
        sc = service_catalog.ServiceCatalog(SERVICE_CATALOG)

        # Two compute endpoints exist, so an unfiltered lookup is ambiguous.
        self.assertRaises(exceptions.AmbiguousEndpoints, sc.url_for,
                          service_type='compute')
        self.assertEqual(sc.url_for('tenantId', '1', service_type='compute'),
                         "https://compute1.host/v1/1234")
        self.assertEqual(sc.url_for('tenantId', '2', service_type='compute'),
                         "https://compute1.host/v1/3456")
        # The compute endpoints live in region "North" only.
        self.assertRaises(exceptions.EndpointNotFound, sc.url_for,
                          "region", "South", service_type='compute')

    def test_alternate_service_type(self):
        """v1 and v2 volume services are published as distinct types."""
        sc = service_catalog.ServiceCatalog(SERVICE_CATALOG)

        self.assertRaises(exceptions.AmbiguousEndpoints, sc.url_for,
                          service_type='volume')
        self.assertEqual(sc.url_for('tenantId', '1', service_type='volume'),
                         "https://volume1.host/v1/1234")
        self.assertEqual(sc.url_for('tenantId', '2', service_type='volume'),
                         "https://volume1.host/v1/3456")
        # (A duplicated copy of this assertion was removed.)
        self.assertEqual(sc.url_for('tenantId', '2', service_type='volumev2'),
                         "https://volume1.host/v2/3456")
        # The volume endpoints live in region "South" only.
        self.assertRaises(exceptions.EndpointNotFound, sc.url_for,
                          "region", "North", service_type='volume')

    def test_compatibility_service_type(self):
        """A catalog that registers v2 under type 'volume' still resolves."""
        sc = service_catalog.ServiceCatalog(SERVICE_COMPATIBILITY_CATALOG)

        self.assertEqual(sc.url_for('tenantId', '1', service_type='volume'),
                         "https://volume1.host/v2/1234")
        self.assertEqual(sc.url_for('tenantId', '2', service_type='volume'),
                         "https://volume1.host/v2/3456")
| apache-2.0 |
josjevv/django-cms | cms/south_migrations/0023_plugin_table_naming_function_changed.py | 1680 | 20032 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
    # Django >= 1.5: the auth user model may be swapped out.
    from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
    from django.contrib.auth.models import User
else:
    User = get_user_model()

# Labels used by South's frozen ORM below to reference the (possibly
# custom) user model and its implicit parent-link field name.
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.model_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
def forwards(self, orm):
    """No-op: South requires the method, but this migration
    intentionally changes nothing."""
    # Dummy migration
    pass
    def backwards(self, orm):
        # Intentional no-op: nothing to undo (see forwards()).
        # Dummy migration
        pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'})
},
'auth.permission': {
'Meta': {
'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)",
'object_name': 'Permission'},
'codename': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['contenttypes.ContentType']"}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [],
{'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Group']", 'symmetrical': 'False',
'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'password': (
'django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': (
'django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.CMSPlugin']", 'null': 'True',
'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': "orm['sites.Site']",
'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')",
'object_name': 'Page'},
'changed_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'created_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'limit_visibility_in_menu': (
'django.db.models.fields.SmallIntegerField', [],
{'default': 'None', 'null': 'True', 'db_index': 'True',
'blank': 'True'}),
'login_required': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '80',
'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'children'",
'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['cms.Placeholder']",
'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'published': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'publisher_public': (
'django.db.models.fields.related.OneToOneField', [],
{'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True',
'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '40', 'null': 'True',
'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'template': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_descendants': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_page': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')",
'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [],
{'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [],
{'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': (
'django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_users'",
'to': "orm['%s']" % user_orm_label}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['%s']" % user_orm_label, 'unique': 'True',
'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_usergroups'",
'to': "orm['%s']" % user_orm_label}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['auth.Group']", 'unique': 'True',
'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': (
'django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)",
'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '200',
'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': (
'django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': (
'django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"},
'app_label': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site',
'db_table': "'django_site'"},
'domain': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
| bsd-3-clause |
Servir-Mekong/ecodash | lib/ee/customfunction.py | 9 | 4942 | #!/usr/bin/env python
"""An object representing a custom EE Function."""
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
import computedobject
import ee_types
import function
import serializer
class CustomFunction(function.Function):
  """An object representing a custom EE Function."""
  # NOTE: this module is Python 2 code (uses `basestring` and
  # `dict.itervalues`); it will not run unmodified on Python 3.
  def __init__(self, signature, body):
    """Creates a function defined by a given expression with unbound variables.
    The expression is created by evaluating the given function
    using variables as placeholders.
    Args:
      signature: The function signature. If any of the argument names are
          null, their names will be generated deterministically, based on
          the body.
      body: The Python function to evaluate.
    """
    variables = [CustomFunction.variable(arg['type'], arg['name'])
                 for arg in signature['args']]
    # The signature of the function.
    self._signature = CustomFunction._resolveNamelessArgs(
        signature, variables, body)
    # The expression to evaluate.
    self._body = body(*variables)
  def encode(self, encoder):
    """Encodes this function as a serializable dict.
    Args:
      encoder: A function that can encode the function body expression.
    Returns:
      A dict with the function's type, argument names and encoded body.
    """
    return {
        'type': 'Function',
        'argumentNames': [x['name'] for x in self._signature['args']],
        'body': encoder(self._body)
    }
  def getSignature(self):
    """Returns a description of the interface provided by this function."""
    return self._signature
  @staticmethod
  def variable(type_name, name):
    """Returns a placeholder variable with a given name and EE type.
    Args:
      type_name: A class to mimic.
      name: The name of the variable as it will appear in the
          arguments of the custom functions that use this variable. If null,
          a name will be auto-generated in _resolveNamelessArgs().
    Returns:
      A variable with the given name implementing the given type.
    """
    var_type = ee_types.nameToClass(type_name) or computedobject.ComputedObject
    # Bypass __init__ on purpose: the placeholder only needs the three
    # attributes below, not a fully-constructed EE object.
    result = var_type.__new__(var_type)
    result.func = None
    result.args = None
    result.varName = name
    return result
  @staticmethod
  def create(func, return_type, arg_types):
    """Creates a CustomFunction.
    The result calls a given native function with the specified return type and
    argument types and auto-generated argument names.
    Args:
      func: The native function to wrap.
      return_type: The type of the return value, either as a string or a
          class reference.
      arg_types: The types of the arguments, either as strings or class
          references.
    Returns:
      The constructed CustomFunction.
    """
    def StringifyType(t):
      # Accept either a type-name string or a class reference.
      return t if isinstance(t, basestring) else ee_types.classToName(t)
    args = [{'name': None, 'type': StringifyType(i)} for i in arg_types]
    signature = {
        'name': '',
        'returns': StringifyType(return_type),
        'args': args
    }
    return CustomFunction(signature, func)
  @staticmethod
  def _resolveNamelessArgs(signature, variables, body):
    """Deterministically generates names for unnamed variables.
    The names are based on the body of the function.
    Args:
      signature: The signature which may contain null argument names.
      variables: A list of variables, some of which may be nameless.
          These will be updated to include names when this method returns.
      body: The Python function to evaluate.
    Returns:
      The signature with null arg names resolved.
    """
    nameless_arg_indices = []
    for i, variable in enumerate(variables):
      if variable.varName is None:
        nameless_arg_indices.append(i)
    # Do we have any nameless arguments at all?
    if not nameless_arg_indices:
      return signature
    # Generate the name base by counting the number of custom functions
    # within the body.
    def CountFunctions(expression):
      """Counts the number of custom functions in a serialized expression."""
      count = 0
      if isinstance(expression, dict):
        if expression.get('type') == 'Function':
          # Technically this allows false positives if one of the user
          # dictionaries contains type=Function, but that does not matter
          # for this use case, as we only care about determinism.
          count += 1
        else:
          for sub_expression in expression.itervalues():
            count += CountFunctions(sub_expression)
      elif isinstance(expression, (list, tuple)):
        for sub_expression in expression:
          count += CountFunctions(sub_expression)
      return count
    serialized_body = serializer.encode(body(*variables))
    base_name = '_MAPPING_VAR_%d_' % CountFunctions(serialized_body)
    # Update the vars and signature by the name.
    for (i, index) in enumerate(nameless_arg_indices):
      name = base_name + str(i)
      variables[index].varName = name
      signature['args'][index]['name'] = name
    return signature
| gpl-3.0 |
taylorhardy/ConferenceApp | utils.py | 384 | 1576 | import json
import os
import time
import uuid
from google.appengine.api import urlfetch
from models import Profile
def getUserId(user, id_type="email"):
    """Return a stable identifier for the given App Engine user.

    Args:
        user: the authenticated user object; must expose ``email()``.
        id_type: id scheme to use -- "email" (the default), "oauth", or
            "custom".

    Returns:
        The user id string, or ``None`` if ``id_type`` is unrecognized
        (preserved from the original implicit behavior).
    """
    if id_type == "email":
        return user.email()
    if id_type == "oauth":
        # Workaround for getting a user id: introspect the bearer token from
        # the Authorization header via Google's OAuth2 tokeninfo endpoint.
        auth = os.getenv('HTTP_AUTHORIZATION')
        bearer, token = auth.split()
        token_type = 'id_token'
        if 'OAUTH_USER_ID' in os.environ:
            token_type = 'access_token'
        url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
               % (token_type, token))
        user = {}
        wait = 1
        for i in range(3):
            resp = urlfetch.fetch(url)
            if resp.status_code == 200:
                user = json.loads(resp.content)
                break
            elif resp.status_code == 400 and 'invalid_token' in resp.content:
                # The id_token lookup can be rejected; retry introspecting
                # the same token as an access_token instead.
                url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
                       % ('access_token', token))
            else:
                # Transient failure: back off a little before retrying.
                time.sleep(wait)
                wait = wait + i
        return user.get('user_id', '')
    if id_type == "custom":
        # Sample custom scheme: reuse the id of an existing profile for this
        # email address, otherwise generate a fresh uuid-based id.
        # Fixed from the original, which queried an undefined `Conference`
        # model (NameError) and truth-tested the Query object itself (always
        # truthy, making the uuid fallback unreachable). Query the imported
        # Profile model and fetch a single entity instead.
        # NOTE(review): assumes Profile has a `mainEmail` property and an
        # `id()` accessor -- confirm against the model definition.
        profile = Profile.query(Profile.mainEmail == user.email()).get()
        if profile:
            return profile.id()
        return str(uuid.uuid1().get_hex())
| apache-2.0 |
kevin-coder/tensorflow-fork | tensorflow/compat_template_v1.__init__.py | 7 | 1605 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bring in all of the public TensorFlow interface into this module."""
# This file is a *template*: the build system splices the generated TF v1 API
# imports in at the placeholder below. Several names (e.g. `app`) only exist
# after that expansion, hence the pylint suppressions.
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import os as _os
# pylint: disable=g-bad-import-order
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
# API IMPORTS PLACEHOLDER
from tensorflow.python.tools import component_api_helper as _component_api_helper
# Graft the estimator and keras API packages onto this module's namespace.
_component_api_helper.package_hook(
    parent_package_str=__name__,
    child_package_str=(
        'tensorflow_estimator.python.estimator.api._v1.estimator'))
_component_api_helper.package_hook(
    parent_package_str=__name__,
    child_package_str=('tensorflow.python.keras.api._v1.keras'))
from tensorflow.python.platform import flags # pylint: disable=g-import-not-at-top
# `app` is defined by the generated imports spliced in at the placeholder.
app.flags = flags # pylint: disable=undefined-variable
| apache-2.0 |
hungpham2511/toppra | toppra/constraint/conic_constraint.py | 1 | 4217 | from .constraint import Constraint
from .constraint import ConstraintType, DiscretizationType
import numpy as np
class ConicConstraint(Constraint):
    """Abstract base for constraints in canonical conic form.

    A canonical conic constraint couples the path parameters (u, x) via

    .. math::
        (a[i] + da[i]) u + (b[i] + db[i]) x + (c[i] + dc[i]) \leq 0, \\\\
        [da[i, j], db[i, j], dc[i, j]]^\top = P[i, j] u, \|u\|_2 \leq 1,

    where each P[i, j] is a 3x3 matrix; choosing P[i, j] = 0 recovers the
    unperturbed linear constraint. Following [1], such a constraint can be
    rewritten as the conic-quadratic constraint

    .. math::
        a[i, j]u + b[i, j]x + c[i, j] + \|P[i, j]^T [u, x, 1]^T \|_2 \leq 0,

    with i the stage index and j the constraint index.

    Refs:
    ----
    [1] Ben-Tal, A., & Nemirovski, A. (2001). Lectures on modern convex
    optimization: analysis, algorithms, and engineering applications
    (Vol. 2). Siam.
    """

    def __init__(self):
        self.discretization_type = DiscretizationType.Collocation
        self.constraint_type = ConstraintType.CanonicalConic
        self._format_string = ""
        self.n_extra_vars = 0
        self.dof = -1  # not known until a concrete subclass binds a constraint

    def compute_constraint_params(self, path, gridpoints):
        """Compute the conic parameters; concrete subclasses must override."""
        raise NotImplementedError
class RobustLinearConstraint(ConicConstraint):
    """The simple canonical conic constraint.
    This constraint can be seen as a robustified version of a
    CanonicalLinear constraint. In particular, the perturbations term,
    [\Delta a[i, j], \Delta b[i, j], \Delta c[i, j]] is assumed to lie
    in a centered ellipsoid:
    .. math::
        [\Delta a[i, j], \Delta b[i, j], \Delta c[i, j]]^\top = diag(ru, rx, rc) \mathbf e,
    where \|\mathbf e\|_2 \leq 1.
    Parameters
    ----------
    cnst: :class:`~toppra.constraint.LinearConstraint`
        The base constraint to robustify.
    ellipsoid_axes_lengths: (3,)array
        Lengths of the axes of the perturbation ellipsoid. Must all be
        non-negative.
    discretization_scheme: :class:`~.constraint.DiscretizationType`
        Constraint discretization scheme to use.
    """
    def __init__(
        self,
        cnst,
        ellipsoid_axes_lengths,
        discretization_scheme=DiscretizationType.Collocation,
    ):
        super(RobustLinearConstraint, self).__init__()
        self.dof = cnst.get_dof()
        # NOTE(review): uses `assert` for input validation (stripped under
        # python -O); a raise would be more robust -- left as-is.
        assert cnst.get_constraint_type() == ConstraintType.CanonicalLinear
        self.set_discretization_type(discretization_scheme)
        if np.any(np.r_[ellipsoid_axes_lengths] < 0):
            raise ValueError(
                "Perturbation must be non-negative. Input {:}".format(
                    ellipsoid_axes_lengths
                )
            )
        self.base_constraint = cnst
        self.ellipsoid_axes_lengths = ellipsoid_axes_lengths
        self._format_string += (
            " Robust constraint generated from a canonical linear constraint\n"
        )
    def compute_constraint_params(self, path, gridpoints):
        # Discretize the wrapped linear constraint with the same scheme, then
        # fold its (F, g) rows into flat per-stage coefficient arrays.
        self.base_constraint.set_discretization_type(self.discretization_type)
        a_, b_, c_, F_, g_, u_, x_ = self.base_constraint.compute_constraint_params(
            path, gridpoints
        )
        N = len(gridpoints) - 1
        # `identical` means F_/g_ are shared across all stages (2D array);
        # otherwise they are stacked per stage (3D array).
        if self.base_constraint.identical:
            d = F_.shape[0] # number of rows
        else:
            d = F_.shape[1]
        a = np.zeros((N + 1, d))
        b = np.zeros((N + 1, d))
        c = np.zeros((N + 1, d))
        if self.base_constraint.identical:
            for i in range(len(gridpoints)):
                a[i, :d] = F_.dot(a_[i])
                b[i, :d] = F_.dot(b_[i])
                c[i, :d] = F_.dot(c_[i]) - g_
        else:
            for i in range(len(gridpoints)):
                a[i, :d] = F_[i].dot(a_[i])
                b[i, :d] = F_[i].dot(b_[i])
                c[i, :d] = F_[i].dot(c_[i]) - g_[i]
        # Every constraint row gets the same diagonal perturbation matrix
        # diag(ru, rx, rc), broadcast into P's trailing 3x3 slots.
        # NOTE(review): P is sized (N+1, d+2, 3, 3) while a/b/c have d
        # columns -- presumably the extra 2 rows correspond to the u_/x_
        # bounds handled by the solver; confirm against the solver wrapper.
        P = np.zeros((N + 1, d + 2, 3, 3))
        diag_ = np.diag(self.ellipsoid_axes_lengths)
        P[:] = diag_
        return a, b, c, P, u_, x_
| mit |
PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/test_dist_mnist_hallreduce.py | 2 | 1455 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from test_dist_base import TestDistBase
import os
import paddle
# These distributed tests require the static graph mode.
paddle.enable_static()
# Log files produced by the run are named after this module.
flag_name = os.path.splitext(__file__)[0]
class TestDistMnistNCCL2HAllreduce(TestDistBase):
    """Distributed MNIST training using NCCL2 hierarchical all-reduce."""

    def _setup_config(self):
        """Select synchronous NCCL2 mode with hierarchical all-reduce."""
        self._sync_mode = True
        self._nccl2_mode = True
        # NOTE(wangxi). hallreduce test must use 4cards after nccl>=2.7
        self._use_hallreduce = True
        self._use_reduce = False
        self._use_reader_alloc = False

    def test_dist_train(self):
        """Run the distributed training check; CUDA builds only."""
        import paddle.fluid as fluid
        if not fluid.core.is_compiled_with_cuda():
            return
        self.check_with_place(
            "dist_mnist.py",
            delta=1e-5,
            check_error_log=True,
            log_name=flag_name)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
shoopio/shoop | shuup/admin/modules/service_providers/wizard_form_defs.py | 2 | 1949 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.conf import settings
from shuup import configuration
from shuup.admin.views.wizard import TemplatedWizardFormDef
from .wizard_forms import ManualPaymentWizardForm, ManualShippingWizardForm
class ServiceWizardFormDef(TemplatedWizardFormDef):
    """Wizard form definition for a service provider.

    Binds the form to the first existing instance of the form's model (if
    any) and to the shop's configured languages.
    """

    priority = 0

    def __init__(self, name, form_class, template_name, request, extra_js=""):
        instance = form_class._meta.model.objects.first()
        languages = configuration.get(request.shop, "languages", settings.LANGUAGES)
        super(ServiceWizardFormDef, self).__init__(
            form_class=form_class,
            template_name=template_name,
            extra_js=extra_js,
            name=name,
            kwargs={"instance": instance, "languages": languages},
        )

    def visible(self):
        # Service provider steps are always shown in the wizard.
        return True
class ManualShippingWizardFormDef(ServiceWizardFormDef):
    """Wizard step for configuring a manual shipping method."""

    priority = 1000

    def __init__(self, request):
        template = "shuup/admin/service_providers/_wizard_manual_shipping_form.jinja"
        super(ManualShippingWizardFormDef, self).__init__(
            name="manual_shipping",
            form_class=ManualShippingWizardForm,
            template_name=template,
            request=request,
        )
class ManualPaymentWizardFormDef(ServiceWizardFormDef):
    """Wizard step for configuring a manual payment method."""

    priority = 1000

    def __init__(self, request):
        template = "shuup/admin/service_providers/_wizard_manual_payment_form.jinja"
        super(ManualPaymentWizardFormDef, self).__init__(
            name="manual_payment",
            form_class=ManualPaymentWizardForm,
            template_name=template,
            request=request,
        )
| agpl-3.0 |
moylop260/odoo-dev | openerp/report/custom.py | 43 | 25208 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
import time
import openerp
import openerp.tools as tools
from openerp.tools.safe_eval import safe_eval as eval
import print_xml
import render
from interface import report_int
import common
from openerp.osv.osv import except_osv
from openerp.osv.orm import browse_null
from openerp.osv.orm import browse_record_list
from pychart import *
import misc
import cStringIO
from lxml import etree
from openerp.tools.translate import _
class external_pdf(render.render):
    """Render wrapper around an already-generated PDF payload."""

    def __init__(self, pdf):
        render.render.__init__(self)
        self.output_type = 'pdf'
        self.pdf = pdf

    def _render(self):
        # The document is pre-rendered; just hand back the raw PDF data.
        return self.pdf
# Enable colored output for pychart-generated graphs.
theme.use_color = 1
# TODO: should inherit from report_rml instead of report_int
# -> then it could override only create_xml instead of the whole create()
# NOTE: that does not work in every case, because the graphs are generated
# directly as PDF by pychart, so the RML path is bypassed.
class report_custom(report_int):
    def __init__(self, name):
        # Register this custom report implementation under the given
        # service name.
        report_int.__init__(self, name)
    #
    # Precondition (example inputs for _row_get):
    #    fields = [['address', 'city'], ['name'], ['zip']]
    #    conditions = [[('zip', '==', '3'), (,)], (,), (,)]  # same structure as fields
    #    row_canvas = ['Rue', None, None]
    # Postcondition (example result, one row per matched record):
    #    [ ['ville', 'name', 'zip'] ]
    #
    def _row_get(self, cr, uid, objs, fields, conditions, row_canvas=None, group_by=None):
        """Recursively extract report rows from browse records.

        Each entry of ``fields`` is a dotted field path split into a list
        (e.g. ['address', 'city']); single-element paths are read directly,
        longer paths trigger a recursive descent into the related records.
        ``conditions`` mirrors the structure of ``fields`` and filters
        records; ``row_canvas`` pre-fills cells for fields consumed at an
        outer recursion level; ``group_by`` is the index of the field to
        group rows on, or None.

        SECURITY NOTE: field paths and condition operators are interpolated
        into eval() strings -- these values must come from trusted report
        definitions only, never from user input.
        """
        result = []
        for obj in objs:
            tobreak = False
            for cond in conditions:
                if cond and cond[0]:
                    c = cond[0]
                    # c = (transform_fn, field_name, operator, operand)
                    temp = c[0](eval('obj.'+c[1],{'obj': obj}))
                    if not eval('\''+temp+'\''+' '+c[2]+' '+'\''+str(c[3])+'\''):
                        tobreak = True
                if tobreak:
                    break
            # NOTE(review): when a condition fails, only the condition loop
            # is exited -- the record still produces a row below; confirm
            # whether a `continue` was intended here.
            levels = {}
            row = []
            for i in range(len(fields)):
                if not fields[i]:
                    # Field fully consumed at an outer level: emit the
                    # canvas value once, then blank it for later rows.
                    row.append(row_canvas and row_canvas[i])
                    if row_canvas[i]:
                        row_canvas[i]=False
                elif len(fields[i])==1:
                    # Leaf of the dotted path: read the value directly.
                    if not isinstance(obj, browse_null):
                        row.append(str(eval('obj.'+fields[i][0],{'obj': obj})))
                    else:
                        row.append(None)
                else:
                    # Still a relation to traverse: mark it for recursion.
                    row.append(None)
                    levels[fields[i][0]]=True
            if not levels:
                result.append(row)
            else:
                # Process group_by data first
                key = []
                if group_by is not None and fields[group_by] is not None:
                    if fields[group_by][0] in levels.keys():
                        key.append(fields[group_by][0])
                    for l in levels.keys():
                        if l != fields[group_by][0]:
                            key.append(l)
                else:
                    key = levels.keys()
                for l in key:
                    # Descend one level: strip the traversed segment from
                    # each matching field path and recurse into the related
                    # record(s).
                    objs = eval('obj.'+l,{'obj': obj})
                    if not isinstance(objs, (browse_record_list, list)):
                        objs = [objs]
                    field_new = []
                    cond_new = []
                    for f in range(len(fields)):
                        if (fields[f] and fields[f][0])==l:
                            field_new.append(fields[f][1:])
                            cond_new.append(conditions[f][1:])
                        else:
                            field_new.append(None)
                            cond_new.append(None)
                    if len(objs):
                        result += self._row_get(cr, uid, objs, field_new, cond_new, row, group_by)
                    else:
                        result.append(row)
        return result
def create(self, cr, uid, ids, datas, context=None):
if not context:
context={}
self.pool = openerp.registry(cr.dbname)
report = self.pool['ir.report.custom'].browse(cr, uid, [datas['report_id']])[0]
datas['model'] = report.model_id.model
if report.menu_id:
ids = self.pool[report.model_id.model].search(cr, uid, [])
datas['ids'] = ids
report_id = datas['report_id']
report = self.pool['ir.report.custom'].read(cr, uid, [report_id], context=context)[0]
fields = self.pool['ir.report.custom.fields'].read(cr, uid, report['fields_child0'], context=context)
fields.sort(lambda x,y : x['sequence'] - y['sequence'])
if report['field_parent']:
parent_field = self.pool['ir.model.fields'].read(cr, uid, [report['field_parent'][0]], ['model'])
model_name = self.pool['ir.model'].read(cr, uid, [report['model_id'][0]], ['model'], context=context)[0]['model']
fct = {
'id': lambda x: x,
'gety': lambda x: x.split('-')[0],
'in': lambda x: x.split(',')
}
new_fields = []
new_cond = []
for f in fields:
row = []
cond = []
for i in range(4):
field_child = f['field_child'+str(i)]
if field_child:
row.append(
self.pool['ir.model.fields'].read(cr, uid, [field_child[0]], ['name'], context=context)[0]['name']
)
if f['fc'+str(i)+'_operande']:
fct_name = 'id'
cond_op = f['fc'+str(i)+'_op']
if len(f['fc'+str(i)+'_op'].split(',')) == 2:
cond_op = f['fc'+str(i)+'_op'].split(',')[1]
fct_name = f['fc'+str(i)+'_op'].split(',')[0]
cond.append((fct[fct_name], f['fc'+str(i)+'_operande'][1], cond_op, f['fc'+str(i)+'_condition']))
else:
cond.append(None)
new_fields.append(row)
new_cond.append(cond)
objs = self.pool[model_name].browse(cr, uid, ids)
# Group by
groupby = None
idx = 0
for f in fields:
if f['groupby']:
groupby = idx
idx += 1
results = []
if report['field_parent']:
level = []
def build_tree(obj, level, depth):
res = self._row_get(cr, uid,[obj], new_fields, new_cond)
level.append(depth)
new_obj = eval('obj.'+report['field_parent'][1],{'obj': obj})
if not isinstance(new_obj, list) :
new_obj = [new_obj]
for o in new_obj:
if not isinstance(o, browse_null):
res += build_tree(o, level, depth+1)
return res
for obj in objs:
results += build_tree(obj, level, 0)
else:
results = self._row_get(cr, uid,objs, new_fields, new_cond, group_by=groupby)
fct = {
'calc_sum': lambda l: reduce(lambda x,y: float(x)+float(y), filter(None, l), 0),
'calc_avg': lambda l: reduce(lambda x,y: float(x)+float(y), filter(None, l), 0) / (len(filter(None, l)) or 1.0),
'calc_max': lambda l: reduce(lambda x,y: max(x,y), [(i or 0.0) for i in l], 0),
'calc_min': lambda l: reduce(lambda x,y: min(x,y), [(i or 0.0) for i in l], 0),
'calc_count': lambda l: len(filter(None, l)),
'False': lambda l: '\r\n'.join(filter(None, l)),
'groupby': lambda l: reduce(lambda x,y: x or y, l)
}
new_res = []
prev = None
if groupby is not None:
res_dic = {}
for line in results:
if not line[groupby] and prev in res_dic:
res_dic[prev].append(line)
else:
prev = line[groupby]
res_dic.setdefault(line[groupby], [])
res_dic[line[groupby]].append(line)
#we use the keys in results since they are ordered, whereas in res_dic.heys() they aren't
for key in filter(None, [x[groupby] for x in results]):
row = []
for col in range(len(fields)):
if col == groupby:
row.append(fct['groupby'](map(lambda x: x[col], res_dic[key])))
else:
row.append(fct[str(fields[col]['operation'])](map(lambda x: x[col], res_dic[key])))
new_res.append(row)
results = new_res
if report['type']=='table':
if report['field_parent']:
res = self._create_tree(uid, ids, report, fields, level, results, context)
else:
sort_idx = 0
for idx in range(len(fields)):
if fields[idx]['name'] == report['sortby']:
sort_idx = idx
break
try :
results.sort(lambda x,y : cmp(float(x[sort_idx]),float(y[sort_idx])))
except :
results.sort(lambda x,y : cmp(x[sort_idx],y[sort_idx]))
if report['limitt']:
results = results[:int(report['limitt'])]
res = self._create_table(uid, ids, report, fields, None, results, context)
elif report['type'] in ('pie','bar', 'line'):
results2 = []
prev = False
for r in results:
row = []
for j in range(len(r)):
if j == 0 and not r[j]:
row.append(prev)
elif j == 0 and r[j]:
prev = r[j]
row.append(r[j])
else:
try:
row.append(float(r[j]))
except Exception:
row.append(r[j])
results2.append(row)
if report['type']=='pie':
res = self._create_pie(cr,uid, ids, report, fields, results2, context)
elif report['type']=='bar':
res = self._create_bars(cr,uid, ids, report, fields, results2, context)
elif report['type']=='line':
res = self._create_lines(cr,uid, ids, report, fields, results2, context)
return self.obj.get(), 'pdf'
def _create_tree(self, uid, ids, report, fields, level, results, context):
pageSize=common.pageSize.get(report['print_format'], [210.0,297.0])
if report['print_orientation']=='landscape':
pageSize=[pageSize[1],pageSize[0]]
new_doc = etree.Element('report')
config = etree.SubElement(new_doc, 'config')
def _append_node(name, text):
n = etree.SubElement(config, name)
n.text = text
_append_node('date', time.strftime('%d/%m/%Y'))
_append_node('PageFormat', '%s' % report['print_format'])
_append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize))
_append_node('PageWidth', '%.2f' % (pageSize[0] * 2.8346,))
_append_node('PageHeight', '%.2f' %(pageSize[1] * 2.8346,))
length = pageSize[0]-30-reduce(lambda x,y:x+(y['width'] or 0), fields, 0)
count = 0
for f in fields:
if not f['width']: count+=1
for f in fields:
if not f['width']:
f['width']=round((float(length)/count)-0.5)
_append_node('tableSize', '%s' % ','.join(map(lambda x: '%.2fmm' % (x['width'],), fields)))
_append_node('report-header', '%s' % (report['title'],))
_append_node('report-footer', '%s' % (report['footer'],))
header = etree.SubElement(new_doc, 'header')
for f in fields:
field = etree.SubElement(header, 'field')
field.text = f['name']
lines = etree.SubElement(new_doc, 'lines')
level.reverse()
for line in results:
shift = level.pop()
node_line = etree.SubElement(lines, 'row')
prefix = '+'
for f in range(len(fields)):
col = etree.SubElement(node_line, 'col')
if f == 0:
col.attrib.update(para='yes',
tree='yes',
space=str(3*shift)+'mm')
if line[f] is not None:
col.text = prefix+str(line[f]) or ''
else:
col.text = '/'
prefix = ''
transform = etree.XSLT(
etree.parse(os.path.join(tools.config['root_path'],
'addons/base/report/custom_new.xsl')))
rml = etree.tostring(transform(new_doc))
self.obj = render.rml(rml)
self.obj.render()
return True
def _create_lines(self, cr, uid, ids, report, fields, results, context):
pool = openerp.registry(cr.dbname)
pdf_string = cStringIO.StringIO()
can = canvas.init(fname=pdf_string, format='pdf')
can.show(80,380,'/16/H'+report['title'])
ar = area.T(size=(350,350),
#x_coord = category_coord.T(['2005-09-01','2005-10-22'],0),
x_axis = axis.X(label = fields[0]['name'], format="/a-30{}%s"),
y_axis = axis.Y(label = ', '.join(map(lambda x : x['name'], fields[1:]))))
process_date = {
'D': lambda x: reduce(lambda xx, yy: xx + '-' + yy, x.split('-')[1:3]),
'M': lambda x: x.split('-')[1],
'Y': lambda x: x.split('-')[0]
}
order_date = {
'D': lambda x: time.mktime((2005, int(x.split('-')[0]), int(x.split('-')[1]), 0, 0, 0, 0, 0, 0)),
'M': lambda x: x,
'Y': lambda x: x
}
abscissa = []
idx = 0
date_idx = None
fct = {}
for f in fields:
field_id = (f['field_child3'] and f['field_child3'][0]) or (f['field_child2'] and f['field_child2'][0]) or (f['field_child1'] and f['field_child1'][0]) or (f['field_child0'] and f['field_child0'][0])
if field_id:
type = pool['ir.model.fields'].read(cr, uid, [field_id],['ttype'])
if type[0]['ttype'] == 'date':
date_idx = idx
fct[idx] = process_date[report['frequency']]
else:
fct[idx] = lambda x : x
else:
fct[idx] = lambda x : x
idx+=1
# plots are usually displayed year by year
# so we do so if the first field is a date
data_by_year = {}
if date_idx is not None:
for r in results:
key = process_date['Y'](r[date_idx])
if key not in data_by_year:
data_by_year[key] = []
for i in range(len(r)):
r[i] = fct[i](r[i])
data_by_year[key].append(r)
else:
data_by_year[''] = results
idx0 = 0
nb_bar = len(data_by_year)*(len(fields)-1)
colors = map(lambda x:line_style.T(color=x), misc.choice_colors(nb_bar))
abscissa = {}
for line in data_by_year.keys():
fields_bar = []
# sum data and save it in a list. An item for a fields
for d in data_by_year[line]:
for idx in range(len(fields)-1):
fields_bar.append({})
if d[0] in fields_bar[idx]:
fields_bar[idx][d[0]] += d[idx+1]
else:
fields_bar[idx][d[0]] = d[idx+1]
for idx in range(len(fields)-1):
data = {}
for k in fields_bar[idx].keys():
if k in data:
data[k] += fields_bar[idx][k]
else:
data[k] = fields_bar[idx][k]
data_cum = []
prev = 0.0
keys = data.keys()
keys.sort()
# cumulate if necessary
for k in keys:
data_cum.append([k, float(data[k])+float(prev)])
if fields[idx+1]['cumulate']:
prev += data[k]
idx0 = 0
plot = line_plot.T(label=fields[idx+1]['name']+' '+str(line), data = data_cum, line_style=colors[idx0*(len(fields)-1)+idx])
ar.add_plot(plot)
abscissa.update(fields_bar[idx])
idx0 += 1
abscissa = map(lambda x : [x, None], abscissa)
ar.x_coord = category_coord.T(abscissa,0)
ar.draw(can)
can.close()
self.obj = external_pdf(pdf_string.getvalue())
self.obj.render()
pdf_string.close()
return True
def _create_bars(self, cr, uid, ids, report, fields, results, context):
pool = openerp.registry(cr.dbname)
pdf_string = cStringIO.StringIO()
can = canvas.init(fname=pdf_string, format='pdf')
can.show(80,380,'/16/H'+report['title'])
process_date = {
'D': lambda x: reduce(lambda xx, yy: xx + '-' + yy, x.split('-')[1:3]),
'M': lambda x: x.split('-')[1],
'Y': lambda x: x.split('-')[0]
}
order_date = {
'D': lambda x: time.mktime((2005, int(x.split('-')[0]), int(x.split('-')[1]), 0, 0, 0, 0, 0, 0)),
'M': lambda x: x,
'Y': lambda x: x
}
ar = area.T(size=(350,350),
x_axis = axis.X(label = fields[0]['name'], format="/a-30{}%s"),
y_axis = axis.Y(label = ', '.join(map(lambda x : x['name'], fields[1:]))))
idx = 0
date_idx = None
fct = {}
for f in fields:
field_id = (f['field_child3'] and f['field_child3'][0]) or (f['field_child2'] and f['field_child2'][0]) or (f['field_child1'] and f['field_child1'][0]) or (f['field_child0'] and f['field_child0'][0])
if field_id:
type = pool['ir.model.fields'].read(cr, uid, [field_id],['ttype'])
if type[0]['ttype'] == 'date':
date_idx = idx
fct[idx] = process_date[report['frequency']]
else:
fct[idx] = lambda x : x
else:
fct[idx] = lambda x : x
idx+=1
# plot are usually displayed year by year
# so we do so if the first field is a date
data_by_year = {}
if date_idx is not None:
for r in results:
key = process_date['Y'](r[date_idx])
if key not in data_by_year:
data_by_year[key] = []
for i in range(len(r)):
r[i] = fct[i](r[i])
data_by_year[key].append(r)
else:
data_by_year[''] = results
nb_bar = len(data_by_year)*(len(fields)-1)
colors = map(lambda x:fill_style.Plain(bgcolor=x), misc.choice_colors(nb_bar))
abscissa = {}
for line in data_by_year.keys():
fields_bar = []
# sum data and save it in a list. An item for a fields
for d in data_by_year[line]:
for idx in range(len(fields)-1):
fields_bar.append({})
if d[0] in fields_bar[idx]:
fields_bar[idx][d[0]] += d[idx+1]
else:
fields_bar[idx][d[0]] = d[idx+1]
for idx in range(len(fields)-1):
data = {}
for k in fields_bar[idx].keys():
if k in data:
data[k] += fields_bar[idx][k]
else:
data[k] = fields_bar[idx][k]
data_cum = []
prev = 0.0
keys = data.keys()
keys.sort()
# cumulate if necessary
for k in keys:
data_cum.append([k, float(data[k])+float(prev)])
if fields[idx+1]['cumulate']:
prev += data[k]
idx0 = 0
plot = bar_plot.T(label=fields[idx+1]['name']+' '+str(line), data = data_cum, cluster=(idx0*(len(fields)-1)+idx,nb_bar), fill_style=colors[idx0*(len(fields)-1)+idx])
ar.add_plot(plot)
abscissa.update(fields_bar[idx])
idx0 += 1
abscissa = map(lambda x : [x, None], abscissa)
abscissa.sort()
ar.x_coord = category_coord.T(abscissa,0)
ar.draw(can)
can.close()
self.obj = external_pdf(pdf_string.getvalue())
self.obj.render()
pdf_string.close()
return True
def _create_pie(self, cr, uid, ids, report, fields, results, context):
pdf_string = cStringIO.StringIO()
can = canvas.init(fname=pdf_string, format='pdf')
ar = area.T(size=(350,350), legend=legend.T(),
x_grid_style = None, y_grid_style = None)
colors = map(lambda x:fill_style.Plain(bgcolor=x), misc.choice_colors(len(results)))
if reduce(lambda x,y : x+y, map(lambda x : x[1],results)) == 0.0:
raise except_osv(_('Error'), _("The sum of the data (2nd field) is null.\nWe can't draw a pie chart !"))
plot = pie_plot.T(data=results, arc_offsets=[0,10,0,10],
shadow = (2, -2, fill_style.gray50),
label_offset = 25,
arrow_style = arrow.a3,
fill_styles=colors)
ar.add_plot(plot)
ar.draw(can)
can.close()
self.obj = external_pdf(pdf_string.getvalue())
self.obj.render()
pdf_string.close()
return True
def _create_table(self, uid, ids, report, fields, tree, results, context):
pageSize=common.pageSize.get(report['print_format'], [210.0,297.0])
if report['print_orientation']=='landscape':
pageSize=[pageSize[1],pageSize[0]]
new_doc = etree.Element('report')
config = etree.SubElement(new_doc, 'config')
def _append_node(name, text):
n = etree.SubElement(config, name)
n.text = text
_append_node('date', time.strftime('%d/%m/%Y'))
_append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize))
_append_node('PageFormat', '%s' % report['print_format'])
_append_node('PageWidth', '%.2f' % (pageSize[0] * 2.8346,))
_append_node('PageHeight', '%.2f' %(pageSize[1] * 2.8346,))
length = pageSize[0]-30-reduce(lambda x,y:x+(y['width'] or 0), fields, 0)
count = 0
for f in fields:
if not f['width']: count+=1
for f in fields:
if not f['width']:
f['width']=round((float(length)/count)-0.5)
_append_node('tableSize', '%s' % ','.join(map(lambda x: '%.2fmm' % (x['width'],), fields)))
_append_node('report-header', '%s' % (report['title'],))
_append_node('report-footer', '%s' % (report['footer'],))
header = etree.SubElement(new_doc, 'header')
for f in fields:
field = etree.SubElement(header, 'field')
field.text = f['name']
lines = etree.SubElement(new_doc, 'lines')
for line in results:
node_line = etree.SubElement(lines, 'row')
for f in range(len(fields)):
col = etree.SubElement(node_line, 'col', tree='no')
if line[f] is not None:
col.text = line[f] or ''
else:
col.text = '/'
transform = etree.XSLT(
etree.parse(os.path.join(tools.config['root_path'],
'addons/base/report/custom_new.xsl')))
rml = etree.tostring(transform(new_doc))
self.obj = render.rml(rml)
self.obj.render()
return True
report_custom('report.custom')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
madrang/pyTSon-AudioBot | requests/packages/urllib3/request.py | 290 | 5946 | from __future__ import absolute_import
from .filepost import encode_multipart_formdata
from .packages.six.moves.urllib.parse import urlencode
__all__ = ['RequestMethods']
class RequestMethods(object):
    """
    Convenience mixin for classes who implement a :meth:`urlopen` method, such
    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
    :class:`~urllib3.poolmanager.PoolManager`.

    Provides behavior for making common types of HTTP request methods and
    decides which type of request field encoding to use.

    Specifically,

    :meth:`.request_encode_url` is for sending requests whose fields are
    encoded in the URL (such as GET, HEAD, DELETE).

    :meth:`.request_encode_body` is for sending requests whose fields are
    encoded in the *body* of the request using multipart or www-form-urlencoded
    (such as for POST, PUT, PATCH).

    :meth:`.request` is for making any kind of request, it will look up the
    appropriate encoding format and use one of the above two methods to make
    the request.

    Initializer parameters:

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """

    # HTTP methods whose fields are encoded into the URL query string.
    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])

    def __init__(self, headers=None):
        self.headers = headers or {}

    def urlopen(self, method, url, body=None, headers=None,
                encode_multipart=True, multipart_boundary=None,
                **kw):  # Abstract
        # Bug fix: this previously did ``raise NotImplemented(...)`` —
        # NotImplemented is a singleton, not an exception class, so calling
        # it raised "TypeError: 'NotImplementedType' object is not callable"
        # instead of the intended abstract-method error.
        raise NotImplementedError("Classes extending RequestMethods must implement "
                                  "their own ``urlopen`` method.")

    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the appropriate encoding of
        ``fields`` based on the ``method`` used.

        This is a convenience method that requires the least amount of manual
        effort. It can be used in most situations, while still having the
        option to drop down to more specific methods when necessary, such as
        :meth:`request_encode_url`, :meth:`request_encode_body`,
        or even the lowest level :meth:`urlopen`.
        """
        method = method.upper()

        if method in self._encode_url_methods:
            return self.request_encode_url(method, url, fields=fields,
                                           headers=headers,
                                           **urlopen_kw)
        else:
            return self.request_encode_body(method, url, fields=fields,
                                            headers=headers,
                                            **urlopen_kw)

    def request_encode_url(self, method, url, fields=None, headers=None,
                           **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
        """
        if headers is None:
            headers = self.headers

        extra_kw = {'headers': headers}
        extra_kw.update(urlopen_kw)

        if fields:
            url += '?' + urlencode(fields)

        return self.urlopen(method, url, **extra_kw)

    def request_encode_body(self, method, url, fields=None, headers=None,
                            encode_multipart=True, multipart_boundary=None,
                            **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the body. This is useful for request methods like POST, PUT, PATCH, etc.

        When ``encode_multipart=True`` (default), then
        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
        the payload with the appropriate content type. Otherwise
        :meth:`urllib.urlencode` is used with the
        'application/x-www-form-urlencoded' content type.

        Multipart encoding must be used when posting files, and it's reasonably
        safe to use it in other times too. However, it may break request
        signing, such as with OAuth.

        Supports an optional ``fields`` parameter of key/value strings AND
        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
        the MIME type is optional. For example::

            fields = {
                'foo': 'bar',
                'fakefile': ('foofile.txt', 'contents of foofile'),
                'realfile': ('barfile.txt', open('realfile').read()),
                'typedfile': ('bazfile.bin', open('bazfile').read(),
                              'image/jpeg'),
                'nonamefile': 'contents of nonamefile field',
            }

        When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimic behavior of browsers.

        Note that if ``headers`` are supplied, the 'Content-Type' header will
        be overwritten because it depends on the dynamic random boundary string
        which is used to compose the body of the request. The random boundary
        string can be explicitly set with the ``multipart_boundary`` parameter.
        """
        if headers is None:
            headers = self.headers

        extra_kw = {'headers': {}}

        if fields:
            # 'fields' and 'body' are mutually exclusive ways of giving a payload.
            if 'body' in urlopen_kw:
                raise TypeError(
                    "request got values for both 'fields' and 'body', can only specify one.")

            if encode_multipart:
                body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
            else:
                body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'

            extra_kw['body'] = body
            extra_kw['headers'] = {'Content-Type': content_type}

        # Caller headers are merged on top of the computed Content-Type;
        # explicit urlopen kwargs win over everything.
        extra_kw['headers'].update(headers)
        extra_kw.update(urlopen_kw)

        return self.urlopen(method, url, **extra_kw)
| gpl-3.0 |
yfried/ansible | lib/ansible/modules/system/aix_lvol.py | 73 | 10444 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Alain Dejoux <adejoux@djouxtech.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
author:
- Alain Dejoux (@adejoux)
module: aix_lvol
short_description: Configure AIX LVM logical volumes
description:
- This module creates, removes or resizes AIX logical volumes. Inspired by lvol module.
version_added: "2.4"
options:
vg:
description:
- The volume group this logical volume is part of.
required: true
lv:
description:
- The name of the logical volume.
required: true
lv_type:
description:
- The type of the logical volume.
default: jfs2
size:
description:
- The size of the logical volume with one of the [MGT] units.
copies:
description:
- The number of copies of the logical volume. Maximum copies are 3.
default: '1'
policy:
choices: [ maximum, minimum ]
default: maximum
description:
- Sets the interphysical volume allocation policy. C(maximum) allocates logical partitions across the maximum number of physical volumes.
C(minimum) allocates logical partitions across the minimum number of physical volumes.
state:
choices: [ absent, present ]
default: present
description:
- Control if the logical volume exists. If C(present) and the
volume does not already exist then the C(size) option is required.
opts:
description:
- Free-form options to be passed to the mklv command.
pvs:
description:
- Comma separated list of physical volumes e.g. C(hdisk1,hdisk2).
'''
EXAMPLES = r'''
- name: Create a logical volume of 512M
aix_lvol:
vg: testvg
lv: testlv
size: 512M
- name: Create a logical volume of 512M with disks hdisk1 and hdisk2
aix_lvol:
vg: testvg
lv: test2lv
size: 512M
pvs: hdisk1,hdisk2
- name: Create a logical volume of 512M mirrored
aix_lvol:
vg: testvg
lv: test3lv
size: 512M
copies: 2
- name: Create a logical volume of 1G with a minimum placement policy
aix_lvol:
vg: rootvg
lv: test4lv
size: 1G
policy: minimum
- name: Create a logical volume with special options like mirror pool
aix_lvol:
vg: testvg
lv: testlv
size: 512M
opts: -p copy1=poolA -p copy2=poolB
- name: Extend the logical volume to 1200M
aix_lvol:
vg: testvg
lv: test4lv
size: 1200M
- name: Remove the logical volume
aix_lvol:
vg: testvg
lv: testlv
state: absent
'''
RETURN = r'''
msg:
type: string
description: A friendly message describing the task result.
returned: always
sample: Logical volume testlv created.
'''
import re
from ansible.module_utils.basic import AnsibleModule
def convert_size(module, size):
    """Convert a size string with an M/G/T suffix into megabytes.

    :param module: AnsibleModule instance, used to fail cleanly on bad input.
    :param size: size specification, e.g. ``'512M'``, ``'1G'``, ``'2T'``
                 (suffix is case-insensitive).
    :return: the size expressed in megabytes (int).
    """
    unit = size[-1].upper()
    units = ['M', 'G', 'T']
    try:
        # 'M' -> 1, 'G' -> 1024, 'T' -> 1024**2 (result is in MB).
        multiplier = 1024 ** units.index(unit)
    except ValueError:
        module.fail_json(msg="No valid size unit specified.")
    try:
        return int(size[:-1]) * multiplier
    except ValueError:
        # Bug fix: a malformed numeric part (e.g. '1xM') used to escape as
        # a raw ValueError traceback instead of a clean module failure.
        module.fail_json(msg="Failed to parse size value from '%s'." % size)
def round_ppsize(x, base=16):
    """Round *x* up to a multiple of *base* (the physical-partition size)."""
    # Round to the nearest multiple first, then bump one step if we fell short.
    candidate = int(base * round(float(x) / base))
    return candidate if candidate >= x else candidate + base
def parse_lv(data):
    """Parse `lslv` output into a summary dict.

    Returns ``{'name', 'vg', 'size', 'policy'}`` where size is LPs * PP size
    (megabytes), or None when no logical volume header line is found.
    """
    lv_name = None
    for row in data.splitlines():
        m = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", row)
        if m is not None:
            lv_name = m.group(1)
            group = m.group(2)
            continue
        m = re.search(r"LPs:\s+(\d+).*PPs", row)
        if m is not None:
            logical_partitions = int(m.group(1))
            continue
        m = re.search(r"PP SIZE:\s+(\d+)", row)
        if m is not None:
            partition_mb = int(m.group(1))
            continue
        m = re.search(r"INTER-POLICY:\s+(\w+)", row)
        if m is not None:
            alloc_policy = m.group(1)
            continue

    if not lv_name:
        return None

    return {'name': lv_name,
            'vg': group,
            'size': logical_partitions * partition_mb,
            'policy': alloc_policy}
def parse_vg(data):
    """Parse `lsvg` output into ``{'name', 'size', 'free', 'pp_size'}``.

    'size' and 'free' are the megabyte figures shown in parentheses on the
    TOTAL/FREE PPs lines; 'pp_size' is the physical partition size in MB.
    """
    for row in data.splitlines():
        m = re.search(r"VOLUME GROUP:\s+(\w+)", row)
        if m is not None:
            vg_name = m.group(1)
            continue
        m = re.search(r"TOTAL PP.*\((\d+)", row)
        if m is not None:
            total_mb = int(m.group(1))
            continue
        m = re.search(r"PP SIZE:\s+(\d+)", row)
        if m is not None:
            pp_mb = int(m.group(1))
            continue
        m = re.search(r"FREE PP.*\((\d+)", row)
        if m is not None:
            free_mb = int(m.group(1))
            continue

    return {'name': vg_name, 'size': total_mb, 'free': free_mb, 'pp_size': pp_mb}
def main():
    """Module entry point: create, resize or remove an AIX logical volume.

    Exits through module.exit_json / module.fail_json in every branch.
    """
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(type='str', required=True),
            lv=dict(type='str', required=True),
            lv_type=dict(type='str', default='jfs2'),
            size=dict(type='str'),
            opts=dict(type='str', default=''),
            copies=dict(type='str', default='1'),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            policy=dict(type='str', default='maximum', choices=['maximum', 'minimum']),
            pvs=dict(type='list', default=list())
        ),
        supports_check_mode=True,
    )

    vg = module.params['vg']
    lv = module.params['lv']
    lv_type = module.params['lv_type']
    size = module.params['size']
    opts = module.params['opts']
    copies = module.params['copies']
    policy = module.params['policy']
    state = module.params['state']
    pvs = module.params['pvs']

    pv_list = ' '.join(pvs)

    # Map the module policy names to the mklv/chlv -e flag values.
    if policy == 'maximum':
        lv_policy = 'x'
    else:
        lv_policy = 'm'

    # Add echo command when running in check-mode
    if module.check_mode:
        test_opt = 'echo '
    else:
        test_opt = ''

    # check if system commands are available
    lsvg_cmd = module.get_bin_path("lsvg", required=True)
    lslv_cmd = module.get_bin_path("lslv", required=True)

    # Get information on volume group requested
    rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, msg="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err)

    this_vg = parse_vg(vg_info)

    if size is not None:
        # Calculate pp size and round it up based on pp size.
        lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size'])

    # Get information on logical volume requested
    rc, lv_info, err = module.run_command(
        "%s %s" % (lslv_cmd, lv))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, msg="Logical Volume %s does not exist." % lv)

    changed = False

    this_lv = parse_lv(lv_info)

    if state == 'present' and not size:
        if this_lv is None:
            # Creating a new LV requires an explicit size.
            module.fail_json(msg="No size given.")

    if this_lv is None:
        if state == 'present':
            if lv_size > this_vg['free']:
                module.fail_json(msg="Not enough free space in volume group %s: %s MB free." % (this_vg['name'], this_vg['free']))

            # create LV
            mklv_cmd = module.get_bin_path("mklv", required=True)

            cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list)
            rc, out, err = module.run_command(cmd)
            if rc == 0:
                module.exit_json(changed=True, msg="Logical volume %s created." % lv)
            else:
                module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err)
    else:
        if state == 'absent':
            # remove LV
            rmlv_cmd = module.get_bin_path("rmlv", required=True)
            rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True, msg="Logical volume %s deleted." % lv)
            else:
                module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err)
        else:
            if this_lv['policy'] != policy:
                # change lv allocation policy
                chlv_cmd = module.get_bin_path("chlv", required=True)
                rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name']))
                if rc == 0:
                    module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy))
                else:
                    module.fail_json(msg="Failed to change logical volume %s policy." % lv, rc=rc, out=out, err=err)

            if vg != this_lv['vg']:
                module.fail_json(msg="Logical volume %s already exist in volume group %s" % (lv, this_lv['vg']))

            # from here the last remaining action is to resize it, if no size parameter is passed we do nothing.
            if not size:
                module.exit_json(changed=False, msg="Logical volume %s already exist." % (lv))

            # resize LV based on absolute values
            if int(lv_size) > this_lv['size']:
                extendlv_cmd = module.get_bin_path("extendlv", required=True)
                cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size'])
                rc, out, err = module.run_command(cmd)
                if rc == 0:
                    module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size))
                else:
                    module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err)
            elif lv_size < this_lv['size']:
                # AIX cannot shrink a logical volume in place.
                module.fail_json(msg="No shrinking of Logical Volume %s permitted. Current size: %s MB" % (lv, this_lv['size']))
            else:
                module.exit_json(changed=False, msg="Logical volume %s size is already %sMB." % (lv, lv_size))


# Script entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
duyet-website/api.duyet.net | lib/boto/ec2/securitygroup.py | 150 | 14687 | # Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Security Group
"""
from boto.ec2.ec2object import TaggedEC2Object
from boto.exception import BotoClientError
class SecurityGroup(TaggedEC2Object):
    def __init__(self, connection=None, owner_id=None,
                 name=None, description=None, id=None):
        # Chain up so TaggedEC2Object state (connection, tags) is initialised.
        super(SecurityGroup, self).__init__(connection)
        self.id = id
        self.owner_id = owner_id
        self.name = name
        self.description = description
        # Populated from the response for VPC groups; stays None otherwise.
        self.vpc_id = None
        # Ingress and egress rules, filled in during XML parsing.
        self.rules = IPPermissionsList()
        self.rules_egress = IPPermissionsList()
def __repr__(self):
return 'SecurityGroup:%s' % self.name
    def startElement(self, name, attrs, connection):
        # Give the tag-handling base class first crack at the element;
        # only fall through when it declines (returns None).
        retval = super(SecurityGroup, self).startElement(name, attrs, connection)
        if retval is not None:
            return retval
        if name == 'ipPermissions':
            # Delegate ingress-rule parsing to the nested rules list.
            return self.rules
        elif name == 'ipPermissionsEgress':
            # Delegate egress-rule parsing (VPC groups) to its list.
            return self.rules_egress
        else:
            return None
    def endElement(self, name, value, connection):
        # Map known response elements onto attributes; anything unknown is
        # stored verbatim via setattr in the final else branch.
        if name == 'ownerId':
            self.owner_id = value
        elif name == 'groupId':
            self.id = value
        elif name == 'groupName':
            self.name = value
        elif name == 'vpcId':
            self.vpc_id = value
        elif name == 'groupDescription':
            self.description = value
        elif name == 'ipRanges':
            pass
        elif name == 'return':
            # 'return' carries the boolean API status; record it on
            # self.status and reject anything that isn't true/false.
            if value == 'false':
                self.status = False
            elif value == 'true':
                self.status = True
            else:
                raise Exception(
                    'Unexpected value of status %s for group %s' % (
                        value,
                        self.name
                    )
                )
        else:
            setattr(self, name, value)
def delete(self, dry_run=False):
if self.vpc_id:
return self.connection.delete_security_group(
group_id=self.id,
dry_run=dry_run
)
else:
return self.connection.delete_security_group(
self.name,
dry_run=dry_run
)
    def add_rule(self, ip_protocol, from_port, to_port,
                 src_group_name, src_group_owner_id, cidr_ip,
                 src_group_group_id, dry_run=False):
        """
        Add a rule to the SecurityGroup object. Note that this method
        only changes the local version of the object. No information
        is sent to EC2.
        """
        # Build the local IPPermissions entry for the protocol/port range...
        rule = IPPermissions(self)
        rule.ip_protocol = ip_protocol
        rule.from_port = from_port
        rule.to_port = to_port
        self.rules.append(rule)
        # ...then attach the grant (source group or CIDR) to it.
        rule.add_grant(
            src_group_name,
            src_group_owner_id,
            cidr_ip,
            src_group_group_id,
            dry_run=dry_run
        )
    def remove_rule(self, ip_protocol, from_port, to_port,
                    src_group_name, src_group_owner_id, cidr_ip,
                    src_group_group_id, dry_run=False):
        """
        Remove a rule to the SecurityGroup object. Note that this method
        only changes the local version of the object. No information
        is sent to EC2.

        :raises ValueError: if the group currently has no rules.
        """
        if not self.rules:
            raise ValueError("The security group has no rules")

        target_rule = None
        # Locate the rule matching protocol + port range, then the grant
        # within it matching the source group / owner / CIDR.
        for rule in self.rules:
            if rule.ip_protocol == ip_protocol:
                if rule.from_port == from_port:
                    if rule.to_port == to_port:
                        target_rule = rule
                        target_grant = None
                        for grant in rule.grants:
                            if grant.name == src_group_name or grant.group_id == src_group_group_id:
                                if grant.owner_id == src_group_owner_id:
                                    if grant.cidr_ip == cidr_ip:
                                        target_grant = grant
                        if target_grant:
                            rule.grants.remove(target_grant)
        # Drop the whole rule once its last grant is gone.
        # NOTE(review): `rule`/`target_rule` here are the values left over
        # from the loop above — verify the no-match case upstream.
        if len(rule.grants) == 0:
            self.rules.remove(target_rule)
def authorize(self, ip_protocol=None, from_port=None, to_port=None,
cidr_ip=None, src_group=None, dry_run=False):
"""
Add a new rule to this security group.
You need to pass in either src_group_name
OR ip_protocol, from_port, to_port,
and cidr_ip. In other words, either you are authorizing another
group or you are authorizing some ip-based rule.
:type ip_protocol: string
:param ip_protocol: Either tcp | udp | icmp
:type from_port: int
:param from_port: The beginning port number you are enabling
:type to_port: int
:param to_port: The ending port number you are enabling
:type cidr_ip: string or list of strings
:param cidr_ip: The CIDR block you are providing access to.
See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
:type src_group: :class:`boto.ec2.securitygroup.SecurityGroup` or
:class:`boto.ec2.securitygroup.GroupOrCIDR`
:param src_group: The Security Group you are granting access to.
:rtype: bool
:return: True if successful.
"""
group_name = None
if not self.vpc_id:
group_name = self.name
group_id = None
if self.vpc_id:
group_id = self.id
src_group_name = None
src_group_owner_id = None
src_group_group_id = None
if src_group:
cidr_ip = None
src_group_owner_id = src_group.owner_id
if not self.vpc_id:
src_group_name = src_group.name
else:
if hasattr(src_group, 'group_id'):
src_group_group_id = src_group.group_id
else:
src_group_group_id = src_group.id
status = self.connection.authorize_security_group(group_name,
src_group_name,
src_group_owner_id,
ip_protocol,
from_port,
to_port,
cidr_ip,
group_id,
src_group_group_id,
dry_run=dry_run)
if status:
if not isinstance(cidr_ip, list):
cidr_ip = [cidr_ip]
for single_cidr_ip in cidr_ip:
self.add_rule(ip_protocol, from_port, to_port, src_group_name,
src_group_owner_id, single_cidr_ip,
src_group_group_id, dry_run=dry_run)
return status
def revoke(self, ip_protocol=None, from_port=None, to_port=None,
cidr_ip=None, src_group=None, dry_run=False):
group_name = None
if not self.vpc_id:
group_name = self.name
group_id = None
if self.vpc_id:
group_id = self.id
src_group_name = None
src_group_owner_id = None
src_group_group_id = None
if src_group:
cidr_ip = None
src_group_owner_id = src_group.owner_id
if not self.vpc_id:
src_group_name = src_group.name
else:
if hasattr(src_group, 'group_id'):
src_group_group_id = src_group.group_id
else:
src_group_group_id = src_group.id
status = self.connection.revoke_security_group(group_name,
src_group_name,
src_group_owner_id,
ip_protocol,
from_port,
to_port,
cidr_ip,
group_id,
src_group_group_id,
dry_run=dry_run)
if status:
self.remove_rule(ip_protocol, from_port, to_port, src_group_name,
src_group_owner_id, cidr_ip, src_group_group_id,
dry_run=dry_run)
return status
def copy_to_region(self, region, name=None, dry_run=False):
"""
Create a copy of this security group in another region.
Note that the new security group will be a separate entity
and will not stay in sync automatically after the copy
operation.
:type region: :class:`boto.ec2.regioninfo.RegionInfo`
:param region: The region to which this security group will be copied.
:type name: string
:param name: The name of the copy. If not supplied, the copy
will have the same name as this security group.
:rtype: :class:`boto.ec2.securitygroup.SecurityGroup`
:return: The new security group.
"""
if region.name == self.region:
raise BotoClientError('Unable to copy to the same Region')
conn_params = self.connection.get_params()
rconn = region.connect(**conn_params)
sg = rconn.create_security_group(
name or self.name,
self.description,
dry_run=dry_run
)
source_groups = []
for rule in self.rules:
for grant in rule.grants:
grant_nom = grant.name or grant.group_id
if grant_nom:
if grant_nom not in source_groups:
source_groups.append(grant_nom)
sg.authorize(None, None, None, None, grant,
dry_run=dry_run)
else:
sg.authorize(rule.ip_protocol, rule.from_port, rule.to_port,
grant.cidr_ip, dry_run=dry_run)
return sg
def instances(self, dry_run=False):
"""
Find all of the current instances that are running within this
security group.
:rtype: list of :class:`boto.ec2.instance.Instance`
:return: A list of Instance objects
"""
rs = []
if self.vpc_id:
rs.extend(self.connection.get_all_reservations(
filters={'instance.group-id': self.id},
dry_run=dry_run
))
else:
rs.extend(self.connection.get_all_reservations(
filters={'group-id': self.id},
dry_run=dry_run
))
instances = [i for r in rs for i in r.instances]
return instances
class IPPermissionsList(list):
    """A list of IPPermissions rules that builds itself from SAX events."""

    def startElement(self, name, attrs, connection):
        # Each <item> element opens a new rule; hand it back so the SAX
        # driver routes subsequent events to it.
        if name != 'item':
            return None
        permission = IPPermissions(self)
        self.append(permission)
        return permission

    def endElement(self, name, value, connection):
        pass
class IPPermissions(object):
    """
    A single security-group rule: an IP protocol, a port range and the
    list of grants (source groups or CIDR blocks) it applies to.
    """

    def __init__(self, parent=None):
        self.parent = parent
        self.ip_protocol = None
        self.from_port = None
        self.to_port = None
        self.grants = []

    def __repr__(self):
        return 'IPPermissions:%s(%s-%s)' % (
            self.ip_protocol, self.from_port, self.to_port)

    def startElement(self, name, attrs, connection):
        # Each <item> element opens a new grant within this rule.
        if name != 'item':
            return None
        grant = GroupOrCIDR(self)
        self.grants.append(grant)
        return grant

    def endElement(self, name, value, connection):
        # Map the known SAX element names onto our attributes; keep any
        # unrecognized element verbatim for forward compatibility.
        if name == 'ipProtocol':
            self.ip_protocol = value
        elif name == 'fromPort':
            self.from_port = value
        elif name == 'toPort':
            self.to_port = value
        else:
            setattr(self, name, value)

    def add_grant(self, name=None, owner_id=None, cidr_ip=None, group_id=None,
                  dry_run=False):
        """Append a new grant to this rule and return it."""
        grant = GroupOrCIDR(self)
        grant.name = name
        grant.owner_id = owner_id
        grant.group_id = group_id
        grant.cidr_ip = cidr_ip
        self.grants.append(grant)
        return grant
class GroupOrCIDR(object):
    """
    A single grant within an IPPermissions rule: either another security
    group (name/group_id plus owner_id) or a CIDR block (cidr_ip).
    """

    def __init__(self, parent=None):
        self.owner_id = None
        self.group_id = None
        self.name = None
        self.cidr_ip = None

    def __repr__(self):
        if self.cidr_ip:
            return '%s' % self.cidr_ip
        else:
            return '%s-%s' % (self.name or self.group_id, self.owner_id)

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # Map the known SAX element names onto our attributes.  Bug fix:
        # the original used a second `if name == 'cidrIp'` here instead of
        # `elif`, so the trailing `else: setattr(...)` also fired for
        # userId/groupId/groupName, polluting instances with raw SAX names.
        if name == 'userId':
            self.owner_id = value
        elif name == 'groupId':
            self.group_id = value
        elif name == 'groupName':
            self.name = value
        elif name == 'cidrIp':
            self.cidr_ip = value
        else:
            # Preserve unknown elements verbatim for forward compatibility.
            setattr(self, name, value)
| mit |
CiscoSystems/networking-cisco | networking_cisco/plugins/ml2/drivers/cisco/nexus/constants.py | 2 | 1210 | # Copyright 2011 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Keys used when reading switch credentials from the credential store
# (presumably keyed per switch -- confirm against callers).
CREDENTIAL_USERNAME = 'user_name'
CREDENTIAL_PASSWORD = 'password'
# Configuration option names for the Nexus switch connection.
USERNAME = 'username'
PASSWORD = 'password'
# Role name used for admin-level operations.
NETWORK_ADMIN = 'network_admin'
# ML2 network type name for Nexus VXLAN overlay networks.
TYPE_NEXUS_VXLAN = 'nexus_vxlan'
# TODO(rpothier) Add back in provider segment support.
PROVIDER_SEGMENT = 'provider_segment'
# Identifier of the NVE (network virtualization edge) interface.
NVE_INT_NUM = '1'
# Upper bound enforced on VLAN name length (Nexus limit -- TODO confirm).
NEXUS_MAX_VLAN_NAME_LEN = 32
# Duplicate-entry classification codes.
NO_DUPLICATE = 0
DUPLICATE_VLAN = 1
DUPLICATE_PORT = 2
# Nexus platform major-series markers; -1 flags an unrecognized platform.
NEXUS_TYPE_INVALID = -1
NEXUS_3K = 3
NEXUS_5K = 5
NEXUS_7K = 7
NEXUS_9K = 9
# Cap on concurrent SSH sessions opened to a switch (presumably a
# device-side limit -- verify).
MAX_NEXUS_SSH_SESSIONS = 8
# Attribute-name suffixes used during configuration replay tracking.
REPLAY_FAILURES = '_replay_failures'
FAIL_CONTACT = '_contact'
FAIL_CONFIG = '_config'
| apache-2.0 |
Edraak/edraak-platform | lms/djangoapps/instructor/tests/test_email.py | 9 | 7399 | """
Unit tests for email feature flag in new instructor dashboard.
Additionally tests that bulk email is always disabled for
non-Mongo backed courses, regardless of email feature flag, and
that the view is conditionally available when Course Auth is turned on.
"""
from django.urls import reverse
from nose.plugins.attrib import attr
from opaque_keys.edx.keys import CourseKey
from six import text_type
from bulk_email.models import BulkEmailFlag, CourseAuthorization
from student.tests.factories import AdminFactory
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_MODULESTORE, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@attr(shard=1)
class TestNewInstructorDashboardEmailViewMongoBacked(SharedModuleStoreTestCase):
"""
Check for email view on the new instructor dashboard
for Mongo-backed courses
"""
@classmethod
def setUpClass(cls):
super(TestNewInstructorDashboardEmailViewMongoBacked, cls).setUpClass()
cls.course = CourseFactory.create()
# URL for instructor dash
cls.url = reverse('instructor_dashboard', kwargs={'course_id': text_type(cls.course.id)})
# URL for email view
cls.email_link = '<button type="button" class="btn-link send_email" data-section="send_email">Email</button>'
def setUp(self):
super(TestNewInstructorDashboardEmailViewMongoBacked, self).setUp()
# Create instructor account
instructor = AdminFactory.create()
self.client.login(username=instructor.username, password="test")
def tearDown(self):
super(TestNewInstructorDashboardEmailViewMongoBacked, self).tearDown()
BulkEmailFlag.objects.all().delete()
# In order for bulk email to work, we must have both the BulkEmailFlag.is_enabled()
# set to True and for the course to be Mongo-backed.
# The flag is enabled and the course is Mongo-backed (should work)
def test_email_flag_true_mongo_true(self):
BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=False)
# Assert that instructor email is enabled for this course - since REQUIRE_COURSE_EMAIL_AUTH is False,
# all courses should be authorized to use email.
self.assertTrue(BulkEmailFlag.feature_enabled(self.course.id))
# Assert that the URL for the email view is in the response
response = self.client.get(self.url)
self.assertIn(self.email_link, response.content)
send_to_label = '<div class="send_to_list">Send to:</div>'
self.assertIn(send_to_label, response.content)
self.assertEqual(response.status_code, 200)
# The course is Mongo-backed but the flag is disabled (should not work)
def test_email_flag_false_mongo_true(self):
BulkEmailFlag.objects.create(enabled=False)
# Assert that the URL for the email view is not in the response
response = self.client.get(self.url)
self.assertNotIn(self.email_link, response.content)
# Flag is enabled, but we require course auth and haven't turned it on for this course
def test_course_not_authorized(self):
BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=True)
# Assert that instructor email is not enabled for this course
self.assertFalse(BulkEmailFlag.feature_enabled(self.course.id))
# Assert that the URL for the email view is not in the response
response = self.client.get(self.url)
self.assertNotIn(self.email_link, response.content)
# Flag is enabled, we require course auth and turn it on for this course
def test_course_authorized(self):
BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=True)
# Assert that instructor email is not enabled for this course
self.assertFalse(BulkEmailFlag.feature_enabled(self.course.id))
# Assert that the URL for the email view is not in the response
response = self.client.get(self.url)
self.assertNotIn(self.email_link, response.content)
# Authorize the course to use email
cauth = CourseAuthorization(course_id=self.course.id, email_enabled=True)
cauth.save()
# Assert that instructor email is enabled for this course
self.assertTrue(BulkEmailFlag.feature_enabled(self.course.id))
# Assert that the URL for the email view is in the response
response = self.client.get(self.url)
self.assertIn(self.email_link, response.content)
# Flag is disabled, but course is authorized
def test_course_authorized_feature_off(self):
BulkEmailFlag.objects.create(enabled=False, require_course_email_auth=True)
# Authorize the course to use email
cauth = CourseAuthorization(course_id=self.course.id, email_enabled=True)
cauth.save()
# Assert that this course is authorized for instructor email, but the feature is not enabled
self.assertFalse(BulkEmailFlag.feature_enabled(self.course.id))
self.assertTrue(CourseAuthorization.instructor_email_enabled(self.course.id))
# Assert that the URL for the email view IS NOT in the response
response = self.client.get(self.url)
self.assertNotIn(self.email_link, response.content)
@attr(shard=1)
class TestNewInstructorDashboardEmailViewXMLBacked(SharedModuleStoreTestCase):
    """
    Check for email view on the new instructor dashboard
    for an XML (non-Mongo) backed course: the bulk-email tab must be
    absent regardless of the bulk-email feature flag.
    """
    MODULESTORE = TEST_DATA_MIXED_MODULESTORE
    @classmethod
    def setUpClass(cls):
        super(TestNewInstructorDashboardEmailViewXMLBacked, cls).setUpClass()
        cls.course_key = CourseKey.from_string('edX/toy/2012_Fall')
        # URL for instructor dash
        cls.url = reverse('instructor_dashboard', kwargs={'course_id': text_type(cls.course_key)})
        # URL for email view
        cls.email_link = '<button type="button" class="btn-link send_email" data-section="send_email">Email</button>'
    def setUp(self):
        super(TestNewInstructorDashboardEmailViewXMLBacked, self).setUp()
        # Create instructor account
        instructor = AdminFactory.create()
        self.client.login(username=instructor.username, password="test")
        # NOTE(review): the two assignments below duplicate the values
        # already set in setUpClass -- presumably redundant; confirm
        # before removing.
        # URL for instructor dash
        self.url = reverse('instructor_dashboard', kwargs={'course_id': text_type(self.course_key)})
        # URL for email view
        self.email_link = '<button type="button" class="btn-link send_email" data-section="send_email">Email</button>'
    def tearDown(self):
        super(TestNewInstructorDashboardEmailViewXMLBacked, self).tearDown()
        # Reset flag state so it cannot leak between tests.
        BulkEmailFlag.objects.all().delete()
    # The flag is enabled, and since REQUIRE_COURSE_EMAIL_AUTH is False, all courses should
    # be authorized to use email. But the course is not Mongo-backed (should not work)
    def test_email_flag_true_mongo_false(self):
        BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=False)
        response = self.client.get(self.url)
        self.assertNotIn(self.email_link, response.content)
    # The flag is disabled and the course is not Mongo-backed (should not work)
    def test_email_flag_false_mongo_false(self):
        BulkEmailFlag.objects.create(enabled=False, require_course_email_auth=False)
        response = self.client.get(self.url)
        self.assertNotIn(self.email_link, response.content)
| agpl-3.0 |
yephper/django | tests/humanize_tests/tests.py | 1 | 13453 | from __future__ import unicode_literals
import datetime
from decimal import Decimal
from unittest import skipIf
from django.contrib.humanize.templatetags import humanize
from django.template import Context, Template, defaultfilters
from django.test import TestCase, modify_settings, override_settings
from django.utils import translation
from django.utils.html import escape
from django.utils.timezone import get_fixed_timezone, utc
from django.utils.translation import ugettext as _
try:
import pytz
except ImportError:
pytz = None
# Mock out datetime in some tests so they don't fail occasionally when they
# run too slow. Use a fixed datetime for datetime.now(). DST change in
# America/Chicago (the default time zone) happened on March 11th in 2012.
now = datetime.datetime(2012, 3, 9, 22, 30)


class MockDateTime(datetime.datetime):
    """datetime replacement whose now() always returns the fixed ``now``."""

    @classmethod
    def now(cls, tz=None):
        # Naive request (or a tzinfo reporting no offset): the fixed
        # local time as-is.
        if tz is None or tz.utcoffset(now) is None:
            return now
        # Aware request: shift the fixed moment into the requested zone
        # (equivalent to now.replace(tzinfo=utc) expressed in `tz`).
        return now.replace(tzinfo=tz) + tz.utcoffset(now)
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.humanize'})
class HumanizeTests(TestCase):
    def humanize_tester(self, test_list, result_list, method, normalize_result_func=escape):
        """Render each input through the given humanize filter and assert
        the output matches the pairwise expected result.

        ``normalize_result_func`` is applied to each expected value before
        comparison (HTML-escape by default, since template output is
        auto-escaped).
        """
        for test_content, result in zip(test_list, result_list):
            # The template reads `test_content` out of Context(locals()).
            t = Template('{%% load humanize %%}{{ test_content|%s }}' % method)
            rendered = t.render(Context(locals())).strip()
            self.assertEqual(rendered, normalize_result_func(result),
                             msg="%s test failed, produced '%s', should've produced '%s'" % (method, rendered, result))
def test_ordinal(self):
test_list = ('1', '2', '3', '4', '11', '12',
'13', '101', '102', '103', '111',
'something else', None)
result_list = ('1st', '2nd', '3rd', '4th', '11th',
'12th', '13th', '101st', '102nd', '103rd',
'111th', 'something else', None)
with translation.override('en'):
self.humanize_tester(test_list, result_list, 'ordinal')
def test_i18n_html_ordinal(self):
"""Allow html in output on i18n strings"""
test_list = ('1', '2', '3', '4', '11', '12',
'13', '101', '102', '103', '111',
'something else', None)
result_list = ('1<sup>er</sup>', '2<sup>e</sup>', '3<sup>e</sup>', '4<sup>e</sup>',
'11<sup>e</sup>', '12<sup>e</sup>', '13<sup>e</sup>', '101<sup>er</sup>',
'102<sup>e</sup>', '103<sup>e</sup>', '111<sup>e</sup>', 'something else',
'None')
with translation.override('fr-fr'):
self.humanize_tester(test_list, result_list, 'ordinal', lambda x: x)
def test_intcomma(self):
test_list = (100, 1000, 10123, 10311, 1000000, 1234567.25,
'100', '1000', '10123', '10311', '1000000', '1234567.1234567', Decimal('1234567.1234567'),
None)
result_list = ('100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.25',
'100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.1234567', '1,234,567.1234567',
None)
with translation.override('en'):
self.humanize_tester(test_list, result_list, 'intcomma')
def test_l10n_intcomma(self):
test_list = (100, 1000, 10123, 10311, 1000000, 1234567.25,
'100', '1000', '10123', '10311', '1000000', '1234567.1234567', Decimal('1234567.1234567'),
None)
result_list = ('100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.25',
'100', '1,000', '10,123', '10,311', '1,000,000', '1,234,567.1234567', '1,234,567.1234567',
None)
with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=False):
with translation.override('en'):
self.humanize_tester(test_list, result_list, 'intcomma')
def test_intcomma_without_number_grouping(self):
# Regression for #17414
with translation.override('ja'), self.settings(USE_L10N=True):
self.humanize_tester([100], ['100'], 'intcomma')
def test_intword(self):
test_list = ('100', '1000000', '1200000', '1290000',
'1000000000', '2000000000', '6000000000000',
'1300000000000000', '3500000000000000000000',
'8100000000000000000000000000000000', None)
result_list = ('100', '1.0 million', '1.2 million', '1.3 million',
'1.0 billion', '2.0 billion', '6.0 trillion',
'1.3 quadrillion', '3.5 sextillion',
'8.1 decillion', None)
with translation.override('en'):
self.humanize_tester(test_list, result_list, 'intword')
def test_i18n_intcomma(self):
test_list = (100, 1000, 10123, 10311, 1000000, 1234567.25,
'100', '1000', '10123', '10311', '1000000', None)
result_list = ('100', '1.000', '10.123', '10.311', '1.000.000', '1.234.567,25',
'100', '1.000', '10.123', '10.311', '1.000.000', None)
with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True):
with translation.override('de'):
self.humanize_tester(test_list, result_list, 'intcomma')
def test_i18n_intword(self):
test_list = ('100', '1000000', '1200000', '1290000',
'1000000000', '2000000000', '6000000000000')
result_list = ('100', '1,0 Million', '1,2 Millionen', '1,3 Millionen',
'1,0 Milliarde', '2,0 Milliarden', '6,0 Billionen')
with self.settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True):
with translation.override('de'):
self.humanize_tester(test_list, result_list, 'intword')
def test_apnumber(self):
test_list = [str(x) for x in range(1, 11)]
test_list.append(None)
result_list = ('one', 'two', 'three', 'four', 'five', 'six',
'seven', 'eight', 'nine', '10', None)
with translation.override('en'):
self.humanize_tester(test_list, result_list, 'apnumber')
def test_naturalday(self):
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
tomorrow = today + datetime.timedelta(days=1)
someday = today - datetime.timedelta(days=10)
notdate = "I'm not a date value"
test_list = (today, yesterday, tomorrow, someday, notdate, None)
someday_result = defaultfilters.date(someday)
result_list = (_('today'), _('yesterday'), _('tomorrow'),
someday_result, "I'm not a date value", None)
self.humanize_tester(test_list, result_list, 'naturalday')
def test_naturalday_tz(self):
today = datetime.date.today()
tz_one = get_fixed_timezone(-720)
tz_two = get_fixed_timezone(720)
# Can be today or yesterday
date_one = datetime.datetime(today.year, today.month, today.day, tzinfo=tz_one)
naturalday_one = humanize.naturalday(date_one)
# Can be today or tomorrow
date_two = datetime.datetime(today.year, today.month, today.day, tzinfo=tz_two)
naturalday_two = humanize.naturalday(date_two)
# As 24h of difference they will never be the same
self.assertNotEqual(naturalday_one, naturalday_two)
@skipIf(pytz is None, "this test requires pytz")
def test_naturalday_uses_localtime(self):
# Regression for #18504
# This is 2012-03-08HT19:30:00-06:00 in America/Chicago
dt = datetime.datetime(2012, 3, 9, 1, 30, tzinfo=utc)
orig_humanize_datetime, humanize.datetime = humanize.datetime, MockDateTime
try:
with override_settings(TIME_ZONE="America/Chicago", USE_TZ=True):
with translation.override('en'):
self.humanize_tester([dt], ['yesterday'], 'naturalday')
finally:
humanize.datetime = orig_humanize_datetime
def test_naturaltime(self):
class naive(datetime.tzinfo):
def utcoffset(self, dt):
return None
test_list = [
now,
now - datetime.timedelta(seconds=1),
now - datetime.timedelta(seconds=30),
now - datetime.timedelta(minutes=1, seconds=30),
now - datetime.timedelta(minutes=2),
now - datetime.timedelta(hours=1, minutes=30, seconds=30),
now - datetime.timedelta(hours=23, minutes=50, seconds=50),
now - datetime.timedelta(days=1),
now - datetime.timedelta(days=500),
now + datetime.timedelta(seconds=1),
now + datetime.timedelta(seconds=30),
now + datetime.timedelta(minutes=1, seconds=30),
now + datetime.timedelta(minutes=2),
now + datetime.timedelta(hours=1, minutes=30, seconds=30),
now + datetime.timedelta(hours=23, minutes=50, seconds=50),
now + datetime.timedelta(days=1),
now + datetime.timedelta(days=2, hours=6),
now + datetime.timedelta(days=500),
now.replace(tzinfo=naive()),
now.replace(tzinfo=utc),
]
result_list = [
'now',
'a second ago',
'30\xa0seconds ago',
'a minute ago',
'2\xa0minutes ago',
'an hour ago',
'23\xa0hours ago',
'1\xa0day ago',
'1\xa0year, 4\xa0months ago',
'a second from now',
'30\xa0seconds from now',
'a minute from now',
'2\xa0minutes from now',
'an hour from now',
'23\xa0hours from now',
'1\xa0day from now',
'2\xa0days, 6\xa0hours from now',
'1\xa0year, 4\xa0months from now',
'now',
'now',
]
# Because of the DST change, 2 days and 6 hours after the chosen
# date in naive arithmetic is only 2 days and 5 hours after in
# aware arithmetic.
result_list_with_tz_support = result_list[:]
assert result_list_with_tz_support[-4] == '2\xa0days, 6\xa0hours from now'
result_list_with_tz_support[-4] == '2\xa0days, 5\xa0hours from now'
orig_humanize_datetime, humanize.datetime = humanize.datetime, MockDateTime
try:
with translation.override('en'):
self.humanize_tester(test_list, result_list, 'naturaltime')
with override_settings(USE_TZ=True):
self.humanize_tester(
test_list, result_list_with_tz_support, 'naturaltime')
finally:
humanize.datetime = orig_humanize_datetime
def test_naturaltime_as_documented(self):
"""
#23340 -- Verify the documented behavior of humanize.naturaltime.
"""
time_format = '%d %b %Y %H:%M:%S'
documented_now = datetime.datetime.strptime('17 Feb 2007 16:30:00', time_format)
test_data = (
('17 Feb 2007 16:30:00', 'now'),
('17 Feb 2007 16:29:31', '29 seconds ago'),
('17 Feb 2007 16:29:00', 'a minute ago'),
('17 Feb 2007 16:25:35', '4 minutes ago'),
('17 Feb 2007 15:30:29', '59 minutes ago'),
('17 Feb 2007 15:30:01', '59 minutes ago'),
('17 Feb 2007 15:30:00', 'an hour ago'),
('17 Feb 2007 13:31:29', '2 hours ago'),
('16 Feb 2007 13:31:29', '1 day, 2 hours ago'),
('16 Feb 2007 13:30:01', '1 day, 2 hours ago'),
('16 Feb 2007 13:30:00', '1 day, 3 hours ago'),
('17 Feb 2007 16:30:30', '30 seconds from now'),
('17 Feb 2007 16:30:29', '29 seconds from now'),
('17 Feb 2007 16:31:00', 'a minute from now'),
('17 Feb 2007 16:34:35', '4 minutes from now'),
('17 Feb 2007 17:30:29', 'an hour from now'),
('17 Feb 2007 18:31:29', '2 hours from now'),
('18 Feb 2007 16:31:29', '1 day from now'),
('26 Feb 2007 18:31:29', '1 week, 2 days from now'),
)
class DocumentedMockDateTime(datetime.datetime):
@classmethod
def now(cls, tz=None):
if tz is None or tz.utcoffset(documented_now) is None:
return documented_now
else:
return documented_now.replace(tzinfo=tz) + tz.utcoffset(now)
orig_humanize_datetime = humanize.datetime
humanize.datetime = DocumentedMockDateTime
try:
for test_time_string, expected_natural_time in test_data:
test_time = datetime.datetime.strptime(test_time_string, time_format)
natural_time = humanize.naturaltime(test_time).replace('\xa0', ' ')
self.assertEqual(expected_natural_time, natural_time)
finally:
humanize.datetime = orig_humanize_datetime
| bsd-3-clause |
aristotle-tek/cuny-bdif | AWS/ec2/lib/boto-2.34.0/boto/dynamodb/exceptions.py | 185 | 1687 | """
Exceptions that are specific to the dynamodb module.
"""
from boto.exception import BotoServerError, BotoClientError
from boto.exception import DynamoDBResponseError
class DynamoDBExpiredTokenError(BotoServerError):
    """
    Raised when a DynamoDB security token expires.  This is generally
    boto's (or the user's) notice to renew their DynamoDB security tokens.
    """


class DynamoDBKeyNotFoundError(BotoClientError):
    """
    Raised when attempting to retrieve or interact with an item whose
    key can't be found.
    """


class DynamoDBItemError(BotoClientError):
    """
    Raised when invalid parameters are passed when creating a new
    Item in DynamoDB.
    """


class DynamoDBNumberError(BotoClientError):
    """
    Raised in the event of incompatible numeric type casting.
    """


class DynamoDBConditionalCheckFailedError(DynamoDBResponseError):
    """
    Raised when a ConditionalCheckFailedException response is received.
    This happens when a conditional check, expressed via the
    expected_value parameter, fails.
    """


class DynamoDBValidationError(DynamoDBResponseError):
    """
    Raised when a ValidationException response is received.  This happens
    when one or more required parameter values are missing, or if the
    item has exceeded the 64Kb size limit.
    """


class DynamoDBThroughputExceededError(DynamoDBResponseError):
    """
    Raised when the provisioned throughput has been exceeded.  Normally,
    when provisioned throughput is exceeded the operation is retried.
    If the retries are exhausted then this exception will be raised.
    """
MrLoick/python-for-android | python3-alpha/python3-src/Lib/encodings/cp860.py | 272 | 34681 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP860.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo registry entry for 'cp860'."""
    # A single stateless Codec instance supplies both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp860',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table

# Full 256-entry mapping from each CP860 byte value to its Unicode
# character.  This table is the single source of truth: both dict
# representations below are derived from it, so the three structures
# can never drift out of sync (the original file hand-maintained all
# three in parallel).  Bytes 0x00-0x7F are plain ASCII; only the high
# half differs, so it is spelled out explicitly, 16 characters per row.
decoding_table = (
    # 0x00-0x7f: identical to ASCII (C0 controls, printables, DEL)
    ''.join(map(chr, range(0x80))) +
    # 0x80-0x8f: Latin letters with diacritics (Portuguese repertoire)
    '\xc7\xfc\xe9\xe2\xe3\xe0\xc1\xe7\xea\xca\xe8\xcd\xd4\xec\xc3\xc2'
    # 0x90-0x9f: more accented letters, cent/pound signs, peseta sign
    '\xc9\xc0\xc8\xf4\xf5\xf2\xda\xf9\xcc\xd5\xdc\xa2\xa3\xd9\u20a7\xd3'
    # 0xa0-0xaf: accented lowercase, ordinal indicators, punctuation
    '\xe1\xed\xf3\xfa\xf1\xd1\xaa\xba\xbf\xd2\xac\xbd\xbc\xa1\xab\xbb'
    # 0xb0-0xbf: shade blocks and box-drawing (right-side pieces)
    '\u2591\u2592\u2593\u2502\u2524\u2561\u2562\u2556'
    '\u2555\u2563\u2551\u2557\u255d\u255c\u255b\u2510'
    # 0xc0-0xcf: box-drawing (left-side pieces and crossings)
    '\u2514\u2534\u252c\u251c\u2500\u253c\u255e\u255f'
    '\u255a\u2554\u2569\u2566\u2560\u2550\u256c\u2567'
    # 0xd0-0xdf: box-drawing and block elements
    '\u2568\u2564\u2565\u2559\u2558\u2552\u2553\u256b'
    '\u256a\u2518\u250c\u2588\u2584\u258c\u2590\u2580'
    # 0xe0-0xef: Greek letters and math symbols
    '\u03b1\xdf\u0393\u03c0\u03a3\u03c3\xb5\u03c4'
    '\u03a6\u0398\u03a9\u03b4\u221e\u03c6\u03b5\u2229'
    # 0xf0-0xff: math symbols, superscripts, black square, NBSP
    '\u2261\xb1\u2265\u2264\u2320\u2321\xf7\u2248'
    '\xb0\u2219\xb7\u221a\u207f\xb2\u25a0\xa0'
)

### Decoding Map

# byte value -> Unicode code point (dict form of decoding_table).
decoding_map = {byte: ord(char) for byte, char in enumerate(decoding_table)}

### Encoding Map

# Unicode code point -> byte value: the exact inverse of decoding_table.
# CP860 is a bijection (no character appears twice), so no entries collide.
encoding_map = {ord(char): byte for byte, char in enumerate(decoding_table)}
| apache-2.0 |
pu6ki/elsyser | homeworks/migrations/0001_initial.py | 1 | 2104 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-09 14:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the homeworks app (auto-generated by Django
    # 1.10.2): creates the Homework and Submission models.

    initial = True

    dependencies = [
        # Requires the students app's initial migration, which defines
        # the Teacher, Class, Subject and Student foreign-key targets.
        ('students', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Homework',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deadline', models.DateField()),
                ('details', models.TextField(blank=True, max_length=256)),
                ('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='students.Teacher')),
                ('clazz', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='students.Class')),
                ('subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='students.Subject')),
            ],
            options={
                # Soonest deadline first, then grouped by class/subject.
                'ordering': ['-deadline', 'clazz', 'subject'],
            },
        ),
        migrations.CreateModel(
            name='Submission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField(max_length=2048)),
                ('solution_url', models.URLField(blank=True)),
                ('posted_on', models.DateTimeField(auto_now_add=True)),
                ('edited', models.BooleanField(default=False)),
                ('last_edited_on', models.DateTimeField(auto_now=True)),
                ('checked', models.BooleanField(default=False)),
                ('homework', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='homeworks.Homework')),
                ('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='students.Student')),
            ],
            options={
                # Newest submissions first.
                'ordering': ['-posted_on', '-last_edited_on'],
            },
        ),
    ]
| mit |
kawamon/hue | desktop/core/ext-py/Paste-2.0.1/paste/reloader.py | 33 | 6046 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
A file monitor and server restarter.
Use this like:
..code-block:: Python
import reloader
reloader.install()
Then make sure your server is installed with a shell script like::
err=3
while test "$err" -eq 3 ; do
python server.py
err="$?"
done
or is run from this .bat file (if you use Windows)::
@echo off
:repeat
python server.py
if %errorlevel% == 3 goto repeat
or run a monitoring process in Python (``paster serve --reload`` does
this).
Use the ``watch_file(filename)`` function to cause a reload/restart for
other other non-Python files (e.g., configuration files). If you have
a dynamic set of files that grows over time you can use something like::
def watch_config_files():
return CONFIG_FILE_CACHE.keys()
paste.reloader.add_file_callback(watch_config_files)
Then every time the reloader polls files it will call
``watch_config_files`` and check all the filenames it returns.
"""
from __future__ import print_function
import os
import sys
import time
import threading
import traceback
from paste.util.classinstance import classinstancemethod
def install(poll_interval=1):
    """
    Install the reloading monitor in a background daemon thread.

    Creates a ``Monitor`` that polls every ``poll_interval`` seconds
    and terminates the whole process with exit status 3 (via
    ``os._exit``) when a watched file changes; the wrapping shell
    script or monitoring process is expected to restart the server on
    that exit code.

    NOTE(review): earlier text here described a
    ``raise_keyboard_interrupt`` option, but no such parameter exists
    in this signature -- the stale reference has been removed.
    """
    mon = Monitor(poll_interval=poll_interval)
    t = threading.Thread(target=mon.periodic_reload)
    t.setDaemon(True)  # daemon thread: never blocks interpreter exit
    t.start()
class Monitor(object):
    """
    Watches the files backing every imported module (plus any
    explicitly registered extra files and callback-provided files) and
    requests a process restart -- by exiting with status 3 -- as soon
    as one of them changes on disk.
    """

    instances = []              # every Monitor created; used by class-level APIs
    global_extra_files = []     # files that all future instances will watch
    global_file_callbacks = []  # filename-producing callbacks for all instances

    def __init__(self, poll_interval):
        # filename -> last observed modification time
        self.module_mtimes = {}
        self.keep_running = True
        self.poll_interval = poll_interval
        self.extra_files = list(self.global_extra_files)
        self.instances.append(self)
        self.file_callbacks = list(self.global_file_callbacks)

    def periodic_reload(self):
        """Poll forever; exit the whole process when a change is seen."""
        while True:
            if not self.check_reload():
                # use os._exit() here and not sys.exit() since within a
                # thread sys.exit() just closes the given thread and
                # won't kill the process; note os._exit does not call
                # any atexit callbacks, nor does it do finally blocks,
                # flush open files, etc. In other words, it is rude.
                os._exit(3)
            time.sleep(self.poll_interval)

    def check_reload(self):
        """
        Return False if any watched file changed since the last poll.

        The first time a file is seen its mtime is recorded; later
        polls compare against that recorded value.
        """
        filenames = list(self.extra_files)
        for file_callback in self.file_callbacks:
            try:
                filenames.extend(file_callback())
            except Exception:
                # A broken callback must not kill the monitor thread;
                # log it and keep watching the remaining files.
                print("Error calling paste.reloader callback %r:" % file_callback,
                      file=sys.stderr)
                traceback.print_exc()
        # Copy sys.modules.values(): imports from other threads may
        # mutate the dict mid-iteration, which raises RuntimeError on
        # Python 3.
        for module in list(sys.modules.values()):
            try:
                filename = module.__file__
            except (AttributeError, ImportError):
                continue
            if filename is not None:
                filenames.append(filename)
        for filename in filenames:
            try:
                stat = os.stat(filename)
                if stat:
                    mtime = stat.st_mtime
                else:
                    mtime = 0
            except (OSError, IOError):
                # File vanished or is unreadable; nothing to compare.
                continue
            # For compiled files, also consider the source file's mtime.
            if filename.endswith('.pyc') and os.path.exists(filename[:-1]):
                mtime = max(os.stat(filename[:-1]).st_mtime, mtime)
            elif filename.endswith('$py.class') and \
                    os.path.exists(filename[:-9] + '.py'):
                mtime = max(os.stat(filename[:-9] + '.py').st_mtime, mtime)
            # Fix: dict.has_key() was removed in Python 3; the ``in``
            # operator works on both Python 2 and 3.
            if filename not in self.module_mtimes:
                self.module_mtimes[filename] = mtime
            elif self.module_mtimes[filename] < mtime:
                print("%s changed; reloading..." % filename, file=sys.stderr)
                return False
        return True

    def watch_file(self, cls, filename):
        """Watch the named file for changes."""
        filename = os.path.abspath(filename)
        if self is None:
            # Invoked on the class: register with every live instance
            # and remember the file for instances created later.
            for instance in cls.instances:
                instance.watch_file(filename)
            cls.global_extra_files.append(filename)
        else:
            self.extra_files.append(filename)

    # classinstancemethod makes the method callable on both the class
    # (self is None) and on instances.
    watch_file = classinstancemethod(watch_file)

    def add_file_callback(self, cls, callback):
        """Add a callback -- a function that takes no parameters -- that will
        return a list of filenames to watch for changes."""
        if self is None:
            for instance in cls.instances:
                instance.add_file_callback(callback)
            cls.global_file_callbacks.append(callback)
        else:
            self.file_callbacks.append(callback)

    add_file_callback = classinstancemethod(add_file_callback)
# On Jython, prefer the interpreter-level restart mechanism when available.
if sys.platform.startswith('java'):
    try:
        from _systemrestart import SystemRestart
    except ImportError:
        # older Jython without _systemrestart: fall back to plain Monitor
        pass
    else:
        class JythonMonitor(Monitor):
            """
            Monitor that utilizes Jython's special
            ``_systemrestart.SystemRestart`` exception.

            When raised from the main thread it causes Jython to reload
            the interpreter in the existing Java process (avoiding
            startup time).

            Note that this functionality of Jython is experimental and
            may change in the future.
            """

            def periodic_reload(self):
                # unlike Monitor.periodic_reload, raises instead of
                # killing the process, so Jython restarts in-place
                while True:
                    if not self.check_reload():
                        raise SystemRestart()
                    time.sleep(self.poll_interval)

# module-level convenience aliases (classinstancemethod descriptors work
# both on the class and on instances)
watch_file = Monitor.watch_file
add_file_callback = Monitor.add_file_callback
| apache-2.0 |
roadmapper/ansible | lib/ansible/plugins/__init__.py | 43 | 3362 | # (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> and others
# (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from abc import ABCMeta
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.module_utils.six import with_metaclass, string_types
from ansible.utils.display import Display
# module-wide display handle for user-facing output/warnings
display = Display()

# Global so that all instances of a PluginLoader will share the caches
MODULE_CACHE = {}
PATH_CACHE = {}
PLUGIN_PATH_CACHE = {}
def get_plugin_class(obj):
    """Derive a plugin-type name from either a string or an instance.

    Lower-cases the (class) name and strips the literal substring
    'module' from it, e.g. ``'ShellModule'`` -> ``'shell'``.
    """
    name = obj if isinstance(obj, string_types) else obj.__class__.__name__
    return name.lower().replace('module', '')
class AnsiblePlugin(with_metaclass(ABCMeta, object)):
    """Abstract base class shared by all Ansible plugin types.

    Holds the per-plugin option cache (``_options``) and the helpers to
    resolve option values from Ansible's configuration system.
    """

    # allow extra passthrough parameters
    allow_extras = False

    def __init__(self):
        self._options = {}

    def get_option(self, option, hostvars=None):
        """Return the value of ``option``, resolving it lazily on first use.

        :raise KeyError: when the configuration system does not know the option.
        """
        if option not in self._options:
            try:
                resolved = C.config.get_config_value(
                    option, plugin_type=get_plugin_class(self),
                    plugin_name=self._load_name, variables=hostvars)
            except AnsibleError as e:
                raise KeyError(to_native(e))
            self.set_option(option, resolved)
        return self._options.get(option)

    def set_option(self, option, value):
        self._options[option] = value

    def set_options(self, task_keys=None, var_options=None, direct=None):
        '''
        Sets the _options attribute with the configuration/keyword information for this plugin

        :arg task_keys: Dict with playbook keywords that affect this option
        :arg var_options: Dict with either 'connection variables'
        :arg direct: Dict with 'direct assignment'
        '''
        self._options = C.config.get_plugin_options(
            get_plugin_class(self), self._load_name,
            keys=task_keys, variables=var_options, direct=direct)

        # allow extras/wildcards from vars that are not directly consumed in
        # configuration; needed to support things like winrm that can have
        # extended protocol options we don't directly handle
        if self.allow_extras and var_options and '_extras' in var_options:
            self.set_option('_extras', var_options['_extras'])

    def has_option(self, option):
        """True when ``option`` is known, loading options lazily if needed."""
        if not self._options:
            self.set_options()
        return option in self._options

    def _check_required(self):
        # FIXME: standardize required check based on config
        pass
| gpl-3.0 |
samantp/gensimPy3 | gensim-develop/gensim/models/lsi_dispatcher.py | 1 | 7001 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s SIZE_OF_JOBS_QUEUE
Dispatcher process which orchestrates distributed LSI computations. Run this \
script only once, on any node in your cluster.
Example: python -m gensim.models.lsi_dispatcher
"""
import os, sys, logging, threading, time
from queue import Queue
from gensim import utils
logger = logging.getLogger("gensim.models.lsi_dispatcher")
# How many jobs (=chunks of N documents) to keep "pre-fetched" in a queue?
# A small number is usually enough, unless iteration over the corpus is very very
# slow (slower than the actual computation of LSI), in which case you can override
# this value from command line. ie. run "python ./lsi_dispatcher.py 100"
MAX_JOBS_QUEUE = 10

# timeout for the Queue object put/get blocking methods.
# it should really be infinity, but then keyboard interrupts don't work.
# so this is really just a hack, see http://bugs.python.org/issue1360
HUGE_TIMEOUT = 365 * 24 * 60 * 60  # one year
class Dispatcher(object):
    """
    Dispatcher object that communicates and coordinates individual workers.

    There should never be more than one dispatcher running at any one time.
    """

    def __init__(self, maxsize=0):
        """
        Note that the constructor does not fully initialize the dispatcher;
        use the `initialize()` function to populate it with workers etc.

        :param maxsize: upper bound on the number of pre-fetched jobs kept
            in the queue (0 = unbounded).
        """
        self.maxsize = maxsize
        self.workers = {}
        self.callback = None  # a pyro proxy to this object (unknown at init time, but will be set later)

    def initialize(self, **model_params):
        """
        `model_params` are parameters used to initialize individual workers (gets
        handed all the way down to worker.initialize()).
        """
        self.jobs = Queue(maxsize=self.maxsize)
        self.lock_update = threading.Lock()
        self._jobsdone = 0
        self._jobsreceived = 0

        # locate all available workers and store their proxies, for subsequent RMI calls
        self.workers = {}
        with utils.getNS() as ns:
            import Pyro4
            self.callback = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher')  # = self
            # make sure workers transfer control back to dispatcher asynchronously
            self.callback._pyroOneway.add("jobdone")
            for name, uri in ns.list(prefix='gensim.lsi_worker').items():
                try:
                    worker = Pyro4.Proxy(uri)
                    workerid = len(self.workers)
                    # make time consuming methods work asynchronously
                    worker._pyroOneway.add("requestjob")
                    worker._pyroOneway.add("exit")
                    logger.info("registering worker #%i from %s" % (workerid, uri))
                    worker.initialize(workerid, dispatcher=self.callback, **model_params)
                    self.workers[workerid] = worker
                    # kick off the worker's request/jobdone loop
                    worker.requestjob()
                except Pyro4.errors.PyroError:
                    logger.exception("unresponsive worker at %s, deleting it from the name server" % uri)
                    ns.remove(name)

        if len(self.workers) == 0:
            raise RuntimeError('no workers found; run some lsi_worker scripts on your machines first!')

    def getworkers(self):
        """
        Return pyro URIs of all registered workers.
        """
        return [worker._pyroUri for worker in self.workers.values()]

    def getjob(self, worker_id):
        # blocks until a job is available (or the huge timeout expires)
        logger.info("worker #%i requesting a new job" % worker_id)
        job = self.jobs.get(block=True, timeout=HUGE_TIMEOUT)
        logger.info("worker #%i got a new job (%i left)" % (worker_id, self.jobs.qsize()))
        return job

    def putjob(self, job):
        # blocks when the queue is full, providing back-pressure on the feeder
        self._jobsreceived += 1
        self.jobs.put(job, block=True, timeout=HUGE_TIMEOUT)
        logger.info("added a new job (len(queue)=%i items)" % self.jobs.qsize())

    def getstate(self):
        """
        Merge projections from across all workers and return the final projection.
        """
        logger.info("end of input, assigning all remaining jobs")
        # wait until every dispatched job has been reported done
        while self._jobsdone < self._jobsreceived:
            time.sleep(0.5)  # check every half a second

        # TODO: merge in parallel, so that we're done in `log_2(workers)` merges,
        # and not `workers - 1` merges!
        # but merging only takes place once, after all input data has been processed,
        # so the overall effect would be small... compared to the amount of coding :-)
        logger.info("merging states from %i workers" % len(self.workers))
        workers = list(self.workers.items())
        result = workers[0][1].getstate()
        for workerid, worker in workers[1:]:
            logger.info("pulling state from worker %s" % workerid)
            result.merge(worker.getstate())
        logger.info("sending out merged projection")
        return result

    @utils.synchronous('lock_update')
    def jobdone(self, workerid):
        """
        A worker has finished its job. Log this event and then asynchronously
        transfer control back to the worker.

        In this way, control flow basically oscillates between dispatcher.jobdone()
        worker.requestjob().
        """
        self._jobsdone += 1
        logger.info("worker #%s finished job #%i" % (workerid, self._jobsdone))
        worker = self.workers[workerid]
        worker.requestjob()  # tell the worker to ask for another job, asynchronously (one-way)

    def jobsdone(self):
        """Wrap self._jobsdone, needed for remote access through proxies"""
        return self._jobsdone

    def exit(self):
        """
        Terminate all registered workers and then the dispatcher.
        """
        for workerid, worker in self.workers.items():
            logger.info("terminating worker %s" % workerid)
            worker.exit()
        logger.info("terminating dispatcher")
        os._exit(0)  # exit the whole process (not just this thread ala sys.exit())
#endclass Dispatcher
def main():
    """Parse the optional jobs-queue size argument and run the dispatcher
    as a Pyro daemon (blocks until terminated)."""
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    logger.info("running %s" % " ".join(sys.argv))
    program = os.path.basename(sys.argv[0])

    # NOTE(review): the original guard here was ``if len(sys.argv) < 1``,
    # which can never be true (argv always contains at least the program
    # name), so the usage message it printed was unreachable dead code and
    # has been removed. The queue-size argument is optional by design.
    if len(sys.argv) < 2:
        maxsize = MAX_JOBS_QUEUE
    else:
        maxsize = int(sys.argv[1])
    utils.pyro_daemon('gensim.lsi_dispatcher', Dispatcher(maxsize=maxsize))

    logger.info("finished running %s" % program)


if __name__ == '__main__':
    main()
| gpl-3.0 |
geekboxzone/lollipop_external_chromium_org | tools/telemetry/third_party/pyserial/serial/urlhandler/protocol_loop.py | 141 | 9516 | #! python
#
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# see __init__.py
#
# This module implements a loop back connection receiving itself what it sent.
#
# The purpose of this module is.. well... You can run the unit tests with it.
# and it was so easy to implement ;-)
#
# (C) 2001-2011 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
#
# URL format: loop://[option[/option...]]
# options:
# - "debug" print diagnostic messages
from serial.serialutil import *
import threading
import time
import logging
# map log level names to constants. used in fromURL()
LOGGER_LEVELS = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
}
class LoopbackSerial(SerialBase):
    """Serial port implementation that simulates a loop back connection in plain software."""

    # baud rates accepted by _reconfigurePort's sanity check is wider; this
    # tuple just advertises the conventional rates
    BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
                 9600, 19200, 38400, 57600, 115200)

    def open(self):
        """Open port with current settings. This may throw a SerialException
        if the port cannot be opened."""
        if self._isOpen:
            raise SerialException("Port is already open.")
        self.logger = None
        # protects loop_buffer against concurrent reader/writer threads
        self.buffer_lock = threading.Lock()
        self.loop_buffer = bytearray()
        self.cts = False
        self.dsr = False

        if self._port is None:
            raise SerialException("Port must be configured before it can be used.")
        # not that there is anything to open, but the function applies the
        # options found in the URL
        self.fromURL(self.port)

        # not that there anything to configure...
        self._reconfigurePort()
        # all things set up get, now a clean start
        self._isOpen = True
        if not self._rtscts:
            self.setRTS(True)
            self.setDTR(True)
        self.flushInput()
        self.flushOutput()

    def _reconfigurePort(self):
        """Set communication parameters on opened port. for the loop://
        protocol all settings are ignored!"""
        # not that's it of any real use, but it helps in the unit tests
        if not isinstance(self._baudrate, (int, long)) or not 0 < self._baudrate < 2**32:
            raise ValueError("invalid baudrate: %r" % (self._baudrate))
        if self.logger:
            self.logger.info('_reconfigurePort()')

    def close(self):
        """Close port"""
        if self._isOpen:
            self._isOpen = False
            # in case of quick reconnects, give the server some time
            time.sleep(0.3)

    def makeDeviceName(self, port):
        # numbers are meaningless for the loop:// protocol
        raise SerialException("there is no sensible way to turn numbers into URLs")

    def fromURL(self, url):
        """extract host and port from an URL string"""
        if url.lower().startswith("loop://"): url = url[7:]
        try:
            # process options now, directly altering self
            for option in url.split('/'):
                if '=' in option:
                    option, value = option.split('=', 1)
                else:
                    value = None
                if not option:
                    pass
                elif option == 'logging':
                    logging.basicConfig()   # XXX is that good to call it here?
                    self.logger = logging.getLogger('pySerial.loop')
                    self.logger.setLevel(LOGGER_LEVELS[value])
                    self.logger.debug('enabled logging')
                else:
                    raise ValueError('unknown option: %r' % (option,))
        except ValueError, e:
            raise SerialException('expected a string in the form "[loop://][option[/option...]]": %s' % e)

    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -

    def inWaiting(self):
        """Return the number of characters currently in the input buffer."""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            # attention the logged value can differ from return value in
            # threaded environments...
            self.logger.debug('inWaiting() -> %d' % (len(self.loop_buffer),))
        return len(self.loop_buffer)

    def read(self, size=1):
        """Read size bytes from the serial port. If a timeout is set it may
        return less characters as requested. With no timeout it will block
        until the requested number of bytes is read."""
        if not self._isOpen: raise portNotOpenError
        if self._timeout is not None:
            timeout = time.time() + self._timeout
        else:
            timeout = None
        data = bytearray()
        while size > 0:
            # pop up to ``size`` bytes off the loop buffer under the lock
            self.buffer_lock.acquire()
            try:
                block = to_bytes(self.loop_buffer[:size])
                del self.loop_buffer[:size]
            finally:
                self.buffer_lock.release()
            data += block
            size -= len(block)
            # check for timeout now, after data has been read.
            # useful for timeout = 0 (non blocking) read
            if timeout and time.time() > timeout:
                break
        return bytes(data)

    def write(self, data):
        """Output the given string over the serial port. Can block if the
        connection is blocked. May raise SerialException if the connection is
        closed."""
        if not self._isOpen: raise portNotOpenError
        # ensure we're working with bytes
        data = to_bytes(data)
        # calculate aprox time that would be used to send the data
        time_used_to_send = 10.0*len(data) / self._baudrate
        # when a write timeout is configured check if we would be successful
        # (not sending anything, not even the part that would have time)
        if self._writeTimeout is not None and time_used_to_send > self._writeTimeout:
            time.sleep(self._writeTimeout)  # must wait so that unit test succeeds
            raise writeTimeoutError
        # loop back: written data becomes available for read()
        self.buffer_lock.acquire()
        try:
            self.loop_buffer += data
        finally:
            self.buffer_lock.release()
        return len(data)

    def flushInput(self):
        """Clear input buffer, discarding all that is in the buffer."""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('flushInput()')
        self.buffer_lock.acquire()
        try:
            del self.loop_buffer[:]
        finally:
            self.buffer_lock.release()

    def flushOutput(self):
        """Clear output buffer, aborting the current output and
        discarding all that is in the buffer."""
        # no-op: there is no separate output buffer for loop://
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('flushOutput()')

    def sendBreak(self, duration=0.25):
        """Send break condition. Timed, returns to idle state after given
        duration."""
        # no-op for the loopback simulation
        if not self._isOpen: raise portNotOpenError

    def setBreak(self, level=True):
        """Set break: Controls TXD. When active, to transmitting is
        possible."""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('setBreak(%r)' % (level,))

    def setRTS(self, level=True):
        """Set terminal status line: Request To Send"""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('setRTS(%r) -> state of CTS' % (level,))
        # loop back: RTS drives our own CTS
        self.cts = level

    def setDTR(self, level=True):
        """Set terminal status line: Data Terminal Ready"""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('setDTR(%r) -> state of DSR' % (level,))
        # loop back: DTR drives our own DSR
        self.dsr = level

    def getCTS(self):
        """Read terminal status line: Clear To Send"""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('getCTS() -> state of RTS (%r)' % (self.cts,))
        return self.cts

    def getDSR(self):
        """Read terminal status line: Data Set Ready"""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('getDSR() -> state of DTR (%r)' % (self.dsr,))
        return self.dsr

    def getRI(self):
        """Read terminal status line: Ring Indicator"""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('returning dummy for getRI()')
        return False

    def getCD(self):
        """Read terminal status line: Carrier Detect"""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('returning dummy for getCD()')
        return True
# - - - platform specific - - -
# None so far
# assemble Serial class with the platform specific implementation and the base
# for file-like behavior. for Python 2.6 and newer, that provide the new I/O
# library, derive from io.RawIOBase
try:
    import io
except ImportError:
    # classic version with our own file-like emulation
    class Serial(LoopbackSerial, FileLike):
        pass
else:
    # io library present
    class Serial(LoopbackSerial, io.RawIOBase):
        pass
# simple client test: write a few bytes into the loopback port and
# read them straight back
if __name__ == '__main__':
    import sys
    s = Serial('loop://')
    sys.stdout.write('%s\n' % s)
    sys.stdout.write("write...\n")
    s.write("hello\n")
    s.flush()
    sys.stdout.write("read: %s\n" % s.read(5))
    s.close()
| bsd-3-clause |
Dude-X/selenium | py/selenium/webdriver/firefox/service.py | 34 | 2261 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import service
class Service(service.Service):
    """Manages the lifetime (start/stop) of a GeckoDriver process."""

    def __init__(self, executable_path, port=0, service_args=None,
                 log_path="geckodriver.log", env=None):
        """Creates a new instance of the GeckoDriver remote service proxy.

        GeckoDriver provides a HTTP interface speaking the W3C WebDriver
        protocol to Marionette.

        :param executable_path: Path to the GeckoDriver binary.
        :param port: Run the remote service on a specified port.
            Defaults to 0, which binds to a random open port of the
            system's choosing.
        :param service_args: Optional list of arguments to pass to the
            GeckoDriver binary.
        :param log_path: Optional path for the GeckoDriver to log to.
            Defaults to _geckodriver.log_ in the current working directory.
        :param env: Optional dictionary of output variables to expose
            in the services' environment.
        """
        # only open a log file when a non-empty path was supplied
        if log_path is not None and log_path != "":
            log_file = open(log_path, "a+")
        else:
            log_file = None
        service.Service.__init__(
            self, executable_path, port=port, log_file=log_file, env=env)
        self.service_args = service_args or []

    def command_line_args(self):
        # geckodriver only needs the port; append any user-supplied args
        args = ["--port", "%d" % self.port]
        args.extend(self.service_args)
        return args

    def send_remote_shutdown_command(self):
        # geckodriver has no remote shutdown endpoint; the process is
        # terminated by the base class instead
        pass
| apache-2.0 |
tvalacarta/tvalacarta | python/main-classic/lib/youtube_dl/extractor/aenetworks.py | 11 | 9916 | # coding: utf-8
from __future__ import unicode_literals
import re
from .theplatform import ThePlatformIE
from ..utils import (
extract_attributes,
ExtractorError,
int_or_none,
smuggle_url,
update_url_query,
)
from ..compat import (
compat_urlparse,
)
class AENetworksBaseIE(ThePlatformIE):
    """Shared ThePlatform signing/extraction logic for A+E Networks sites."""

    # key/secret used to sign ThePlatform SMIL URLs
    _THEPLATFORM_KEY = 'crazyjava'
    _THEPLATFORM_SECRET = 's3cr3t'

    def _extract_aen_smil(self, smil_url, video_id, auth=None):
        """Try several asset-type/CDN query variants against the SMIL URL
        and return an info dict with the merged formats and subtitles.

        :param auth: optional adobe-pass auth token appended to the query.
        """
        query = {'mbr': 'true'}
        if auth:
            query['auth'] = auth
        # candidate query variants, tried in order; results are merged
        TP_SMIL_QUERY = [{
            'assetTypes': 'high_video_ak',
            'switch': 'hls_high_ak'
        }, {
            'assetTypes': 'high_video_s3'
        }, {
            'assetTypes': 'high_video_s3',
            'switch': 'hls_ingest_fastly'
        }]
        formats = []
        subtitles = {}
        last_e = None
        for q in TP_SMIL_QUERY:
            q.update(query)
            m_url = update_url_query(smil_url, q)
            m_url = self._sign_url(m_url, self._THEPLATFORM_KEY, self._THEPLATFORM_SECRET)
            try:
                tp_formats, tp_subtitles = self._extract_theplatform_smil(
                    m_url, video_id, 'Downloading %s SMIL data' % (q.get('switch') or q['assetTypes']))
            except ExtractorError as e:
                # remember the failure but keep trying the other variants
                last_e = e
                continue
            formats.extend(tp_formats)
            subtitles = self._merge_subtitles(subtitles, tp_subtitles)
        # only raise when every variant failed
        if last_e and not formats:
            raise last_e
        self._sort_formats(formats)
        return {
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
        }
class AENetworksIE(AENetworksBaseIE):
    """Extractor for show/movie/special/collection pages on A+E sites."""

    IE_NAME = 'aenetworks'
    IE_DESC = 'A+E Networks: A&E, Lifetime, History.com, FYI Network and History Vault'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:www\.)?
                        (?P<domain>
                            (?:history(?:vault)?|aetv|mylifetime|lifetimemovieclub)\.com|
                            fyi\.tv
                        )/
                        (?:
                            shows/(?P<show_path>[^/]+(?:/[^/]+){0,2})|
                            movies/(?P<movie_display_id>[^/]+)(?:/full-movie)?|
                            specials/(?P<special_display_id>[^/]+)/(?:full-special|preview-)|
                            collections/[^/]+/(?P<collection_display_id>[^/]+)
                        )
                    '''
    _TESTS = [{
        'url': 'http://www.history.com/shows/mountain-men/season-1/episode-1',
        'info_dict': {
            'id': '22253814',
            'ext': 'mp4',
            'title': 'Winter is Coming',
            'description': 'md5:641f424b7a19d8e24f26dea22cf59d74',
            'timestamp': 1338306241,
            'upload_date': '20120529',
            'uploader': 'AENE-NEW',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'add_ie': ['ThePlatform'],
    }, {
        'url': 'http://www.history.com/shows/ancient-aliens/season-1',
        'info_dict': {
            'id': '71889446852',
        },
        'playlist_mincount': 5,
    }, {
        'url': 'http://www.mylifetime.com/shows/atlanta-plastic',
        'info_dict': {
            'id': 'SERIES4317',
            'title': 'Atlanta Plastic',
        },
        'playlist_mincount': 2,
    }, {
        'url': 'http://www.aetv.com/shows/duck-dynasty/season-9/episode-1',
        'only_matching': True
    }, {
        'url': 'http://www.fyi.tv/shows/tiny-house-nation/season-1/episode-8',
        'only_matching': True
    }, {
        'url': 'http://www.mylifetime.com/shows/project-runway-junior/season-1/episode-6',
        'only_matching': True
    }, {
        'url': 'http://www.mylifetime.com/movies/center-stage-on-pointe/full-movie',
        'only_matching': True
    }, {
        'url': 'https://www.lifetimemovieclub.com/movies/a-killer-among-us',
        'only_matching': True
    }, {
        'url': 'http://www.history.com/specials/sniper-into-the-kill-zone/full-special',
        'only_matching': True
    }, {
        'url': 'https://www.historyvault.com/collections/america-the-story-of-us/westward',
        'only_matching': True
    }, {
        'url': 'https://www.aetv.com/specials/hunting-jonbenets-killer-the-untold-story/preview-hunting-jonbenets-killer-the-untold-story',
        'only_matching': True
    }]
    # maps the site domain to the adobe-pass requestor id used for auth
    _DOMAIN_TO_REQUESTOR_ID = {
        'history.com': 'HISTORY',
        'aetv.com': 'AETV',
        'mylifetime.com': 'LIFETIME',
        'lifetimemovieclub.com': 'LIFETIMEMOVIECLUB',
        'fyi.tv': 'FYI',
    }

    def _real_extract(self, url):
        domain, show_path, movie_display_id, special_display_id, collection_display_id = re.match(self._VALID_URL, url).groups()
        display_id = show_path or movie_display_id or special_display_id or collection_display_id
        webpage = self._download_webpage(url, display_id, headers=self.geo_verification_headers())
        if show_path:
            url_parts = show_path.split('/')
            url_parts_len = len(url_parts)
            if url_parts_len == 1:
                # series page: one playlist entry per season link found
                entries = []
                for season_url_path in re.findall(r'(?s)<li[^>]+data-href="(/shows/%s/season-\d+)"' % url_parts[0], webpage):
                    entries.append(self.url_result(
                        compat_urlparse.urljoin(url, season_url_path), 'AENetworks'))
                if entries:
                    return self.playlist_result(
                        entries, self._html_search_meta('aetn:SeriesId', webpage),
                        self._html_search_meta('aetn:SeriesTitle', webpage))
                else:
                    # single season
                    url_parts_len = 2
            if url_parts_len == 2:
                # season page: one playlist entry per episode item
                entries = []
                for episode_item in re.findall(r'(?s)<[^>]+class="[^"]*(?:episode|program)-item[^"]*"[^>]*>', webpage):
                    episode_attributes = extract_attributes(episode_item)
                    episode_url = compat_urlparse.urljoin(
                        url, episode_attributes['data-canonical'])
                    entries.append(self.url_result(
                        episode_url, 'AENetworks',
                        episode_attributes.get('data-videoid') or episode_attributes.get('data-video-id')))
                return self.playlist_result(
                    entries, self._html_search_meta('aetn:SeasonId', webpage))

        # single video (episode/movie/special) page
        video_id = self._html_search_meta('aetn:VideoID', webpage)
        media_url = self._search_regex(
            [r"media_url\s*=\s*'(?P<url>[^']+)'",
             r'data-media-url=(?P<url>(?:https?:)?//[^\s>]+)',
             r'data-media-url=(["\'])(?P<url>(?:(?!\1).)+?)\1'],
            webpage, 'video url', group='url')
        theplatform_metadata = self._download_theplatform_metadata(self._search_regex(
            r'https?://link\.theplatform\.com/s/([^?]+)', media_url, 'theplatform_path'), video_id)
        info = self._parse_theplatform_metadata(theplatform_metadata)
        auth = None
        if theplatform_metadata.get('AETN$isBehindWall'):
            # TV-Everywhere protected content: obtain an adobe-pass token
            requestor_id = self._DOMAIN_TO_REQUESTOR_ID[domain]
            resource = self._get_mvpd_resource(
                requestor_id, theplatform_metadata['title'],
                theplatform_metadata.get('AETN$PPL_pplProgramId') or theplatform_metadata.get('AETN$PPL_pplProgramId_OLD'),
                theplatform_metadata['ratings'][0]['rating'])
            auth = self._extract_mvpd_auth(
                url, video_id, requestor_id, resource)
        info.update(self._search_json_ld(webpage, video_id, fatal=False))
        info.update(self._extract_aen_smil(media_url, video_id, auth))
        return info
class HistoryTopicIE(AENetworksBaseIE):
    IE_NAME = 'history:topic'
    IE_DESC = 'History.com Topic'
    _VALID_URL = r'https?://(?:www\.)?history\.com/topics/[^/]+/(?P<id>[\w+-]+?)-video'
    _TESTS = [{
        'url': 'https://www.history.com/topics/valentines-day/history-of-valentines-day-video',
        'info_dict': {
            'id': '40700995724',
            'ext': 'mp4',
            'title': "History of Valentine’s Day",
            'description': 'md5:7b57ea4829b391995b405fa60bd7b5f7',
            'timestamp': 1375819729,
            'upload_date': '20130806',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'add_ie': ['ThePlatform'],
    }]

    def theplatform_url_result(self, theplatform_url, video_id, query):
        """Build a url_transparent result that defers to the ThePlatform
        extractor, smuggling the signing key/secret along."""
        return {
            '_type': 'url_transparent',
            'id': video_id,
            'url': smuggle_url(
                update_url_query(theplatform_url, query),
                {
                    'sig': {
                        'key': self._THEPLATFORM_KEY,
                        'secret': self._THEPLATFORM_SECRET,
                    },
                    'force_smil_url': True
                }),
            'ie_key': 'ThePlatform',
        }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # the ThePlatform id is embedded in the player iframe src
        video_id = self._search_regex(
            r'<phoenix-iframe[^>]+src="[^"]+\btpid=(\d+)', webpage, 'tpid')
        result = self._download_json(
            'https://feeds.video.aetnd.com/api/v2/history/videos',
            video_id, query={'filter[id]': video_id})['results'][0]
        title = result['title']
        info = self._extract_aen_smil(result['publicUrl'], video_id)
        info.update({
            'title': title,
            'description': result.get('description'),
            'duration': int_or_none(result.get('duration')),
            # 'added' is in milliseconds; scale down to a unix timestamp
            'timestamp': int_or_none(result.get('added'), 1000),
        })
        return info
| gpl-3.0 |
minhphung171093/GreenERP | openerp/tests/common.py | 15 | 15623 | # -*- coding: utf-8 -*-
"""
The module :mod:`openerp.tests.common` provides unittest test cases and a few
helpers and classes to write tests.
"""
import errno
import glob
import importlib
import json
import logging
import os
import select
import subprocess
import threading
import time
import itertools
import unittest
import urllib2
import xmlrpclib
from contextlib import contextmanager
from datetime import datetime, timedelta
from pprint import pformat
import werkzeug
import openerp
from openerp import api
from openerp.modules.registry import RegistryManager
_logger = logging.getLogger(__name__)
# The openerp library is supposed already configured.
ADDONS_PATH = openerp.tools.config['addons_path']
# host/port of the locally running server, used by HTTP/XML-RPC test helpers
HOST = '127.0.0.1'
PORT = openerp.tools.config['xmlrpc_port']
# Useless constant, tests are aware of the content of demo data
ADMIN_USER_ID = openerp.SUPERUSER_ID
def get_db_name():
    """Return the name of the database under test.

    Prefers the configured ``db_name``; when it is not provided on the
    command-line (e.g. while installing a database over XML-RPC), falls
    back to the name attached to the current thread, if any.
    """
    configured = openerp.tools.config['db_name']
    if configured:
        return configured
    current = threading.current_thread()
    if hasattr(current, 'dbname'):
        return current.dbname
    return configured
# For backwards-compatibility - get_db_name() should be used instead
# (snapshot taken at import time; may be stale for thread-local db names)
DB = get_db_name()
def at_install(flag):
    """ Sets the at-install state of a test, the flag is a boolean specifying
    whether the test should (``True``) or should not (``False``) run during
    module installation.

    By default, tests are run right after installing the module, before
    starting the installation of the next module.
    """
    def mark(test_obj):
        # tag the decorated test so the runner can filter on it
        test_obj.at_install = flag
        return test_obj
    return mark
def post_install(flag):
    """ Sets the post-install state of a test. The flag is a boolean
    specifying whether the test should or should not run after a set of
    module installations.

    By default, tests are *not* run after installation of all modules in the
    current installation set.
    """
    def mark(test_obj):
        # tag the decorated test so the runner can filter on it
        test_obj.post_install = flag
        return test_obj
    return mark
class BaseCase(unittest.TestCase):
    """
    Subclass of TestCase for common OpenERP-specific code.

    This class is abstract and expects self.registry, self.cr and self.uid to be
    initialized by subclasses.
    """

    def cursor(self):
        # open a fresh cursor on the registry's database
        return self.registry.cursor()

    def ref(self, xid):
        """ Returns database ID for the provided :term:`external identifier`,
        shortcut for ``get_object_reference``

        :param xid: fully-qualified :term:`external identifier`, in the form
                    :samp:`{module}.{identifier}`
        :raise: ValueError if not found
        :returns: registered id
        """
        assert "." in xid, "this method requires a fully qualified parameter, in the following form: 'module.identifier'"
        module, xid = xid.split('.')
        _, id = self.registry('ir.model.data').get_object_reference(self.cr, self.uid, module, xid)
        return id

    def browse_ref(self, xid):
        """ Returns a record object for the provided
        :term:`external identifier`

        :param xid: fully-qualified :term:`external identifier`, in the form
                    :samp:`{module}.{identifier}`
        :raise: ValueError if not found
        :returns: :class:`~openerp.models.BaseModel`
        """
        assert "." in xid, "this method requires a fully qualified parameter, in the following form: 'module.identifier'"
        module, xid = xid.split('.')
        return self.registry('ir.model.data').get_object(self.cr, self.uid, module, xid)

    @contextmanager
    def _assertRaises(self, exception):
        """ Context manager that clears the environment upon failure. """
        with super(BaseCase, self).assertRaises(exception) as cm:
            with self.env.clear_upon_failure():
                yield cm

    def assertRaises(self, exception, func=None, *args, **kwargs):
        # with a callable: classic unittest call form;
        # without: return the context manager for use in a ``with`` block
        if func:
            with self._assertRaises(exception):
                func(*args, **kwargs)
        else:
            return self._assertRaises(exception)
class TransactionCase(BaseCase):
    """ TestCase in which each test method is run in its own transaction,
    and with its own cursor. The transaction is rolled back and the cursor
    is closed after each test.
    """
    def setUp(self):
        self.registry = RegistryManager.get(get_db_name())
        #: current transaction's cursor
        self.cr = self.cursor()
        self.uid = openerp.SUPERUSER_ID
        #: :class:`~openerp.api.Environment` for the current test case
        self.env = api.Environment(self.cr, self.uid, {})
        @self.addCleanup
        def reset():
            # rollback and close the cursor, and reset the environments
            self.registry.clear_caches()
            self.env.reset()
            self.cr.rollback()
            self.cr.close()
    def patch_order(self, model, order):
        # Temporarily override `_order` of `model`, on both the new-API
        # environment class and the old-API registry model; the original
        # value is restored automatically at test cleanup.
        m_e = self.env[model]
        m_r = self.registry(model)
        old_order = m_e._order
        @self.addCleanup
        def cleanup():
            m_r._order = type(m_e)._order = old_order
        m_r._order = type(m_e)._order = order
class SingleTransactionCase(BaseCase):
    """ TestCase in which all test methods are run in the same transaction,
    the transaction is started with the first test method and rolled back at
    the end of the last.
    """
    @classmethod
    def setUpClass(cls):
        # One cursor/environment shared by every test method of the class.
        cls.registry = RegistryManager.get(get_db_name())
        cls.cr = cls.registry.cursor()
        cls.uid = openerp.SUPERUSER_ID
        cls.env = api.Environment(cls.cr, cls.uid, {})
    @classmethod
    def tearDownClass(cls):
        # rollback and close the cursor, and reset the environments
        cls.registry.clear_caches()
        cls.env.reset()
        cls.cr.rollback()
        cls.cr.close()
# Module-wide source of unique savepoint names, shared by all SavepointCase
# instances so two cases never reuse the same savepoint identifier.
savepoint_seq = itertools.count()
class SavepointCase(SingleTransactionCase):
    """ Similar to :class:`SingleTransactionCase` in that all test methods
    are run in a single transaction *but* each test case is run inside a
    rollbacked savepoint (sub-transaction).
    Useful for test cases containing fast tests but with significant database
    setup common to all cases (complex in-db test data): :meth:`~.setUpClass`
    can be used to generate db test data once, then all test cases use the
    same data without influencing one another but without having to recreate
    the test data either.
    """
    def setUp(self):
        # Open a savepoint with a unique name for this test method.
        self._savepoint_id = next(savepoint_seq)
        self.cr.execute('SAVEPOINT test_%d' % self._savepoint_id)
    def tearDown(self):
        # Undo everything the test did inside its savepoint, then flush the
        # environment/registry caches so no stale records leak into the next
        # test of the same class.
        self.cr.execute('ROLLBACK TO SAVEPOINT test_%d' % self._savepoint_id)
        self.env.clear()
        self.registry.clear_caches()
class RedirectHandler(urllib2.HTTPRedirectHandler):
    """
    HTTPRedirectHandler is predicated upon HTTPErrorProcessor being used and
    works by intercepting 3xy "errors".
    Inherit from it to handle 3xy non-error responses instead, as we're not
    using the error processor
    """
    def http_response(self, request, response):
        # Route 3xx responses through the redirect machinery by hand, since
        # no HTTPErrorProcessor is installed to do it for us.
        code, msg, hdrs = response.code, response.msg, response.info()
        if 300 <= code < 400:
            return self.parent.error(
                'http', request, response, code, msg, hdrs)
        return response
    # HTTPS responses get the exact same treatment.
    https_response = http_response
class HttpCase(TransactionCase):
    """ Transactional HTTP TestCase with url_open and phantomjs helpers.
    """
    def __init__(self, methodName='runTest'):
        super(HttpCase, self).__init__(methodName)
        # v8 api with correct xmlrpc exception handling.
        self.xmlrpc_url = url_8 = 'http://%s:%d/xmlrpc/2/' % (HOST, PORT)
        self.xmlrpc_common = xmlrpclib.ServerProxy(url_8 + 'common')
        self.xmlrpc_db = xmlrpclib.ServerProxy(url_8 + 'db')
        self.xmlrpc_object = xmlrpclib.ServerProxy(url_8 + 'object')
    def setUp(self):
        super(HttpCase, self).setUp()
        self.registry.enter_test_mode()
        # setup a magic session_id that will be rollbacked
        self.session = openerp.http.root.session_store.new()
        self.session_id = self.session.sid
        self.session.db = get_db_name()
        openerp.http.root.session_store.save(self.session)
        # setup an url opener helper
        self.opener = urllib2.OpenerDirector()
        self.opener.add_handler(urllib2.UnknownHandler())
        self.opener.add_handler(urllib2.HTTPHandler())
        self.opener.add_handler(urllib2.HTTPSHandler())
        self.opener.add_handler(urllib2.HTTPCookieProcessor())
        self.opener.add_handler(RedirectHandler())
        # Every request carries the test session's cookie.
        self.opener.addheaders.append(('Cookie', 'session_id=%s' % self.session_id))
    def tearDown(self):
        self.registry.leave_test_mode()
        super(HttpCase, self).tearDown()
    def url_open(self, url, data=None, timeout=10):
        # Open `url` through the cookie-carrying opener; a leading '/' is
        # resolved against the test server's host and port.
        if url.startswith('/'):
            url = "http://%s:%s%s" % (HOST, PORT, url)
        return self.opener.open(url, data, timeout)
    def authenticate(self, user, password):
        # Authenticate `user` and record the credentials on the magic test
        # session so later HTTP requests are logged in.
        # stay non-authenticated
        if user is None:
            return
        db = get_db_name()
        Users = self.registry['res.users']
        uid = Users.authenticate(db, user, password, None)
        # self.session.authenticate(db, user, password, uid=uid)
        # OpenERPSession.authenticate accesses the current request, which we
        # don't have, so reimplement it manually...
        session = self.session
        session.db = db
        session.uid = uid
        session.login = user
        session.password = password
        session.context = Users.context_get(self.cr, uid) or {}
        session.context['uid'] = uid
        session._fix_lang(session.context)
        openerp.http.root.session_store.save(session)
    def phantom_poll(self, phantom, timeout):
        """ Phantomjs Test protocol.
        Use console.log in phantomjs to output test results:
        - for a success: console.log("ok")
        - for an error: console.log("error")
        Other lines are relayed to the test log.
        """
        t0 = datetime.now()
        td = timedelta(seconds=timeout)
        buf = bytearray()
        while True:
            # timeout
            self.assertLess(datetime.now() - t0, td,
                "PhantomJS tests should take less than %s seconds" % timeout)
            # read a byte
            try:
                ready, _, _ = select.select([phantom.stdout], [], [], 0.5)
            except select.error, e:
                # In Python 2, select.error has no relation to IOError or
                # OSError, and no errno/strerror/filename, only a pair of
                # unnamed arguments (matching errno and strerror)
                err, _ = e.args
                if err == errno.EINTR:
                    continue
                raise
            if ready:
                s = phantom.stdout.read(1)
                if not s:
                    # EOF: phantomjs closed its stdout.
                    break
                buf.append(s)
            # process lines
            # A complete line is either a plain '\n'-terminated line, or a
            # <phantomLog>...</phantomLog> envelope (which may itself contain
            # newlines and must be accumulated until the closing tag).
            if '\n' in buf and (not buf.startswith('<phantomLog>') or '</phantomLog>' in buf):
                if buf.startswith('<phantomLog>'):
                    line = buf[12:buf.index('</phantomLog>')]
                    buf = bytearray()
                else:
                    line, buf = buf.split('\n', 1)
                line = str(line)
                lline = line.lower()
                if lline.startswith(("error", "server application error")):
                    try:
                        # when errors occur the execution stack may be sent as a JSON
                        prefix = lline.index('error') + 6
                        _logger.error("phantomjs: %s", pformat(json.loads(line[prefix:])))
                    except ValueError:
                        line_ = line.split('\n\n')
                        _logger.error("phantomjs: %s", line_[0])
                        # The second part of the log is for debugging
                        if len(line_) > 1:
                            _logger.info("phantomjs: \n%s", line.split('\n\n', 1)[1])
                        pass
                    break
                elif lline.startswith("warning"):
                    _logger.warn("phantomjs: %s", line)
                else:
                    _logger.info("phantomjs: %s", line)
                if line == "ok":
                    break
    def phantom_run(self, cmd, timeout):
        # Spawn phantomjs with `cmd`, relay its output through phantom_poll,
        # and make sure the process is reaped whatever happens.
        _logger.info('phantom_run executing %s', ' '.join(cmd))
        # Clear phantomjs's persisted localStorage for this host:port so
        # state from a previous run cannot leak into this test.
        ls_glob = os.path.expanduser('~/.qws/share/data/Ofi Labs/PhantomJS/http_%s_%s.*' % (HOST, PORT))
        for i in glob.glob(ls_glob):
            _logger.info('phantomjs unlink localstorage %s', i)
            os.unlink(i)
        try:
            phantom = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=None)
        except OSError:
            raise unittest.SkipTest("PhantomJS not found")
        try:
            self.phantom_poll(phantom, timeout)
        finally:
            # kill phantomjs if phantom.exit() wasn't called in the test
            if phantom.poll() is None:
                phantom.terminate()
                phantom.wait()
            self._wait_remaining_requests()
            # we ignore phantomjs return code as we kill it as soon as we have ok
            _logger.info("phantom_run execution finished")
    def _wait_remaining_requests(self):
        # Wait for in-flight HTTP worker threads to finish before the test's
        # transaction is rolled back, logging stacks if it takes long.
        t0 = int(time.time())
        for thread in threading.enumerate():
            if thread.name.startswith('openerp.service.http.request.'):
                while thread.isAlive():
                    # Need a busyloop here as thread.join() masks signals
                    # and would prevent the forced shutdown.
                    thread.join(0.05)
                    time.sleep(0.05)
                    t1 = int(time.time())
                    if t0 != t1:
                        _logger.info('remaining requests')
                        openerp.tools.misc.dumpstacks()
                        t0 = t1
    def phantom_js(self, url_path, code, ready="window", login=None, timeout=60, **kw):
        """ Test js code running in the browser
        - optionnally log as 'login'
        - load page given by url_path
        - wait for ready object to be available
        - eval(code) inside the page
        To signal success test do:
        console.log('ok')
        To signal failure do:
        console.log('error')
        If neither are done before timeout test fails.
        """
        options = {
            'port': PORT,
            'db': get_db_name(),
            'url_path': url_path,
            'code': code,
            'ready': ready,
            'timeout' : timeout,
            'session_id': self.session_id,
        }
        options.update(kw)
        self.authenticate(login, login)
        # phantomtest.js drives the browser using the JSON options above.
        phantomtest = os.path.join(os.path.dirname(__file__), 'phantomtest.js')
        cmd = ['phantomjs', phantomtest, json.dumps(options)]
        self.phantom_run(cmd, timeout)
def can_import(module):
    """Return ``True`` when *module* is importable, ``False`` otherwise.

    Intended for ``unittest.skipUnless`` guards on tests exercising
    *optional* dependencies: the tests run when the dependency happens to
    be installed and are skipped otherwise.
    """
    try:
        importlib.import_module(module)
    except ImportError:
        return False
    return True
| gpl-3.0 |
vikas-parashar/zulip | zerver/lib/cache.py | 3 | 15619 | from __future__ import absolute_import
from __future__ import print_function
from functools import wraps
from django.core.cache import cache as djcache
from django.core.cache import caches
from django.conf import settings
from django.db.models import Q
from django.core.cache.backends.base import BaseCache
from typing import Any, Callable, Iterable, Optional, Union, TypeVar
from zerver.lib.utils import statsd, statsd_key, make_safe_digest
import subprocess
import time
import base64
import random
import sys
import os
import os.path
import hashlib
import six
from six import text_type
if False:
from zerver.models import UserProfile, Realm, Message
# These modules have to be imported for type annotations but
# they cannot be imported at runtime due to cyclic dependency.
FuncT = TypeVar('FuncT', bound=Callable[..., Any])
remote_cache_time_start = 0.0
remote_cache_total_time = 0.0
remote_cache_total_requests = 0
def get_remote_cache_time():
    # type: () -> float
    # Total wall-clock seconds this process has spent talking to the remote
    # cache so far (accumulated by remote_cache_stats_finish()).
    return remote_cache_total_time
def get_remote_cache_requests():
    # type: () -> int
    # Number of remote cache round-trips made by this process so far
    # (incremented by remote_cache_stats_finish()).
    return remote_cache_total_requests
def remote_cache_stats_start():
    # type: () -> None
    # Record the start time of one remote cache operation; must be paired
    # with a remote_cache_stats_finish() call which accumulates the elapsed
    # time into the module-level totals.
    global remote_cache_time_start
    remote_cache_time_start = time.time()
def remote_cache_stats_finish():
    # type: () -> None
    # Close the timing window opened by remote_cache_stats_start(): bump the
    # request counter and add the elapsed time to the running total.
    global remote_cache_total_time
    global remote_cache_total_requests
    global remote_cache_time_start
    remote_cache_total_requests += 1
    remote_cache_total_time += (time.time() - remote_cache_time_start)
def get_or_create_key_prefix():
    # type: () -> text_type
    # Compute the deployment-wide cache key prefix. A random 33-character
    # prefix is generated once per deployment and persisted in
    # var/remote_cache_prefix; concurrent processes race via O_EXCL, and
    # losers poll until the winner has written the file.
    if settings.TEST_SUITE:
        # This sets the prefix mostly for the benefit of the JS tests.
        # The Python tests overwrite KEY_PREFIX on each test.
        return u'test_suite:%s:' % (text_type(os.getpid()),)
    # directory `var` should exist in production
    subprocess.check_call(["mkdir", "-p", os.path.join(settings.DEPLOY_ROOT, "var")])
    filename = os.path.join(settings.DEPLOY_ROOT, "var", "remote_cache_prefix")
    try:
        # O_EXCL makes exactly one process the writer; everyone else lands
        # in the except branch below.
        fd = os.open(filename, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o444)
        random_hash = hashlib.sha256(text_type(random.getrandbits(256)).encode('utf-8')).digest()
        prefix = base64.b16encode(random_hash)[:32].decode('utf-8').lower() + ':'
        # This does close the underlying file
        with os.fdopen(fd, 'w') as f:
            f.write(prefix + "\n")
    except OSError:
        # The file already exists
        # NOTE(review): any OSError (not only EEXIST) lands here — confirm
        # that is intentional.
        tries = 1
        while tries < 10:
            with open(filename, 'r') as f:
                prefix = f.readline()[:-1]
            if len(prefix) == 33:
                # 32 hex chars + trailing ':' means a complete prefix.
                break
            tries += 1
            prefix = ''
            time.sleep(0.5)
        if not prefix:
            print("Could not read remote cache key prefix file")
            sys.exit(1)
    return prefix
KEY_PREFIX = get_or_create_key_prefix() # type: text_type
def bounce_key_prefix_for_testing(test_name):
    # type: (text_type) -> None
    # Give each test its own cache namespace: a prefix derived from the test
    # name and the current pid, so cached state cannot leak between tests.
    global KEY_PREFIX
    KEY_PREFIX = test_name + u':' + text_type(os.getpid()) + u':'
def get_cache_backend(cache_name):
    # type: (Optional[str]) -> BaseCache
    """Look up a Django cache backend by name; ``None`` means the default cache."""
    return djcache if cache_name is None else caches[cache_name]
def cache_with_key(keyfunc, cache_name=None, timeout=None, with_statsd_key=None):
    # type: (Any, Optional[str], Optional[int], Optional[str]) -> Any
    # This function can't be typed perfectly because returning a generic function
    # isn't supported in mypy - https://github.com/python/mypy/issues/1551.
    """Decorator which applies Django caching to a function.
    Decorator argument is a function which computes a cache key
    from the original function's arguments. You are responsible
    for avoiding collisions with other uses of this decorator or
    other uses of caching."""
    def decorator(func):
        # type: (Callable[..., Any]) -> (Callable[..., Any])
        @wraps(func)
        def func_with_caching(*args, **kwargs):
            # type: (*Any, **Any) -> Callable[..., Any]
            key = keyfunc(*args, **kwargs)
            val = cache_get(key, cache_name=cache_name)
            # Report hit/miss rates to statsd, tagged by cache and key class.
            extra = ""
            if cache_name == 'database':
                extra = ".dbcache"
            if with_statsd_key is not None:
                metric_key = with_statsd_key
            else:
                metric_key = statsd_key(key)
            status = "hit" if val is not None else "miss"
            statsd.incr("cache%s.%s.%s" % (extra, metric_key, status))
            # Values are singleton tuples so that we can distinguish
            # a result of None from a missing key.
            if val is not None:
                return val[0]
            # Miss: compute the value and populate the cache for next time.
            val = func(*args, **kwargs)
            cache_set(key, val, cache_name=cache_name, timeout=timeout)
            return val
        return func_with_caching
    return decorator
def cache_set(key, val, cache_name=None, timeout=None):
    # type: (text_type, Any, Optional[str], Optional[int]) -> None
    # Store `val` under the process-prefixed key. The value is wrapped in a
    # singleton tuple so a cached None is distinguishable from a cache miss.
    remote_cache_stats_start()
    cache_backend = get_cache_backend(cache_name)
    cache_backend.set(KEY_PREFIX + key, (val,), timeout=timeout)
    remote_cache_stats_finish()
def cache_get(key, cache_name=None):
    # type: (text_type, Optional[str]) -> Any
    # Fetch the process-prefixed key; returns the stored singleton tuple
    # (see cache_set) or None on a miss.
    remote_cache_stats_start()
    cache_backend = get_cache_backend(cache_name)
    ret = cache_backend.get(KEY_PREFIX + key)
    remote_cache_stats_finish()
    return ret
def cache_get_many(keys, cache_name=None):
    # type: (List[text_type], Optional[str]) -> Dict[text_type, Any]
    # Bulk get: prefix all keys for the backend call, then strip the prefix
    # again so callers see the keys they passed in.
    keys = [KEY_PREFIX + key for key in keys]
    remote_cache_stats_start()
    ret = get_cache_backend(cache_name).get_many(keys)
    remote_cache_stats_finish()
    return dict([(key[len(KEY_PREFIX):], value) for key, value in ret.items()])
def cache_set_many(items, cache_name=None, timeout=None):
    # type: (Dict[text_type, Any], Optional[str], Optional[int]) -> None
    """Bulk set: store every (key, value) pair with the process key prefix applied."""
    prefixed = dict((KEY_PREFIX + key, value) for key, value in items.items())
    remote_cache_stats_start()
    get_cache_backend(cache_name).set_many(prefixed, timeout=timeout)
    remote_cache_stats_finish()
def cache_delete(key, cache_name=None):
    # type: (text_type, Optional[str]) -> None
    # Remove a single process-prefixed key from the given backend.
    remote_cache_stats_start()
    get_cache_backend(cache_name).delete(KEY_PREFIX + key)
    remote_cache_stats_finish()
def cache_delete_many(items, cache_name=None):
    # type: (Iterable[text_type], Optional[str]) -> None
    # Bulk delete of process-prefixed keys in one backend round-trip.
    remote_cache_stats_start()
    get_cache_backend(cache_name).delete_many(
        KEY_PREFIX + item for item in items)
    remote_cache_stats_finish()
# Required Arguments are as follows:
# * object_ids: The list of object ids to look up
# * cache_key_function: object_id => cache key
# * query_function: [object_ids] => [objects from database]
# Optional keyword arguments:
# * setter: Function to call before storing items to cache (e.g. compression)
# * extractor: Function to call on items returned from cache
# (e.g. decompression). Should be the inverse of the setter
# function.
# * id_fetcher: Function mapping an object from database => object_id
# (in case we're using a key more complex than obj.id)
# * cache_transformer: Function mapping an object from database =>
# value for cache (in case the values that we're caching are some
# function of the objects, not the objects themselves)
ObjKT = TypeVar('ObjKT', int, text_type)
ItemT = Any # https://github.com/python/mypy/issues/1721
CompressedItemT = Any # https://github.com/python/mypy/issues/1721
def generic_bulk_cached_fetch(cache_key_function, # type: Callable[[ObjKT], text_type]
                              query_function, # type: Callable[[List[ObjKT]], Iterable[Any]]
                              object_ids, # type: Iterable[ObjKT]
                              extractor=lambda obj: obj, # type: Callable[[CompressedItemT], ItemT]
                              setter=lambda obj: obj, # type: Callable[[ItemT], CompressedItemT]
                              id_fetcher=lambda obj: obj.id, # type: Callable[[Any], ObjKT]
                              cache_transformer=lambda obj: obj # type: Callable[[Any], ItemT]
                              ):
    # type: (...) -> Dict[ObjKT, Any]
    # Bulk "read-through" cache: fetch whatever is cached, query the
    # database only for the missing ids, backfill the cache with those, and
    # return a dict mapping the requested ids to their (transformed) values.
    cache_keys = {} # type: Dict[ObjKT, text_type]
    for object_id in object_ids:
        cache_keys[object_id] = cache_key_function(object_id)
    cached_objects = cache_get_many([cache_keys[object_id]
                                     for object_id in object_ids])
    # Unwrap the singleton tuples stored by cache_set and undo any
    # compression applied by `setter`.
    for (key, val) in cached_objects.items():
        cached_objects[key] = extractor(cached_objects[key][0])
    needed_ids = [object_id for object_id in object_ids if
                  cache_keys[object_id] not in cached_objects]
    db_objects = query_function(needed_ids)
    items_for_remote_cache = {} # type: Dict[text_type, Any]
    for obj in db_objects:
        key = cache_keys[id_fetcher(obj)]
        item = cache_transformer(obj)
        items_for_remote_cache[key] = (setter(item),)
        cached_objects[key] = item
    if len(items_for_remote_cache) > 0:
        cache_set_many(items_for_remote_cache)
    # Ids whose rows were not found in cache or database are simply absent
    # from the result.
    return dict((object_id, cached_objects[cache_keys[object_id]]) for object_id in object_ids
                if cache_keys[object_id] in cached_objects)
def cache(func):
    # type: (FuncT) -> FuncT
    """Decorator which applies Django caching to a function.
    Uses a key based on the function's name, filename, and
    the repr() of its arguments."""
    func_uniqifier = '%s-%s' % (func.__code__.co_filename, func.__name__) # type: ignore # https://github.com/python/mypy/issues/1923
    @wraps(func)
    def keyfunc(*args, **kwargs):
        # type: (*Any, **Any) -> str
        # Django complains about spaces because memcached rejects them
        # ('-' is escaped as '--' so the ' ' -> '-s' mapping stays invertible).
        key = func_uniqifier + repr((args, kwargs))
        return key.replace('-', '--').replace(' ', '-s')
    return cache_with_key(keyfunc)(func)
def display_recipient_cache_key(recipient_id):
    # type: (int) -> text_type
    """Cache key for the display_recipient data of the given Recipient id."""
    return u"display_recipient_dict:{:d}".format(recipient_id)
def user_profile_by_email_cache_key(email):
    # type: (text_type) -> text_type
    # See the comment in zerver/lib/avatar_hash.py:gravatar_hash for why we
    # are proactively encoding email addresses even though they will
    # with high likelihood be ASCII-only for the foreseeable future.
    # The digest also keeps arbitrary email bytes memcached-key-safe.
    return u'user_profile_by_email:%s' % (make_safe_digest(email.strip()),)
def user_profile_by_id_cache_key(user_profile_id):
    # type: (int) -> text_type
    """Cache key under which the UserProfile with the given id is stored."""
    return u"user_profile_by_id:{}".format(user_profile_id)
# TODO: Refactor these cache helpers into another file that can import
# models.py so that python3-style type annotations can also work.
def cache_save_user_profile(user_profile):
    # type: (UserProfile) -> None
    # Cache the profile by id for one week.
    cache_set(user_profile_by_id_cache_key(user_profile.id), user_profile, timeout=3600*24*7)
# Fields included in the cached per-realm active-user dicts; changing any of
# these on a user invalidates the realm's cache entry (see flush_user_profile).
active_user_dict_fields = ['id', 'full_name', 'short_name', 'email', 'is_realm_admin', 'is_bot'] # type: List[str]
def active_user_dicts_in_realm_cache_key(realm):
    # type: (Realm) -> text_type
    """Cache key for the list of active-user dicts of the given realm."""
    return u"active_user_dicts_in_realm:{}".format(realm.id)
# Fields included in the cached per-realm active-bot dicts; changing any of
# these on a bot invalidates the realm's cache entry (see flush_user_profile).
active_bot_dict_fields = ['id', 'full_name', 'short_name',
                          'email', 'default_sending_stream__name',
                          'default_events_register_stream__name',
                          'default_all_public_streams', 'api_key',
                          'bot_owner__email', 'avatar_source'] # type: List[str]
def active_bot_dicts_in_realm_cache_key(realm):
    # type: (Realm) -> text_type
    """Cache key for the list of active-bot dicts of the given realm."""
    return u"active_bot_dicts_in_realm:{}".format(realm.id)
def get_stream_cache_key(stream_name, realm):
    # type: (text_type, Union[Realm, int]) -> text_type
    # Key for a Stream looked up by (realm, name); `realm` may be either a
    # Realm object or a bare realm id. The name is normalized (strip +
    # lowercase) and digested so arbitrary stream names are key-safe.
    from zerver.models import Realm
    if isinstance(realm, Realm):
        realm_id = realm.id
    else:
        realm_id = realm
    return u"stream_by_realm_and_name:%s:%s" % (
        realm_id, make_safe_digest(stream_name.strip().lower()))
def delete_user_profile_caches(user_profiles):
    # type: (Iterable[UserProfile]) -> None
    """Invalidate both the by-email and the by-id cache entries of each profile."""
    keys = []
    for profile in user_profiles:
        keys.extend([
            user_profile_by_email_cache_key(profile.email),
            user_profile_by_id_cache_key(profile.id),
        ])
    cache_delete_many(keys)
# Called by models.py to flush the user_profile cache whenever we save
# a user_profile object
def flush_user_profile(sender, **kwargs):
    # type: (Any, **Any) -> None
    """post-save signal handler for UserProfile: invalidate affected caches.

    Clears the per-user cache entries, plus the realm-wide active-user,
    active-bot and alert-words caches when the saved fields could have
    changed what those caches contain.
    """
    user_profile = kwargs['instance']
    delete_user_profile_caches([user_profile])
    # A save with no update_fields means any field may have changed.  Use
    # .get() consistently: the original bot branch indexed
    # kwargs['update_fields'] directly, which raises KeyError for signals
    # sent without that kwarg.
    update_fields = kwargs.get('update_fields')
    # Invalidate our active_users_in_realm info dict if any user has changed
    # the fields in the dict or become (in)active
    if update_fields is None or \
            len(set(active_user_dict_fields + ['is_active']) & set(update_fields)) > 0:
        cache_delete(active_user_dicts_in_realm_cache_key(user_profile.realm))
    # Invalidate our active_bots_in_realm info dict if any bot has
    # changed the fields in the dict or become (in)active
    if user_profile.is_bot and (update_fields is None or
                                (set(active_bot_dict_fields + ['is_active']) &
                                 set(update_fields))):
        cache_delete(active_bot_dicts_in_realm_cache_key(user_profile.realm))
    # Invalidate realm-wide alert words cache if any user in the realm has changed
    # alert words
    if update_fields is None or "alert_words" in update_fields:
        cache_delete(realm_alert_words_cache_key(user_profile.realm))
# Called by models.py to flush various caches whenever we save
# a Realm object. The main tricky thing here is that Realm info is
# generally cached indirectly through user_profile objects.
def flush_realm(sender, **kwargs):
    # type: (Any, **Any) -> None
    # post-save signal handler for Realm: realm data is mostly cached via
    # user_profile entries, so flush those for every active user; the
    # realm-wide dicts are dropped only on deactivation.
    realm = kwargs['instance']
    users = realm.get_active_users()
    delete_user_profile_caches(users)
    if realm.deactivated:
        cache_delete(active_user_dicts_in_realm_cache_key(realm))
        cache_delete(active_bot_dicts_in_realm_cache_key(realm))
        cache_delete(realm_alert_words_cache_key(realm))
def realm_alert_words_cache_key(realm):
    # type: (Realm) -> text_type
    """Cache key for the realm-wide alert words mapping, keyed by realm domain."""
    return u"realm_alert_words:{}".format(realm.domain)
# Called by models.py to flush the stream cache whenever we save a stream
# object.
def flush_stream(sender, **kwargs):
    # type: (Any, **Any) -> None
    # post-save signal handler for Stream: refresh the by-(realm, name)
    # cache entry, and drop the realm's bot dicts when the change could
    # affect a bot's default-stream names cached there.
    from zerver.models import UserProfile
    stream = kwargs['instance']
    items_for_remote_cache = {}
    items_for_remote_cache[get_stream_cache_key(stream.name, stream.realm)] = (stream,)
    cache_set_many(items_for_remote_cache)
    # NOTE(review): `and` binds tighter than `or`, so this reads as
    # A or (B and C): when update_fields is None the bot-dict cache is
    # dropped without running the .exists() query at all.  Confirm the
    # intended grouping was not (A or B) and C.
    if kwargs.get('update_fields') is None or 'name' in kwargs['update_fields'] and \
       UserProfile.objects.filter(
       Q(default_sending_stream=stream) |
       Q(default_events_register_stream=stream)
       ).exists():
        cache_delete(active_bot_dicts_in_realm_cache_key(stream.realm))
# TODO: Rename to_dict_cache_key_id and to_dict_cache_key
def to_dict_cache_key_id(message_id, apply_markdown):
    # type: (int, bool) -> text_type
    """Cache key for a message's rendered dict; the trailing 1/0 encodes
    whether markdown rendering was applied."""
    return u"message_dict:{:d}:{:d}".format(message_id, apply_markdown)
def to_dict_cache_key(message, apply_markdown):
    # type: (Message, bool) -> text_type
    # Convenience wrapper around to_dict_cache_key_id for a Message object.
    return to_dict_cache_key_id(message.id, apply_markdown)
def flush_message(sender, **kwargs):
    # type: (Any, **Any) -> None
    """post-save signal handler for Message: drop both cached renderings
    (with and without markdown applied)."""
    message = kwargs['instance']
    for apply_markdown in (False, True):
        cache_delete(to_dict_cache_key(message, apply_markdown))
| apache-2.0 |
ganeshrn/ansible | test/units/executor/test_task_executor.py | 34 | 15879 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import mock
from units.compat import unittest
from units.compat.mock import patch, MagicMock
from ansible.errors import AnsibleError
from ansible.executor.task_executor import TaskExecutor, remove_omit
from ansible.plugins.loader import action_loader, lookup_loader
from ansible.parsing.yaml.objects import AnsibleUnicode
from ansible.utils.unsafe_proxy import AnsibleUnsafeText, AnsibleUnsafeBytes
from ansible.module_utils.six import text_type
from units.mock.loader import DictDataLoader
class TestTaskExecutor(unittest.TestCase):
    def test_task_executor_init(self):
        """TaskExecutor can be constructed from fully mocked collaborators."""
        fake_loader = DictDataLoader({})
        mock_host = MagicMock()
        mock_task = MagicMock()
        mock_play_context = MagicMock()
        mock_shared_loader = MagicMock()
        new_stdin = None
        job_vars = dict()
        mock_queue = MagicMock()
        te = TaskExecutor(
            host=mock_host,
            task=mock_task,
            job_vars=job_vars,
            play_context=mock_play_context,
            new_stdin=new_stdin,
            loader=fake_loader,
            shared_loader_obj=mock_shared_loader,
            final_q=mock_queue,
        )
    def test_task_executor_run(self):
        """run() handles no-loop, empty-loop, populated-loop and error paths."""
        fake_loader = DictDataLoader({})
        mock_host = MagicMock()
        mock_task = MagicMock()
        mock_task._role._role_path = '/path/to/role/foo'
        mock_play_context = MagicMock()
        mock_shared_loader = MagicMock()
        mock_queue = MagicMock()
        new_stdin = None
        job_vars = dict()
        te = TaskExecutor(
            host=mock_host,
            task=mock_task,
            job_vars=job_vars,
            play_context=mock_play_context,
            new_stdin=new_stdin,
            loader=fake_loader,
            shared_loader_obj=mock_shared_loader,
            final_q=mock_queue,
        )
        # No loop items: the single _execute() result is used.
        te._get_loop_items = MagicMock(return_value=None)
        te._execute = MagicMock(return_value=dict())
        res = te.run()
        # Empty loop list.
        te._get_loop_items = MagicMock(return_value=[])
        res = te.run()
        # Populated loop: per-item results come from _run_loop().
        te._get_loop_items = MagicMock(return_value=['a', 'b', 'c'])
        te._run_loop = MagicMock(return_value=[dict(item='a', changed=True), dict(item='b', failed=True), dict(item='c')])
        res = te.run()
        # An AnsibleError while building loop items is reported as a failure.
        te._get_loop_items = MagicMock(side_effect=AnsibleError(""))
        res = te.run()
        self.assertIn("failed", res)
    def test_task_executor_run_clean_res(self):
        """run() sanitizes loop results: unsafe values stay wrapped, plain ones don't."""
        te = TaskExecutor(None, MagicMock(), None, None, None, None, None, None)
        te._get_loop_items = MagicMock(return_value=[1])
        te._run_loop = MagicMock(
            return_value=[
                {
                    'unsafe_bytes': AnsibleUnsafeBytes(b'{{ $bar }}'),
                    'unsafe_text': AnsibleUnsafeText(u'{{ $bar }}'),
                    'bytes': b'bytes',
                    'text': u'text',
                    'int': 1,
                }
            ]
        )
        res = te.run()
        data = res['results'][0]
        # Unsafe inputs (bytes or text) must come back as AnsibleUnsafeText;
        # ordinary values keep their plain types.
        self.assertIsInstance(data['unsafe_bytes'], AnsibleUnsafeText)
        self.assertIsInstance(data['unsafe_text'], AnsibleUnsafeText)
        self.assertIsInstance(data['bytes'], text_type)
        self.assertIsInstance(data['text'], text_type)
        self.assertIsInstance(data['int'], int)
    def test_task_executor_get_loop_items(self):
        """_get_loop_items() resolves a with_items loop via the real lookup loader."""
        fake_loader = DictDataLoader({})
        mock_host = MagicMock()
        mock_task = MagicMock()
        mock_task.loop_with = 'items'
        mock_task.loop = ['a', 'b', 'c']
        mock_play_context = MagicMock()
        mock_shared_loader = MagicMock()
        # Use the real lookup loader so the 'items' lookup actually runs.
        mock_shared_loader.lookup_loader = lookup_loader
        new_stdin = None
        job_vars = dict()
        mock_queue = MagicMock()
        te = TaskExecutor(
            host=mock_host,
            task=mock_task,
            job_vars=job_vars,
            play_context=mock_play_context,
            new_stdin=new_stdin,
            loader=fake_loader,
            shared_loader_obj=mock_shared_loader,
            final_q=mock_queue,
        )
        items = te._get_loop_items()
        self.assertEqual(items, ['a', 'b', 'c'])
    def test_task_executor_run_loop(self):
        """_run_loop() executes once per item and collects one result each."""
        items = ['a', 'b', 'c']
        fake_loader = DictDataLoader({})
        mock_host = MagicMock()
        def _copy(exclude_parent=False, exclude_tasks=False):
            # Each loop iteration copies the task; return a fresh mock per copy.
            new_item = MagicMock()
            return new_item
        mock_task = MagicMock()
        mock_task.copy.side_effect = _copy
        mock_play_context = MagicMock()
        mock_shared_loader = MagicMock()
        mock_queue = MagicMock()
        new_stdin = None
        job_vars = dict()
        te = TaskExecutor(
            host=mock_host,
            task=mock_task,
            job_vars=job_vars,
            play_context=mock_play_context,
            new_stdin=new_stdin,
            loader=fake_loader,
            shared_loader_obj=mock_shared_loader,
            final_q=mock_queue,
        )
        def _execute(variables):
            # Echo back the per-item variables so results can be counted.
            return dict(item=variables.get('item'))
        te._execute = MagicMock(side_effect=_execute)
        res = te._run_loop(items)
        self.assertEqual(len(res), 3)
    def test_task_executor_get_action_handler(self):
        """When a plugin exists for the exact action name, it is used directly."""
        te = TaskExecutor(
            host=MagicMock(),
            task=MagicMock(),
            job_vars={},
            play_context=MagicMock(),
            new_stdin=None,
            loader=DictDataLoader({}),
            shared_loader_obj=MagicMock(),
            final_q=MagicMock(),
        )
        action_loader = te._shared_loader_obj.action_loader
        action_loader.has_plugin.return_value = True
        action_loader.get.return_value = mock.sentinel.handler
        mock_connection = MagicMock()
        mock_templar = MagicMock()
        action = 'namespace.prefix_suffix'
        te._task.action = action
        handler = te._get_action_handler(mock_connection, mock_templar)
        self.assertIs(mock.sentinel.handler, handler)
        # The loader must be queried and invoked with the full action name.
        action_loader.has_plugin.assert_called_once_with(
            action, collection_list=te._task.collections)
        action_loader.get.assert_called_once_with(
            te._task.action, task=te._task, connection=mock_connection,
            play_context=te._play_context, loader=te._loader,
            templar=mock_templar, shared_loader_obj=te._shared_loader_obj,
            collection_list=te._task.collections)
    def test_task_executor_get_handler_prefix(self):
        """With no exact-name plugin, the module prefix (e.g. 'netconf') is tried."""
        te = TaskExecutor(
            host=MagicMock(),
            task=MagicMock(),
            job_vars={},
            play_context=MagicMock(),
            new_stdin=None,
            loader=DictDataLoader({}),
            shared_loader_obj=MagicMock(),
            final_q=MagicMock(),
        )
        action_loader = te._shared_loader_obj.action_loader
        # First lookup (full name) misses, second (prefix) hits.
        action_loader.has_plugin.side_effect = [False, True]
        action_loader.get.return_value = mock.sentinel.handler
        action_loader.__contains__.return_value = True
        mock_connection = MagicMock()
        mock_templar = MagicMock()
        action = 'namespace.netconf_suffix'
        module_prefix = action.split('_')[0]
        te._task.action = action
        handler = te._get_action_handler(mock_connection, mock_templar)
        self.assertIs(mock.sentinel.handler, handler)
        action_loader.has_plugin.assert_has_calls([mock.call(action, collection_list=te._task.collections),
                                                   mock.call(module_prefix, collection_list=te._task.collections)])
        action_loader.get.assert_called_once_with(
            module_prefix, task=te._task, connection=mock_connection,
            play_context=te._play_context, loader=te._loader,
            templar=mock_templar, shared_loader_obj=te._shared_loader_obj,
            collection_list=te._task.collections)
    def test_task_executor_get_handler_normal(self):
        """With no matching plugin at all, the 'normal' action handler is used."""
        te = TaskExecutor(
            host=MagicMock(),
            task=MagicMock(),
            job_vars={},
            play_context=MagicMock(),
            new_stdin=None,
            loader=DictDataLoader({}),
            shared_loader_obj=MagicMock(),
            final_q=MagicMock(),
        )
        action_loader = te._shared_loader_obj.action_loader
        # Neither the full action name nor its prefix resolves to a plugin.
        action_loader.has_plugin.return_value = False
        action_loader.get.return_value = mock.sentinel.handler
        action_loader.__contains__.return_value = False
        mock_connection = MagicMock()
        mock_templar = MagicMock()
        action = 'namespace.prefix_suffix'
        module_prefix = action.split('_')[0]
        te._task.action = action
        handler = te._get_action_handler(mock_connection, mock_templar)
        self.assertIs(mock.sentinel.handler, handler)
        action_loader.has_plugin.assert_has_calls([mock.call(action, collection_list=te._task.collections),
                                                   mock.call(module_prefix, collection_list=te._task.collections)])
        # Fallback: the generic module-execution action, with no collections.
        action_loader.get.assert_called_once_with(
            'ansible.legacy.normal', task=te._task, connection=mock_connection,
            play_context=te._play_context, loader=te._loader,
            templar=mock_templar, shared_loader_obj=te._shared_loader_obj,
            collection_list=None)
def test_task_executor_execute(self):
    """Smoke-test TaskExecutor._execute() through several configurations:
    a plain run, changed_when/failed_when expressions, a false 'when'
    conditional, and an include action."""
    fake_loader = DictDataLoader({})
    mock_host = MagicMock()
    mock_task = MagicMock()
    mock_task.args = dict()
    mock_task.retries = 0
    mock_task.delay = -1
    mock_task.register = 'foo'
    mock_task.until = None
    mock_task.changed_when = None
    mock_task.failed_when = None
    mock_task.post_validate.return_value = None
    # mock_task.async_val cannot be left unset, because on Python 3 MagicMock()
    # > 0 raises a TypeError There are two reasons for using the value 1
    # here: on Python 2 comparing MagicMock() > 0 returns True, and the
    # other reason is that if I specify 0 here, the test fails. ;)
    mock_task.async_val = 1
    mock_task.poll = 0
    mock_play_context = MagicMock()
    mock_play_context.post_validate.return_value = None
    mock_play_context.update_vars.return_value = None
    mock_connection = MagicMock()
    mock_connection.set_host_overrides.return_value = None
    mock_connection._connect.return_value = None
    mock_action = MagicMock()
    mock_queue = MagicMock()
    shared_loader = None
    new_stdin = None
    job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX")
    te = TaskExecutor(
        host=mock_host,
        task=mock_task,
        job_vars=job_vars,
        play_context=mock_play_context,
        new_stdin=new_stdin,
        loader=fake_loader,
        shared_loader_obj=shared_loader,
        final_q=mock_queue,
    )
    # Bypass connection and handler resolution; the mock action's run()
    # result is what _execute() post-processes.
    te._get_connection = MagicMock(return_value=mock_connection)
    te._get_action_handler = MagicMock(return_value=mock_action)
    mock_action.run.return_value = dict(ansible_facts=dict())
    # Baseline run with no conditionals set.
    res = te._execute()
    # changed_when expression evaluated against the result.
    mock_task.changed_when = MagicMock(return_value=AnsibleUnicode("1 == 1"))
    res = te._execute()
    mock_task.changed_when = None
    # failed_when expression evaluated against the result.
    mock_task.failed_when = MagicMock(return_value=AnsibleUnicode("1 == 1"))
    res = te._execute()
    mock_task.failed_when = None
    # A false 'when' conditional should short-circuit the run.
    mock_task.evaluate_conditional.return_value = False
    res = te._execute()
    mock_task.evaluate_conditional.return_value = True
    # Include action path: raw params plus extra args.
    mock_task.args = dict(_raw_params='foo.yml', a='foo', b='bar')
    mock_task.action = 'include'
    res = te._execute()
    # NOTE(review): none of the _execute() return values are asserted on;
    # this test only verifies the calls complete without raising.
def test_task_executor_poll_async_result(self):
    """_poll_async_result() must report failure for malformed async
    results and pass a finished result through untouched."""
    fake_loader = DictDataLoader({})
    mock_host = MagicMock()
    mock_task = MagicMock()
    # Tiny async/poll intervals keep the polling loop fast in the
    # failure cases below.
    mock_task.async_val = 0.1
    mock_task.poll = 0.05
    mock_play_context = MagicMock()
    mock_connection = MagicMock()
    mock_action = MagicMock()
    mock_queue = MagicMock()
    shared_loader = MagicMock()
    # ``action_loader`` is presumably the module-level ansible plugin
    # loader imported by this test module -- it is patched below.
    shared_loader.action_loader = action_loader
    new_stdin = None
    job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX")
    te = TaskExecutor(
        host=mock_host,
        task=mock_task,
        job_vars=job_vars,
        play_context=mock_play_context,
        new_stdin=new_stdin,
        loader=fake_loader,
        shared_loader_obj=shared_loader,
        final_q=mock_queue,
    )
    te._connection = MagicMock()

    def _get(*args, **kwargs):
        # async_status replacement whose run() returns a bogus result.
        mock_action = MagicMock()
        mock_action.run.return_value = dict(stdout='')
        return mock_action

    # testing with some bad values in the result passed to poll async,
    # and with a bad value returned from the mock action
    with patch.object(action_loader, 'get', _get):
        mock_templar = MagicMock()
        res = te._poll_async_result(result=dict(), templar=mock_templar)
        self.assertIn('failed', res)
        res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar)
        self.assertIn('failed', res)

    def _get(*args, **kwargs):
        # async_status replacement that reports the job as finished.
        mock_action = MagicMock()
        mock_action.run.return_value = dict(finished=1)
        return mock_action

    # now testing with good values
    with patch.object(action_loader, 'get', _get):
        mock_templar = MagicMock()
        res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar)
        self.assertEqual(res, dict(finished=1))
def test_recursive_remove_omit(self):
    """remove_omit() must strip omit-valued dict entries at every nesting
    depth while leaving list contents -- even omit tokens inside lists --
    untouched."""
    token = 'POPCORN'
    source = {
        'foo': 'bar',
        'baz': 1,
        'qux': ['one', 'two', 'three'],
        'subdict': {
            'remove': token,
            'keep': 'not_popcorn',
            'subsubdict': {'remove': token, 'keep': 'not_popcorn'},
            'a_list': [token],
        },
        'a_list': [token],
        'list_of_lists': [['some', 'thing']],
        'list_of_dicts': [{'remove': token}],
    }
    # Only dict *values* equal to the token disappear; list elements stay.
    want = {
        'foo': 'bar',
        'baz': 1,
        'qux': ['one', 'two', 'three'],
        'subdict': {
            'keep': 'not_popcorn',
            'subsubdict': {'keep': 'not_popcorn'},
            'a_list': [token],
        },
        'a_list': [token],
        'list_of_lists': [['some', 'thing']],
        'list_of_dicts': [{}],
    }
    self.assertEqual(want, remove_omit(source, token))
| gpl-3.0 |
Plexxi/st2 | st2api/st2api/cmd/api.py | 3 | 2883 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
# NOTE: It's important that we perform monkey patch as early as possible before any other modules
# are important, otherwise SSL support for MongoDB won't work.
# See https://github.com/StackStorm/st2/issues/4832 and https://github.com/gevent/gevent/issues/1016
# for details.
from st2common.util.monkey_patch import monkey_patch
monkey_patch()
import eventlet
from oslo_config import cfg
from eventlet import wsgi
from st2common import log as logging
from st2common.service_setup import setup as common_setup
from st2common.service_setup import teardown as common_teardown
from st2api import config
config.register_opts(ignore_errors=True)
from st2api import app
from st2api.validation import validate_rbac_is_correctly_configured
__all__ = ["main"]
LOG = logging.getLogger(__name__)
# How much time to give to the request in progress to finish in seconds before killing them
WSGI_SERVER_REQUEST_SHUTDOWN_TIME = 2
def _setup():
    """Run common StackStorm service setup for the API process.

    Advertises the API's listen address/port through the service
    registry and then performs the pre-run RBAC configuration check.
    """
    capabilities = {
        "name": "api",
        "listen_host": cfg.CONF.api.host,
        "listen_port": cfg.CONF.api.port,
        "type": "active",
    }
    common_setup(
        service="api",
        config=config,
        setup_db=True,
        register_mq_exchanges=True,
        register_signal_handlers=True,
        register_internal_trigger_types=True,
        service_registry=True,
        capabilities=capabilities,
    )
    # Additional pre-run time checks
    validate_rbac_is_correctly_configured()
def _run_server():
    """Run the blocking eventlet WSGI server for the ST2 API app.

    Returns:
        int: 0 when the server loop exits cleanly.
    """
    host = cfg.CONF.api.host
    port = cfg.CONF.api.port
    LOG.info("(PID=%s) ST2 API is serving on http://%s:%s.", os.getpid(), host, port)
    # One greenthread per in-flight request, capped at eventlet's default.
    max_pool_size = eventlet.wsgi.DEFAULT_MAX_SIMULTANEOUS_REQUESTS
    worker_pool = eventlet.GreenPool(max_pool_size)
    sock = eventlet.listen((host, port))
    wsgi.server(
        sock, app.setup_app(), custom_pool=worker_pool, log=LOG, log_output=False
    )
    return 0
def _teardown():
    """Run the common StackStorm service teardown (counterpart of _setup())."""
    common_teardown()
def main():
    """Entry point: set up the service, run the WSGI server, tear down.

    Returns:
        int: process exit code (the server's return value, or 1 on error).
    """
    try:
        _setup()
        return _run_server()
    except SystemExit as exit_code:
        # Propagate the original exit *status*.  The previous code passed
        # the SystemExit exception object itself to sys.exit(), which
        # CPython treats as a non-integer code: it prints the object and
        # forces exit status 1, mangling clean shutdowns.
        sys.exit(exit_code.code)
    except Exception:
        LOG.exception("(PID=%s) ST2 API quit due to exception.", os.getpid())
        return 1
    finally:
        _teardown()
| apache-2.0 |
nicoddemus/backtrader | samples/quickstart05.py | 1 | 3278 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime # For datetime objects
import os.path # To manage paths
import sys # To find out the script name (in argv[0])
# Import the backtrader platform
import backtrader as bt
# Create a Stratey
# Create a Stratey
class TestStrategy(bt.Strategy):
    """Toy strategy: create a buy order after two consecutive down closes."""

    def log(self, txt, dt=None):
        ''' Logging function fot this strategy'''
        stamp = dt or self.datas[0].datetime.date(0)
        print('%s, %s' % (stamp.isoformat(), txt))

    def __init__(self):
        # Shortcut to the close-price line of the primary data feed.
        self.dataclose = self.datas[0].close

    def next(self):
        # Log the closing price of the current bar.
        self.log('Close, %.2f' % self.dataclose[0])
        # Two consecutive falling closes -> buy with default parameters.
        if self.dataclose[0] < self.dataclose[-1] < self.dataclose[-2]:
            self.log('BUY CREATE, %.2f' % self.dataclose[0])
            self.buy()
if __name__ == '__main__':
    # Create a cerebro entity
    cerebro = bt.Cerebro()

    # Add a strategy
    cerebro.addstrategy(TestStrategy)

    # Datas are in a subfolder of the samples. Need to find where the script is
    # because it could have been called from anywhere
    modpath = os.path.dirname(os.path.abspath(sys.argv[0]))
    datapath = os.path.join(modpath, './datas/yahoo/oracle-1995-2014.csv')

    # Create a Data Feed.  NOTE: the original used the literals 01/01 for
    # the date, which are a SyntaxError on Python 3 (leading-zero integer
    # literals were removed); plain 1 is identical on Python 2.
    data = bt.feeds.YahooFinanceCSVData(
        dataname=datapath,
        # Do not pass values before this date
        fromdate=datetime.datetime(2000, 1, 1),
        # Do not pass values after this date
        todate=datetime.datetime(2000, 12, 31),
        # The CSV is stored newest-first
        reversed=True)

    # Add the Data Feed to Cerebro
    cerebro.adddata(data)

    # Set our desired cash start
    cerebro.broker.setcash(100000.0)

    # Print out the starting conditions
    print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())

    # Run over everything
    cerebro.run()

    # Print out the final result
    print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
| gpl-3.0 |
dednal/chromium.src | tools/telemetry/third_party/pyserial/serial/tools/list_ports_linux.py | 90 | 5399 | #!/usr/bin/env python
# portable serial port access with python
#
# This is a module that gathers a list of serial ports including details on
# GNU/Linux systems
#
# (C) 2011-2013 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
import glob
import sys
import os
import re
try:
    import subprocess
except ImportError:
    # Very old Pythons without the subprocess module: emulate it with
    # os.popen4.
    def popen(argv):
        """Run *argv* and return its combined output, stripped."""
        try:
            si, so = os.popen4(' '.join(argv))
            return so.read().strip()
        except:
            raise IOError('lsusb failed')
else:
    def popen(argv):
        """Run *argv* and return its combined stdout+stderr, stripped."""
        try:
            return subprocess.check_output(argv, stderr=subprocess.STDOUT).strip()
        except:
            raise IOError('lsusb failed')

# The comports function is expected to return an iterable that yields tuples of
# 3 strings: port name, human readable description and a hardware ID.
#
# as currently no method is known to get the second two strings easily, they
# are currently just identical to the port name.

# try to detect the OS so that a device can be selected...
plat = sys.platform.lower()
def read_line(filename):
    """Read and return the first line of *filename*, stripped of
    surrounding whitespace; return None if the file cannot be read."""
    try:
        # 'with' guarantees the handle is closed even if readline()
        # raises; the original leaked the file descriptor in that case.
        with open(filename) as f:
            return f.readline().strip()
    except IOError:
        return None
def re_group(regexp, text):
    """search for regexp in text, return 1st group on match (else None)"""
    # The original branched on ``sys.version < '3'`` -- a lexicographic
    # string comparison that breaks at Python 10 -- and unconditionally
    # decoded on Python 3, crashing on str input.  Decoding only actual
    # bytes handles both str and bytes on both Python 2 and 3.
    if isinstance(text, bytes):
        # subprocess output is bytes-like; decode before matching
        text = text.decode('ascii', 'replace')
    m = re.search(regexp, text)
    if m: return m.group(1)
# try to extract descriptions from sysfs. this was done by experimenting,
# no guarantee that it works for all devices or in the future...
def usb_sysfs_hw_string(sysfs_path):
    """given a path to a usb device in sysfs, return a string describing it

    The string has the form 'USB VID:PID=xxxx:xxxx[ SNR=...]', read from
    the idVendor/idProduct/serial files in that sysfs directory.
    """
    # NOTE(review): bus/dev are computed but never used below; the split
    # also assumes exactly one '-' in the directory name -- confirm
    # before removing or relying on it.
    bus, dev = os.path.basename(os.path.realpath(sysfs_path)).split('-')
    snr = read_line(sysfs_path+'/serial')
    if snr:
        snr_txt = ' SNR=%s' % (snr,)
    else:
        snr_txt = ''
    return 'USB VID:PID=%s:%s%s' % (
        read_line(sysfs_path+'/idVendor'),
        read_line(sysfs_path+'/idProduct'),
        snr_txt
        )
def usb_lsusb_string(sysfs_path):
    """Return a human readable description for the USB device behind
    *sysfs_path*, obtained by running ``lsusb -v``; falls back to the
    sysfs directory name when lsusb cannot be run."""
    base = os.path.basename(os.path.realpath(sysfs_path))
    # sysfs USB directory names look like '<bus>-<port...>'.
    bus = base.split('-')[0]
    try:
        dev = int(read_line(os.path.join(sysfs_path, 'devnum')))
        desc = popen(['lsusb', '-v', '-s', '%s:%s' % (bus, dev)])
        # descriptions from device
        iManufacturer = re_group('iManufacturer\s+\w+ (.+)', desc)
        iProduct = re_group('iProduct\s+\w+ (.+)', desc)
        iSerial = re_group('iSerial\s+\w+ (.+)', desc) or ''
        # descriptions from kernel
        idVendor = re_group('idVendor\s+0x\w+ (.+)', desc)
        idProduct = re_group('idProduct\s+0x\w+ (.+)', desc)
        # create descriptions. prefer text from device, fall back to the others
        return '%s %s %s' % (iManufacturer or idVendor, iProduct or idProduct, iSerial)
    except IOError:
        return base
def describe(device):
    """\
    Get a human readable description.
    For USB-Serial devices try to run lsusb to get a human readable description.
    For USB-CDC devices read the description from sysfs.
    Falls back to the device's base name when no sysfs entry matches.
    """
    base = os.path.basename(device)
    # USB-Serial devices
    sys_dev_path = '/sys/class/tty/%s/device/driver/%s' % (base, base)
    if os.path.exists(sys_dev_path):
        sys_usb = os.path.dirname(os.path.dirname(os.path.realpath(sys_dev_path)))
        return usb_lsusb_string(sys_usb)
    # USB-CDC devices
    sys_dev_path = '/sys/class/tty/%s/device/interface' % (base,)
    if os.path.exists(sys_dev_path):
        return read_line(sys_dev_path)
    # USB Product Information
    sys_dev_path = '/sys/class/tty/%s/device' % (base,)
    if os.path.exists(sys_dev_path):
        product_name_file = os.path.dirname(os.path.realpath(sys_dev_path)) + "/product"
        if os.path.exists(product_name_file):
            return read_line(product_name_file)
    return base
def hwinfo(device):
    """Try to get a HW identification using sysfs.

    Returns a VID:PID-style string for USB devices, the PCI id for PCI
    serial ports, or 'n/a' when nothing can be determined.
    """
    base = os.path.basename(device)
    if os.path.exists('/sys/class/tty/%s/device' % (base,)):
        # PCI based devices
        sys_id_path = '/sys/class/tty/%s/device/id' % (base,)
        if os.path.exists(sys_id_path):
            return read_line(sys_id_path)
        # USB-Serial devices
        sys_dev_path = '/sys/class/tty/%s/device/driver/%s' % (base, base)
        if os.path.exists(sys_dev_path):
            sys_usb = os.path.dirname(os.path.dirname(os.path.realpath(sys_dev_path)))
            return usb_sysfs_hw_string(sys_usb)
        # USB-CDC devices
        if base.startswith('ttyACM'):
            sys_dev_path = '/sys/class/tty/%s/device' % (base,)
            if os.path.exists(sys_dev_path):
                return usb_sysfs_hw_string(sys_dev_path + '/..')
    return 'n/a'    # XXX directly remove these from the list?
def comports():
    """Return (device, description, hw_id) tuples for likely serial devices."""
    patterns = ('/dev/ttyS*', '/dev/ttyUSB*', '/dev/ttyACM*')
    devices = []
    for pattern in patterns:
        devices.extend(glob.glob(pattern))
    return [(dev, describe(dev), hwinfo(dev)) for dev in devices]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# test
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# test
if __name__ == '__main__':
    for port, desc, hwid in sorted(comports()):
        # print() with a single argument works identically as a Python 2
        # print statement and as the Python 3 function; the bare
        # ``print "..."`` statement used before is a SyntaxError on 3.x.
        print("%s: %s [%s]" % (port, desc, hwid))
| bsd-3-clause |
oculusstorystudio/kraken | Python/kraken/core/objects/attributes/attribute.py | 1 | 6743 | """Kraken - objects.Attributes.Attribute module.
Classes:
Attribute - Base Attribute.
"""
from kraken.core.objects.scene_item import SceneItem
class Attribute(SceneItem):
    """Attribute object.

    Base class for typed attributes: holds a value, an optional
    connection to a driving attribute, and keyable / lock / animatable
    flags.  Concrete subclasses implement getRTVal()/validateValue().
    """

    def __init__(self, name, value, parent=None, metaData=None):
        super(Attribute, self).__init__(name, metaData=metaData)
        self._value = value
        self._connection = None   # attribute currently driving this one
        self._keyable = True
        self._lock = False
        self._animatable = True
        self._callback = None     # invoked with the new value on setValue()

        if parent is not None:
            if parent.getTypeName() != 'AttributeGroup':
                raise ValueError("Parent: " + parent.getName() +
                                 " is not an Attribute Group!")

            parent.addAttribute(self)

    # ==============
    # Value Methods
    # ==============
    def getValue(self):
        """Returns the value of the attribute.

        Returns: Attribute Value.
        """
        return self._value

    def setValue(self, value):
        """Sets attribute value and fires the change callback, if any.

        Args:
            value: Value to set the attribute to.

        Returns:
            bool: True if successful.
        """
        self._value = value
        if self._callback is not None:
            self._callback(value)

        return True

    def setValueChangeCallback(self, callback):
        """Registers a callback fired with the new value on each setValue().

        Args:
            callback: callable invoked as ``callback(value)``.

        Returns:
            bool: True if successful.
        """
        self._callback = callback

        return True

    def getKeyable(self):
        """Returns the keyable state of the attribute.

        Returns:
            bool: Keyable state of the attribute.
        """
        return self._keyable

    def setKeyable(self, value):
        """Sets the keyable state of the attribute.

        Args:
            value (bool): keyable state.

        Returns:
            bool: True if successful.

        Raises:
            TypeError: if value is not a bool.
        """
        if type(value) is not bool:
            raise TypeError("Value is not of type 'bool'.")

        self._keyable = value

        return True

    def getLock(self):
        """Returns the Lock state of the attribute.

        Returns:
            bool: Lock state of the attribute.
        """
        return self._lock

    def setLock(self, value):
        """Sets the lock state of the attribute.

        Args:
            value (bool): lock state.

        Returns:
            bool: True if successful.

        Raises:
            TypeError: if value is not a bool.
        """
        if type(value) is not bool:
            raise TypeError("Value is not of type 'bool'.")

        self._lock = value

        return True

    def getAnimatable(self):
        """Returns the animatable state of the attribute.

        Returns:
            bool: True if Animatable state of the attribute.
        """
        return self._animatable

    def setAnimatable(self, value):
        """Sets the animatable state of the attribute.

        Args:
            value (bool): animatable state.

        Returns:
            bool: True if successful.

        Raises:
            TypeError: if value is not a bool.
        """
        if type(value) is not bool:
            raise TypeError("Value is not of type 'bool'.")

        self._animatable = value

        return True

    def getRTVal(self):
        """Returns and RTVal object for this attribute.

        Note:
            This method should be re-implemented in concrete attribute classes.

        Returns:
            RTVal: RTVal object for this attribute.

        Raises:
            NotImplemented: Must be implemented by concrete attribute classes.
        """
        raise NotImplementedError("This method should be re-implemented in concrete attribute classes.")

    def validateValue(self, value):
        """Validates the incoming value is the correct type.

        Note:
            This method should be re-implemented in concrete attribute classes.

        Args:
            value: value to check the type of.

        Returns:
            bool: True if valid.

        Raises:
            NotImplemented: This method should be re-implemented in concrete attribute classes.
        """
        raise NotImplementedError("This method should be re-implemented in concrete attribute classes.")

    # ===================
    # Connection Methods
    # ===================
    def isConnected(self):
        """Returns whether the attribute is connected or not.

        Returns:
            bool: True if is connected.
        """
        if self._connection is None:
            return False

        return True

    def getConnection(self):
        """Returns the connected attribute.

        Returns:
            Object: attribute driving this attribute.
        """
        return self._connection

    def connect(self, attribute, lock=False):
        """Connects this attribute with another.

        Args:
            attribute (Object): attribute that will drive this one.
            lock (bool): if True, also lock this attribute.

        Returns:
            bool: True if successful.
        """
        # NOTE(review): on the first connection self._connection is still
        # None here -- confirm SceneItem.removeSource() tolerates None.
        self.removeSource(self._connection)
        self._connection = attribute
        self.addSource(attribute)

        if lock: # only lock if true, otherwise don't change # Currently is not working for visibility attr
            self.setLock(lock)

        return True

    def disconnect(self):
        """Clears the connection of this attribute.

        Returns:
            bool: True if successful.
        """
        self._connection = None

        return True

    # ====================
    # Persistence Methods
    # ====================
    def jsonEncode(self, saver):
        """Encodes the object to a JSON structure.

        Args:
            saver (Object): saver object.

        Returns:
            Dict: A JSON structure containing the data for this SceneItem.
        """
        # Record the class hierarchy (excluding object) so the loader can
        # reconstruct the concrete attribute type.
        classHierarchy = []
        for cls in type.mro(type(self)):
            if cls == object:
                break

            classHierarchy.append(cls.__name__)

        jsonData = {
            '__typeHierarchy__': classHierarchy,
            'name': self._name,
            'value': saver.encodeValue(self._value),
            'parent': None
        }

        if self.getParent() is not None:
            jsonData['parent'] = self.getParent().getName()

        return jsonData

    def jsonDecode(self, loader, jsonData):
        """Restores this attribute's name and value from a JSON structure.

        Args:
            loader (Object): Loader object.
            jsonData (Dict): JSON object structure.

        Returns:
            bool: True if successful.
        """
        # NOTE(review): this writes ``self.name`` while jsonEncode() and
        # __init__ use ``self._name`` -- confirm whether this should be
        # ``self._name`` (or SceneItem's setter) instead.
        self.name = jsonData['name']
        self._value = jsonData['value']

        return True
| bsd-3-clause |
YangWanjun/areaparking | areaparking/db_router.py | 1 | 1176 | from utils import constants
# Django app labels whose models live in the "revolution" database.
REVOLUTION_MODULES = ("revolution",)


class DbRouter(object):
    """Django database router.

    Models belonging to the apps in REVOLUTION_MODULES are routed to the
    revolution database; every other model uses the default database.
    (The previous docstrings were copy-pasted from Django's auth-router
    example and referred to a non-existent 'auth_db'.)
    """

    def _db_for_app(self, app_label):
        # Single source of truth for the app-label -> database mapping,
        # previously duplicated in db_for_read and db_for_write.
        if app_label in REVOLUTION_MODULES:
            return constants.DATABASE_REVOLUTION
        return constants.DATABASE_DEFAULT

    def db_for_read(self, model, **hints):
        """Route reads by the model's app label."""
        return self._db_for_app(model._meta.app_label)

    def db_for_write(self, model, **hints):
        """Route writes by the model's app label (same rule as reads)."""
        return self._db_for_app(model._meta.app_label)

    def allow_relation(self, obj1, obj2, **hints):
        """Allow relations between two revolution models; otherwise defer
        to other routers by returning None."""
        if obj1._meta.app_label in REVOLUTION_MODULES and \
                obj2._meta.app_label in REVOLUTION_MODULES:
            return True
        return None

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        """Only let revolution apps migrate on the revolution database;
        return None (no opinion) for everything else."""
        if app_label in REVOLUTION_MODULES:
            return db == constants.DATABASE_REVOLUTION
        return None
maclandrol/ete | ete2/tools/phylobuild_lib/logger.py | 3 | 2706 | # #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
import logging
# Module-wide indentation level used by IndentedFormatter.
__LOGINDENT__ = 0

class IndentedFormatter(logging.Formatter):
    """logging.Formatter exposing the current global indent as %(indent)s."""

    def format(self, record):
        # Make the module-wide indentation available to the format string.
        record.indent = ' ' * __LOGINDENT__
        return logging.Formatter.format(self, record)
def set_logindent(x):
    """Set the global log indentation level to *x*."""
    global __LOGINDENT__
    __LOGINDENT__ = x

def logindent(x):
    """Increase (or decrease, if *x* is negative) the global indentation."""
    global __LOGINDENT__
    __LOGINDENT__ += x

def get_logindent():
    """Return the current global indentation level."""
    return __LOGINDENT__
def get_main_log(handler, level=20):
    """Create and configure the "main" logger writing to *handler*.

    Registers ETE's color-coded level names and attaches a stream
    handler using IndentedFormatter.
    """
    log = logging.getLogger("main")
    log.setLevel(level)
    formatter = IndentedFormatter("%(levelname) 4s@@1: - %(indent)s %(message)s")
    # Color-coded level names consumed by the terminal renderer.
    for level_no, level_name in ((10, "@@3,12:DEBUG"),
                                 (20, "@@1,3:INFO"),
                                 (22, "@@1,3:INFO"),
                                 (24, "@@1,3:INFO"),
                                 (26, "@@1,3:INFO"),
                                 (28, "@@1,3:INFO"),
                                 (30, "@@2,11:WRNG"),
                                 (40, "@@2,10:ERR "),
                                 (50, "@@2,10:DISASTER")):
        logging.addLevelName(level_no, level_name)
    stream_handler = logging.StreamHandler(handler)
    stream_handler.setFormatter(formatter)
    log.addHandler(stream_handler)
    return log
| gpl-3.0 |
jan-scholz/minc-scripts | tag_cbind.py | 1 | 2750 | #!/usr/bin/env python
#
# 2015-04-10
# concate two tag files in column direction for input in tagtoxfm
from optparse import OptionParser
import numpy as np
from os import path
from StringIO import StringIO
def readTagFile(tagfilename, verbose=False):
    """read tag file and return numpy array with tags
    (6 columns, a point per row)
    returns tags as numpy array"""
    with open(tagfilename, 'r') as f:
        filetype = f.readline().strip()
        nvolumes = f.readline().split('=')[1].strip().strip(';')
        lines = f.readlines()
    # Locate the "Points =" marker; everything after it is coordinate data.
    # NOTE(review): if no line contains 'points', the loop leaves i at the
    # last index and the slice below silently misparses -- confirm inputs
    # are always well-formed tag files.
    for i,l in enumerate(lines):
        if 'points' in l.lower():
            break
    lines = ''.join(lines[(i+1):]).rstrip().rstrip(';')
    if filetype.lower() != 'mni tag point file':
        raise ValueError('not a "MNI Tag Point File", claims to be', filetype)
    if nvolumes != '1':
        raise ValueError('reading of only 1 volume supported, not:', nvolumes)
    f = StringIO(lines)
    tags = np.loadtxt(f)
    if verbose:
        print 'read %i points from tag file %s' % (tags.shape[0],tagfilename)
    return tags
def cbind_tagfiles(outname, filename1, filename2, verbose=False):
    """read two tagfiles and column-bind them
    (keeps only coordinates, last 3 columns of each file get discarded)
    writes a "Volumes = 2" MNI tag file to *outname* and
    returns combined tags numpy array"""
    tags1 = readTagFile(filename1, verbose)
    tags2 = readTagFile(filename2, verbose)
    if tags1.shape[0] != tags2.shape[0]:
        raise ValueError('input files have different number of rows')
    # keep only the x/y/z coordinates (columns 0-2) of each file
    outtags = np.hstack((tags1[:,0:3],tags2[:,0:3]))
    footer = ';'
    header = """MNI Tag Point File
Volumes = 2;
%% File created automatically by %s. Input tag files:
%% %s, %s
Points = """ % (path.basename(__file__), filename1, filename2)
    np.savetxt(outname, outtags, fmt='%g', header=header, footer=footer, comments='')
    return outtags
def main():
    """Parse command-line options and column-bind the two input tag files."""
    usage = "usage: %prog -o OUTPUT TAGFILE1 TAGFILE2"
    # NOTE(review): this description string looks copy-pasted from a
    # different tool -- this script concatenates tag files.
    description = "smooth labels through prob. exchange"
    parser = OptionParser(usage=usage, description=description)
    parser.add_option("--output", "-o", dest="output",
                      help="output file",
                      type="string")
    parser.add_option("-v", "--verbose",
                      help='verbose output', action='store_true', default=False)
    (options, args) = parser.parse_args()

    # NOTE(review): the usage says "exactly two" tag files but this only
    # enforces "at least two"; extra arguments are silently ignored.
    if (len(args) < 2):
        parser.error("require exactly two tag files as input")
    if options.output is None:
        parser.error("output is missing")
    for f in args:
        if not path.exists(f):
            raise IOError('could not find file: ' + f)
    cbind_tagfiles(options.output, args[0], args[1], verbose=options.verbose)


if __name__ == "__main__":
    main()
| mit |
jasondunsmore/heat | heat/engine/clients/os/glance.py | 2 | 4221 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient import client as gc
from glanceclient import exc
from glanceclient.openstack.common.apiclient import exceptions
from heat.engine.clients import client_plugin
from heat.engine.clients import os as os_client
from heat.engine import constraints
CLIENT_NAME = 'glance'
class GlanceClientPlugin(client_plugin.ClientPlugin):
    """Heat client plugin creating and wrapping a Glance (image service)
    v1 client for the current request context."""

    # Modules whose exception classes this plugin recognizes.
    exceptions_module = [exceptions, exc]

    # Binds both the service_types list and the IMAGE constant ('image').
    service_types = [IMAGE] = ['image']

    def _create(self):
        """Build the authenticated glanceclient instance from the context."""
        con = self.context
        endpoint_type = self._get_client_option(CLIENT_NAME, 'endpoint_type')
        endpoint = self.url_for(service_type=self.IMAGE,
                                endpoint_type=endpoint_type)
        args = {
            'auth_url': con.auth_url,
            'service_type': self.IMAGE,
            'project_id': con.tenant_id,
            'token': self.auth_token,
            'endpoint_type': endpoint_type,
            'cacert': self._get_client_option(CLIENT_NAME, 'ca_file'),
            'cert_file': self._get_client_option(CLIENT_NAME, 'cert_file'),
            'key_file': self._get_client_option(CLIENT_NAME, 'key_file'),
            'insecure': self._get_client_option(CLIENT_NAME, 'insecure')
        }
        return gc.Client('1', endpoint, **args)

    def _find_with_attr(self, entity, **kwargs):
        """Find a item for entity with attributes matching ``**kwargs``.

        Raises NotFound when nothing matches and NoUniqueMatch when more
        than one item does.
        """
        matches = list(self._findall_with_attr(entity, **kwargs))
        num_matches = len(matches)
        if num_matches == 0:
            msg = ("No %(name)s matching %(args)s.") % {
                'name': entity,
                'args': kwargs
            }
            raise exceptions.NotFound(msg)
        elif num_matches > 1:
            raise exceptions.NoUniqueMatch()
        else:
            return matches[0]

    def _findall_with_attr(self, entity, **kwargs):
        """Find all items for entity with attributes matching ``**kwargs``."""
        func = getattr(self.client(), entity)
        filters = {'filters': kwargs}
        return func.list(**filters)

    def is_not_found(self, ex):
        """Return True when *ex* represents a 404 from either client layer."""
        return isinstance(ex, (exceptions.NotFound, exc.HTTPNotFound))

    def is_over_limit(self, ex):
        """Return True when *ex* is a rate/quota-limit error."""
        return isinstance(ex, exc.HTTPOverLimit)

    def is_conflict(self, ex):
        """Return True when *ex* is a conflict from either client layer."""
        return isinstance(ex, (exceptions.Conflict, exc.Conflict))

    def find_image_by_name_or_id(self, image_identifier):
        """Return the ID for the specified image name or identifier.

        :param image_identifier: image name or a UUID-like identifier
        :returns: the id of the requested :image_identifier:
        """
        return self._find_image_id(self.context.tenant_id,
                                   image_identifier)

    @os_client.MEMOIZE_FINDER
    def _find_image_id(self, tenant_id, image_identifier):
        # tenant id in the signature is used for the memoization key,
        # that would differentiate similar resource names across tenants.
        return self.get_image(image_identifier).id

    def get_image(self, image_identifier):
        """Return the image object for the specified image name/id.

        :param image_identifier: image name
        :returns: an image object with name/id :image_identifier:
        """
        try:
            # Fast path: treat the identifier as an image ID.
            return self.client().images.get(image_identifier)
        except exc.HTTPNotFound:
            # Fall back to a lookup by name.
            return self._find_with_attr('images', name=image_identifier)
class ImageConstraint(constraints.BaseCustomConstraint):
    """Custom constraint validating that a glance image name/id resolves
    to exactly one existing image."""

    # "not found" and ambiguous-name lookups count as validation failures.
    expected_exceptions = (exceptions.NotFound, exceptions.NoUniqueMatch)

    resource_client_name = CLIENT_NAME
    resource_getter_name = 'find_image_by_name_or_id'
| apache-2.0 |
ppiotr/Bibedit-some-refactoring | modules/miscutil/lib/dbquery.py | 2 | 17618 | ## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
CDS Invenio utilities to run SQL queries.
The main API functions are:
- run_sql()
- run_sql_cached()
- run_sql_many()
but see the others as well.
"""
__revision__ = "$Id$"
# dbquery clients can import these from here:
# pylint: disable-msg=W0611
from MySQLdb import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, \
InternalError, NotSupportedError, \
ProgrammingError
import warnings
import string
import time
import marshal
import re
import sys
from zlib import compress, decompress
from thread import get_ident
from invenio.config import CFG_ACCESS_CONTROL_LEVEL_SITE, \
CFG_MISCUTIL_SQL_MAX_CACHED_QUERIES, CFG_MISCUTIL_SQL_USE_SQLALCHEMY, \
CFG_MISCUTIL_SQL_RUN_SQL_MANY_LIMIT
from invenio.errorlib import register_exception
if CFG_MISCUTIL_SQL_USE_SQLALCHEMY:
try:
import sqlalchemy.pool as pool
import MySQLdb as mysqldb
mysqldb = pool.manage(mysqldb, use_threadlocal=True)
connect = mysqldb.connect
except ImportError:
CFG_MISCUTIL_SQL_USE_SQLALCHEMY = False
from MySQLdb import connect
else:
from MySQLdb import connect
## DB config variables. These variables are to be set in
## invenio-local.conf by admins and then replaced in situ in this file
## by calling "inveniocfg --update-dbexec".
## Note that they are defined here and not in config.py in order to
## prevent them from being exported accidentally elsewhere, as no-one
## should know DB credentials but this file.
## FIXME: this is more of a blast-from-the-past that should be fixed
## both here and in inveniocfg when the time permits.
CFG_DATABASE_HOST = 'localhost'
CFG_DATABASE_PORT = '3306'
CFG_DATABASE_NAME = 'cdsinvenio'
CFG_DATABASE_USER = 'cdsinvenio'
CFG_DATABASE_PASS = 'my123p$ss'

# Per-thread MySQL connection cache, keyed by thread identifier.
_DB_CONN = {}

# Query-result cache; the NameError guard preserves previously cached
# results when this module is re-executed (e.g. on module reload).
try:
    _db_cache
except NameError:
    _db_cache = {}
def _db_login(relogin=0):
    """Login to the database.

    Return a MySQL connection.  One connection is kept per thread in
    _DB_CONN (unless SQLAlchemy pooling is in use, which manages its own
    pool).  With relogin=1 a fresh connection always replaces any cached
    one for the calling thread.
    """
    ## Note: we are using "use_unicode=False", because we want to
    ## receive strings from MySQL as Python UTF-8 binary string
    ## objects, not as Python Unicode string objects, as of yet.
    ## Note: "charset='utf8'" is needed for recent MySQLdb versions
    ## (such as 1.2.1_p2 and above).  For older MySQLdb versions such
    ## as 1.2.0, an explicit "init_command='SET NAMES utf8'" parameter
    ## would constitute an equivalent.  But we are not bothering with
    ## older MySQLdb versions here, since we are recommending to
    ## upgrade to more recent versions anyway.
    # Build the connection parameters once instead of repeating the same
    # connect() call in three places.
    connection_params = dict(host=CFG_DATABASE_HOST,
                             port=int(CFG_DATABASE_PORT),
                             db=CFG_DATABASE_NAME,
                             user=CFG_DATABASE_USER,
                             passwd=CFG_DATABASE_PASS,
                             use_unicode=False, charset='utf8')
    if CFG_MISCUTIL_SQL_USE_SQLALCHEMY:
        return connect(**connection_params)
    thread_ident = get_ident()
    # Reuse the thread's cached connection unless a relogin was forced or
    # no connection exists yet.  ("x in d" replaces the deprecated
    # dict.has_key().)
    if relogin or thread_ident not in _DB_CONN:
        _DB_CONN[thread_ident] = connect(**connection_params)
    return _DB_CONN[thread_ident]
def _db_logout():
    """Drop the calling thread's cached DB connection, if any."""
    # pop() with a default silently ignores a missing entry, exactly like
    # the try/del/except KeyError it replaces.
    _DB_CONN.pop(get_ident(), None)
def run_sql_cached(sql, param=None, n=0, with_desc=0, affected_tables=['bibrec']):
    """
    Run the SQL query and cache the SQL command for later reuse.

    @param param: tuple of string params to insert in the query
        (see notes below)
    @param n: number of tuples in result (0 for unbounded)
    @param with_desc: if true, will return a
        DB API 7-tuple describing columns in query
    @param affected_tables: is a list of tablenames of affected tables,
        used to decide whether we should update the cache or whether we
        can return cached result, depending on the last modification time
        for corresponding tables.  If empty, and if the cached result is
        present in the cache, always return the cached result without
        recomputing it.  (This is useful to speed up queries that operate
        on objects that virtually never change, e.g. list of defined
        logical fields, that remain usually constant in between Apache
        restarts.  Note that this would be a dangerous default for any
        query.)
    @return: the result as provided by run_sql()

    Note that it is pointless and even wrong to use this function with
    SQL commands different from SELECT.

    NOTE(review): the mutable default value of `affected_tables` is an
    anti-pattern, but it is only read (never mutated) below, so it is
    harmless here.
    """
    ## FIXME: The code below, checking table update times, was found
    ## to be slow in user storm situations.  So let us rather run SQL
    ## statement live; it seems faster to let MySQL use its own cache
    ## than to constantly verify table update time.  Later, a proper
    ## time-driven data cacher might be introduced here.  Or, better
    ## yet, we can plug dedicated data cachers to every place that
    ## called run_sql_cached.
    return run_sql(sql, param, n, with_desc)
    # ------------------------------------------------------------------
    # Everything below is UNREACHABLE: the early return above deliberately
    # disables the caching machinery per the FIXME.  It is kept so the
    # cache logic can be reinstated later.
    # ------------------------------------------------------------------
    global _db_cache
    if CFG_ACCESS_CONTROL_LEVEL_SITE == 3:
        # do not connect to the database as the site is closed for maintenance:
        return []
    # Cache key is the full textual identity of the query invocation.
    key = repr((sql, param, n, with_desc))
    # Garbage collecting needed?  (crude: drop the whole cache when full)
    if len(_db_cache) >= CFG_MISCUTIL_SQL_MAX_CACHED_QUERIES:
        _db_cache = {}
    # Query already in the cache?  Recompute when absent, or when any
    # affected table was modified after the cached timestamp (string
    # comparison of "YYYY-MM-DD HH:MM:SS" values).
    if not _db_cache.has_key(key) or \
       (affected_tables and _db_cache[key][1] <= max([get_table_update_time(table) for table in affected_tables])):
        # Let's update the cache
        result = run_sql(sql, param, n, with_desc)
        _db_cache[key] = (result, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        ### log_sql_query_cached(key, result, False) ### UNCOMMENT ONLY IF you REALLY want to log all queries
    else:
        result = _db_cache[key][0]
        ### log_sql_query_cached(key, result, True) ### UNCOMMENT ONLY IF you REALLY want to log all queries
    return result
def run_sql(sql, param=None, n=0, with_desc=0):
    """Run SQL on the server with PARAM and return result.

    @param param: tuple of string params to insert in the query (see
        notes below)
    @param n: number of tuples in result (0 for unbounded)
    @param with_desc: if True, will return a DB API 7-tuple describing
        columns in query.
    @return: If SELECT, SHOW, DESCRIBE statements, return tuples of
        data, followed by description if parameter with_desc is
        provided.  If INSERT, return last row id.  Otherwise return
        SQL result as provided by database.
    @note: When the site is closed for maintenance (as governed by the
        config variable CFG_ACCESS_CONTROL_LEVEL_SITE), do not attempt
        to run any SQL queries but return empty list immediately.
        Useful to be able to have the website up while MySQL database
        is down for maintenance, hot copies, table repairs, etc.
    @note: In case of problems, exceptions are returned according to
        the Python DB API 2.0.  The client code can import them from
        this file and catch them.
    """
    if CFG_ACCESS_CONTROL_LEVEL_SITE == 3:
        # do not connect to the database as the site is closed for maintenance:
        return []
    ### log_sql_query(sql, param) ### UNCOMMENT ONLY IF you REALLY want to log all queries
    if param:
        param = tuple(param)
    try:
        db = _db_login()
        cur = db.cursor()
        rc = cur.execute(sql, param)
    except OperationalError:  # unexpected disconnect, bad malloc error, etc
        # FIXME: now reconnect is always forced, we may perhaps want to ping() first?
        # Retry once on a fresh connection; a second OperationalError
        # propagates to the caller (the old inner "except ...: raise" was
        # a no-op wrapper).
        db = _db_login(relogin=1)
        cur = db.cursor()
        rc = cur.execute(sql, param)
    # Native str methods replace the deprecated string-module functions,
    # and the leading SQL keyword is computed only once.
    command = sql.split()[0].upper()
    if command in ("SELECT", "SHOW", "DESC", "DESCRIBE"):
        if n:
            recset = cur.fetchmany(n)
        else:
            recset = cur.fetchall()
        if with_desc:
            return recset, cur.description
        else:
            return recset
    else:
        if command == "INSERT":
            rc = cur.lastrowid
        return rc
def run_sql_many(query, params, limit=CFG_MISCUTIL_SQL_RUN_SQL_MANY_LIMIT):
    """Run SQL on the server with PARAM, using executemany.

    This is more efficient than execute but it makes sense only with
    queries that affect the state of a database (INSERT, UPDATE).  That
    is why the result just counts the number of affected rows.

    @param params: tuple of tuple of string params to insert in the query
    @param limit: query will be executed in parts when number of
         parameters is greater than limit (each iteration runs at most
         `limit' parameters)
    @return: SQL result as provided by database (cumulated number of
        affected rows over all partial executions)
    """
    i = 0
    r = None
    total = len(params)  # loop bound is invariant; compute it once
    while i < total:
        ## make partial query safely (mimicking procedure from run_sql())
        try:
            db = _db_login()
            cur = db.cursor()
            rc = cur.executemany(query, params[i:i + limit])
        except OperationalError:
            # unexpected disconnect: reconnect once and retry; a second
            # OperationalError propagates (the old inner
            # "except OperationalError: raise" was a no-op wrapper)
            db = _db_login(relogin=1)
            cur = db.cursor()
            rc = cur.executemany(query, params[i:i + limit])
        ## collect its result:
        if r is None:
            r = rc
        else:
            r += rc
        i += limit
    return r
def blob_to_string(ablob):
    """Return string representation of ABLOB.

    Useful to treat MySQL BLOBs in the same way for both recent and old
    MySQLdb versions.  Falsy inputs (None, empty blob) are returned
    unchanged.
    """
    if not ablob:
        return ablob
    # isinstance() replaces the non-idiomatic "type(ablob) is str" check.
    if isinstance(ablob, str):
        # BLOB is already a string in MySQLdb 0.9.2
        return ablob
    # BLOB is array.array in MySQLdb 1.0.0 and later
    return ablob.tostring()
def log_sql_query_cached(key, result, hit_p):
    """Log SQL query cached into prefix/var/log/dbquery.log log file.

    In order to enable logging of all SQL queries, please uncomment two
    lines in run_sql_cached() above.  Useful for fine-level debugging
    only!

    @param key: cache key (repr of the query invocation)
    @param result: result that was cached or served
    @param hit_p: true for a cache hit, false for a miss
    """
    from invenio.config import CFG_LOGDIR
    from invenio.dateutils import convert_datestruct_to_datetext
    from invenio.textutils import indent_text
    log_path = CFG_LOGDIR + '/dbquery.log'
    date_of_log = convert_datestruct_to_datetext(time.localtime())
    message = date_of_log + '-->\n'
    message += indent_text('Key:\n' + indent_text(str(key), 2, wrap=True), 2)
    message += indent_text('Result:\n' + indent_text(str(result) + (hit_p and ' HIT' or ' MISS'), 2, wrap=True), 2)
    message += 'Cached queries: %i\n\n' % len(_db_cache)
    try:
        log_file = open(log_path, 'a+')
        try:
            log_file.writelines(message)
        finally:
            # always release the file handle, even when the write fails
            log_file.close()
    except Exception:
        # best-effort logging: swallow failures, but no longer catch
        # SystemExit/KeyboardInterrupt as the old bare "except:" did
        pass
def log_sql_query(sql, param=None):
    """Log SQL query into prefix/var/log/dbquery.log log file.

    In order to enable logging of all SQL queries, please uncomment one
    line in run_sql() above.  Useful for fine-level debugging only!

    @param sql: the SQL statement that was run
    @param param: the parameter tuple passed with it, if any
    """
    from invenio.config import CFG_LOGDIR
    from invenio.dateutils import convert_datestruct_to_datetext
    from invenio.textutils import indent_text
    log_path = CFG_LOGDIR + '/dbquery.log'
    date_of_log = convert_datestruct_to_datetext(time.localtime())
    message = date_of_log + '-->\n'
    message += indent_text('Query:\n' + indent_text(str(sql), 2, wrap=True), 2)
    message += indent_text('Params:\n' + indent_text(str(param), 2, wrap=True), 2)
    message += '-----------------------------\n\n'
    try:
        log_file = open(log_path, 'a+')
        try:
            log_file.writelines(message)
        finally:
            # always release the file handle, even when the write fails
            log_file.close()
    except Exception:
        # best-effort logging: swallow failures, but no longer catch
        # SystemExit/KeyboardInterrupt as the old bare "except:" did
        pass
def get_table_update_time(tablename):
    """Return update time of TABLENAME.

    TABLENAME can contain wildcard `%' in which case we return the
    maximum update time value among all matching tables.

    @param tablename: table name (or SQL LIKE pattern) to inspect
    @return: update time as a string, as reported by MySQL
    """
    # Note: in order to work with all of MySQL 4.0, 4.1, 5.0, this
    # function uses SHOW TABLE STATUS technique with a dirty column
    # position lookup to return the correct value.  (Making use of
    # Index_Length column that is either of type long (when there are
    # some indexes defined) or of type None (when there are no indexes
    # defined, e.g. table is empty).  When we shall use solely
    # MySQL-5.0, we can employ a much cleaner technique of using
    # SELECT UPDATE_TIME FROM INFORMATION_SCHEMA.TABLES WHERE
    # table_name='collection'.
    res = run_sql("SHOW TABLE STATUS LIKE %s", (tablename, ))
    update_times = [] # store all update times
    for row in res:
        # row[10] discriminates the server version: long/None means the
        # 4.1/5.0 column layout, anything else the 4.0 layout.
        # (`long` is the Python 2 builtin; this module is Python 2 code.)
        if type(row[10]) is long or \
           row[10] is None:
            # MySQL-4.1 and 5.0 have creation_time in 11th position,
            # so return next column:
            update_times.append(str(row[12]))
        else:
            # MySQL-4.0 has creation_time in 10th position, which is
            # of type datetime.datetime or str (depending on the
            # version of MySQLdb), so return next column:
            update_times.append(str(row[11]))
    return max(update_times)
def get_table_status_info(tablename):
    """Return table status information on TABLENAME.

    Returned is a dict with keys like Name, Rows, Data_length,
    Max_data_length, etc.  If TABLENAME does not exist, return empty
    dict.
    """
    # Note: again a hack so that it works on all MySQL 4.0, 4.1, 5.0;
    # row[10] (long or None on 4.1/5.0, something else on 4.0) tells us
    # which positional layout SHOW TABLE STATUS used.
    res = run_sql("SHOW TABLE STATUS LIKE %s", (tablename, ))
    table_status_info = {} # store all update times
    for row in res:
        if type(row[10]) is long or row[10] is None:
            # MySQL-4.1 and 5.0 column layout (creation time in 11th position)
            column_of = {'Name': 0, 'Rows': 4, 'Data_length': 6,
                         'Max_data_length': 8, 'Create_time': 11,
                         'Update_time': 12}
        else:
            # MySQL-4.0 column layout (creation_time in 10th position, of
            # type datetime.datetime or str depending on MySQLdb version)
            column_of = {'Name': 0, 'Rows': 3, 'Data_length': 5,
                         'Max_data_length': 7, 'Create_time': 10,
                         'Update_time': 11}
        for field, position in column_of.items():
            table_status_info[field] = row[position]
    return table_status_info
def serialize_via_marshal(obj):
    """Serialize Python object via marshal into a compressed string."""
    serialized = marshal.dumps(obj)
    return compress(serialized)
def deserialize_via_marshal(astring):
    """Decompress and deserialize string into a Python object via marshal."""
    raw = decompress(astring)
    return marshal.loads(raw)
# Optional speedup: JIT-compile the (de)serialization helpers when the
# psyco extension is available; silently continue without it otherwise.
# (The unused "e" binding of the original "except StandardError, e:" is
# dropped.)
try:
    import psyco
    psyco.bind(serialize_via_marshal)
    psyco.bind(deserialize_via_marshal)
except StandardError:
    pass
def wash_table_column_name(colname):
    """
    Evaluate table-column name to see if it is clean.

    This function accepts only names containing [a-zA-Z0-9_].

    @param colname: The string to be checked
    @type colname: str
    @return: colname if test passed
    @rtype: str
    @raise Exception: Raises an exception if colname is invalid.
    """
    # Raw string avoids the invalid "\w" escape in a plain string literal.
    if re.search(r'[^\w]', colname):
        raise Exception('The table column %s is not valid.' % repr(colname))
    return colname
def real_escape_string(unescaped_string):
    """
    Escapes special characters in the unescaped string for use in a DB query.

    @param unescaped_string: The string to be escaped
    @type unescaped_string: str
    @return: Returns the escaped string
    @rtype: str
    """
    # Delegate to the current thread's live connection so the escaping
    # matches the server's charset settings.
    return _db_login().escape_string(unescaped_string)
| gpl-2.0 |
DangoMelon0701/PyRemote-Sensing | Example codes/4- read_mod_aerosol_and_dump_ascii.py_/read_mod_aerosol_and_dump_ascii.py | 1 | 5281 | #!/usr/bin/python
'''
Module: read_mod_aerosol_and_dump_ascii.py
==========================================================================================
Disclaimer: The code is for demonstration purposes only. Users are responsible to check for accuracy and revise to fit their objective.
Author: Justin Roberts-Pierel, 2015
Organization: NASA ARSET
Purpose: To save a MODIS HDF4 file (or series of files) in ASCII format, saving time, lat, lon, and other SDS dependent on file type
See the README associated with this module for more information.
==========================================================================================
'''
#import necessary modules
from pyhdf import SD
import numpy as np
import time
import calendar
import sys
# This uses the file "fileList.txt", containing the list of files, in order
# to read the files.
try:
    fileList = open('fileList.txt', 'r')
except IOError:
    # narrowed from a bare "except:": only a failure to open the list file
    # should trigger this message
    print('Did not find a text file containing file names (perhaps name does not match)')
    sys.exit()

# Loop through all files listed in the text file.
for FILE_NAME in fileList:
    FILE_NAME = FILE_NAME.strip()
    user_input = input('\nWould you like to process\n' + FILE_NAME + '\n\n(Y/N)')
    if user_input in ('N', 'n'):
        continue
    # Choose the SDS fields to dump based on the MODIS product type encoded
    # in the file name.
    if '3K' in FILE_NAME:  # 3km MODIS aerosol file
        print('This is a 3km MODIS file. Saving... ')
        dataFields = dict([(1, 'Optical_Depth_Land_And_Ocean'),
                           (2, 'Image_Optical_Depth_Land_And_Ocean'),
                           (3, 'Land_sea_Flag'),
                           (4, 'Land_Ocean_Quality_Flag')])
    elif 'L2' in FILE_NAME:  # 10km MODIS aerosol file
        print('This is a 10km MODIS file. Saving... ')
        dataFields = dict([(1, 'Deep_Blue_Aerosol_Optical_Depth_550_Land'),
                           (2, 'AOD_550_Dark_Target_Deep_Blue_Combined'),
                           (3, 'AOD_550_Dark_Target_Deep_Blue_Combined_QA_Flag')])
    else:
        print('The file :', FILE_NAME, ' is not a valid MODIS file (or is named incorrectly). \n')
        continue
    try:
        # open the hdf file for reading
        hdf = SD.SD(FILE_NAME)
    except Exception:  # pyhdf raises HDF4Error here; keep broad but explicit
        print('Unable to open file: \n' + FILE_NAME + '\n Skipping...')
        continue

    # Geolocation, flattened to one entry per pixel.
    latitude = np.array(hdf.select('Latitude').get().ravel())
    longitude = np.array(hdf.select('Longitude').get().ravel())

    # Scan start time: number of seconds since the start of 1993.
    scan_time = hdf.select('Scan_Start_Time').get().ravel()
    n_pixels = scan_time.shape[0]

    # Per-pixel date components.  ("minute"/"second" renamed from the
    # original "min"/"sec" so the builtins are not shadowed.)
    year = np.zeros(n_pixels)
    month = np.zeros(n_pixels)
    day = np.zeros(n_pixels)
    hour = np.zeros(n_pixels)
    minute = np.zeros(n_pixels)
    second = np.zeros(n_pixels)

    # Offset from the Unix epoch to the product epoch; constant, so compute
    # it once instead of once per pixel as before.
    epoch_offset = calendar.timegm(
        time.strptime('Dec 31, 1992 @ 23:59:59 UTC', '%b %d, %Y @ %H:%M:%S UTC'))
    for i in range(n_pixels):
        # (The original looped with an i-1 offset on every index, which
        # filled exactly the same slots via wrap-around; direct indexing is
        # equivalent and clearer.)
        temp = time.gmtime(scan_time[i] + epoch_offset)
        year[i] = temp[0]
        month[i] = temp[1]
        day[i] = temp[2]
        hour[i] = temp[3]
        minute[i] = temp[4]
        second[i] = temp[5]

    # Assemble output: 8 fixed columns (date + geolocation) followed by one
    # column per requested SDS.
    end = 8 + len(dataFields)
    output = np.zeros((n_pixels, end))
    output[:, 0] = year
    output[:, 1] = month
    output[:, 2] = day
    output[:, 3] = hour
    output[:, 4] = minute
    output[:, 5] = second
    output[:, 6] = latitude
    output[:, 7] = longitude
    tempOutput = ['Year', 'Month', 'Day', 'Hour', 'Minute', 'Second',
                  'Latitude', 'Longitude']

    # Read each SDS, apply its scale factor to valid values only, and store
    # it as one output column (with its name as the column title).
    for i in range(8, end):
        SDS_NAME = dataFields[i - 7]
        try:
            sds = hdf.select(SDS_NAME)
        except Exception:
            # NOTE(review): skipping leaves a zero column in `output` with no
            # matching title, which would break the stacking below; kept as
            # in the original since it only happens on malformed files.
            print('Sorry, your MODIS hdf file does not contain the SDS:', SDS_NAME, '. Please try again with the correct file type.')
            continue
        attributes = sds.attributes()
        scale_factor = attributes['scale_factor']
        fillvalue = attributes['_FillValue']
        data = sds.get().ravel().astype(float)
        # Temporarily mark fill values as NaN so the scaling does not corrupt
        # them, then restore the original fill value.
        data[data == float(fillvalue)] = np.nan
        data = data * scale_factor
        data[np.isnan(data)] = fillvalue
        output[:, i] = data
        tempOutput.append(SDS_NAME)

    # Stack the titles on top of the data and write <input name>.txt.
    # (np.vstack replaces np.row_stack, which was removed in NumPy 2.0.)
    output = np.vstack((np.asarray(tempOutput), output))
    np.savetxt('{0}.txt'.format(FILE_NAME[:-4]), output, fmt='%s', delimiter=',')

print('\nAll valid files have been saved successfully.')
biswajitsahu/kuma | vendor/packages/pygments/styles/paraiso_light.py | 126 | 5645 | # -*- coding: utf-8 -*-
"""
pygments.styles.paraiso_light
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Paraíso (Light) by Jan T. Sott
Pygments template by Jan T. Sott (https://github.com/idleberg)
Created with Base16 Builder by Chris Kempson
(https://github.com/chriskempson/base16-builder).
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, Text, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
# Base16 palette for the Paraíso (Light) scheme; referenced by the
# token-to-style mapping in ParaisoLightStyle.styles below.
BACKGROUND = "#e7e9db"
CURRENT_LINE = "#b9b6b0"
SELECTION = "#a39e9b"
FOREGROUND = "#2f1e2e"
COMMENT = "#8d8687"
RED = "#ef6155"
ORANGE = "#f99b15"
YELLOW = "#fec418"
GREEN = "#48b685"
AQUA = "#5bc4bf"
BLUE = "#06b6ef"
PURPLE = "#815ba4"
class ParaisoLightStyle(Style):
    """Pygments style implementing the Paraíso (Light) Base16 scheme."""

    default_style = ''
    background_color = BACKGROUND
    highlight_color = SELECTION

    styles = {
        # No corresponding class for the following:
        Text:                      FOREGROUND,  # class:  ''
        Whitespace:                "",          # class: 'w'
        Error:                     RED,         # class: 'err'
        Other:                     "",          # class 'x'

        Comment:                   COMMENT,     # class: 'c'
        Comment.Multiline:         "",          # class: 'cm'
        Comment.Preproc:           "",          # class: 'cp'
        Comment.Single:            "",          # class: 'c1'
        Comment.Special:           "",          # class: 'cs'

        Keyword:                   PURPLE,      # class: 'k'
        Keyword.Constant:          "",          # class: 'kc'
        Keyword.Declaration:       "",          # class: 'kd'
        Keyword.Namespace:         AQUA,        # class: 'kn'
        Keyword.Pseudo:            "",          # class: 'kp'
        Keyword.Reserved:          "",          # class: 'kr'
        Keyword.Type:              YELLOW,      # class: 'kt'

        Operator:                  AQUA,        # class: 'o'
        Operator.Word:             "",          # class: 'ow' - like keywords

        Punctuation:               FOREGROUND,  # class: 'p'

        Name:                      FOREGROUND,  # class: 'n'
        Name.Attribute:            BLUE,        # class: 'na' - to be revised
        Name.Builtin:              "",          # class: 'nb'
        Name.Builtin.Pseudo:       "",          # class: 'bp'
        Name.Class:                YELLOW,      # class: 'nc' - to be revised
        Name.Constant:             RED,         # class: 'no' - to be revised
        Name.Decorator:            AQUA,        # class: 'nd' - to be revised
        Name.Entity:               "",          # class: 'ni'
        Name.Exception:            RED,         # class: 'ne'
        Name.Function:             BLUE,        # class: 'nf'
        Name.Property:             "",          # class: 'py'
        Name.Label:                "",          # class: 'nl'
        Name.Namespace:            YELLOW,      # class: 'nn' - to be revised
        Name.Other:                BLUE,        # class: 'nx'
        Name.Tag:                  AQUA,        # class: 'nt' - like a keyword
        Name.Variable:             RED,         # class: 'nv' - to be revised
        Name.Variable.Class:       "",          # class: 'vc' - to be revised
        Name.Variable.Global:      "",          # class: 'vg' - to be revised
        Name.Variable.Instance:    "",          # class: 'vi' - to be revised

        Number:                    ORANGE,      # class: 'm'
        Number.Float:              "",          # class: 'mf'
        Number.Hex:                "",          # class: 'mh'
        Number.Integer:            "",          # class: 'mi'
        Number.Integer.Long:       "",          # class: 'il'
        Number.Oct:                "",          # class: 'mo'

        Literal:                   ORANGE,      # class: 'l'
        Literal.Date:              GREEN,       # class: 'ld'

        String:                    GREEN,       # class: 's'
        String.Backtick:           "",          # class: 'sb'
        String.Char:               FOREGROUND,  # class: 'sc'
        String.Doc:                COMMENT,     # class: 'sd' - like a comment
        String.Double:             "",          # class: 's2'
        String.Escape:             ORANGE,      # class: 'se'
        String.Heredoc:            "",          # class: 'sh'
        String.Interpol:           ORANGE,      # class: 'si'
        String.Other:              "",          # class: 'sx'
        String.Regex:              "",          # class: 'sr'
        String.Single:             "",          # class: 's1'
        String.Symbol:             "",          # class: 'ss'

        Generic:                   "",                    # class: 'g'
        Generic.Deleted:           RED,                   # class: 'gd',
        Generic.Emph:              "italic",              # class: 'ge'
        Generic.Error:             "",                    # class: 'gr'
        Generic.Heading:           "bold " + FOREGROUND,  # class: 'gh'
        Generic.Inserted:          GREEN,                 # class: 'gi'
        Generic.Output:            "",                    # class: 'go'
        Generic.Prompt:            "bold " + COMMENT,     # class: 'gp'
        Generic.Strong:            "bold",                # class: 'gs'
        Generic.Subheading:        "bold " + AQUA,        # class: 'gu'
        Generic.Traceback:         "",                    # class: 'gt'
    }
| mpl-2.0 |
rgommers/statsmodels | statsmodels/sandbox/nonparametric/smoothers.py | 27 | 12607 | """
This module contains scatterplot smoothers, that is classes
who generate a smooth fit of a set of (x,y) pairs.
"""
# pylint: disable-msg=C0103
# pylint: disable-msg=W0142
# pylint: disable-msg=E0611
# pylint: disable-msg=E1101
from __future__ import print_function
import numpy as np
from . import kernels
#import numbers
#from scipy.linalg import solveh_banded
#from scipy.optimize import golden
#from models import _hbspline # Need to alter setup to be able to import
# extension from models or drop for scipy
#from models.bspline import BSpline, _band2array
class KernelSmoother(object):
    """Kernel regression smoother for one-dimensional (x, y) data.

    Parameters
    ----------
    x : array_like
        Abscissa values.
    y : array_like
        Ordinate values.
    Kernel : kernel instance, optional
        Smoothing kernel; defaults to ``kernels.Gaussian()``.
    """
    def __init__(self, x, y, Kernel=None):
        if Kernel is None:
            Kernel = kernels.Gaussian()
        self.Kernel = Kernel
        self.x = np.array(x)
        self.y = np.array(y)

    def fit(self):
        # Nothing to precompute: each prediction re-smooths the raw data.
        pass

    def __call__(self, x):
        return np.array([self.predict(value) for value in x])

    def predict(self, x):
        """Return the kernel-smoothed prediction at x.

        A scalar x yields a single value; any other input is cast to an
        ndarray and smoothed point by point.
        """
        if np.size(x) == 1:  # scalar (or single-element) fast path
            return self.Kernel.smooth(self.x, self.y, x)
        points = np.array(x)
        return np.array([self.Kernel.smooth(self.x, self.y, value)
                         for value in points])

    def conf(self, x):
        """Return the fitted curve with 1-sigma point-wise confidence bounds.

        The bounds reflect variance only (no bias term), so they may be
        optimistic when the bandwidth is large relative to the curvature
        of the underlying function.

        If x is an integer, the curve and bounds are instead evaluated at
        every x-th sorted sample point — denser where the data are denser —
        and the tuple (points, fit) is returned.
        """
        if isinstance(x, int):
            ordered = np.array(self.x)
            ordered.sort()
            subset = ordered[::x]
            return (subset, self.conf(subset))
        return np.array([self.Kernel.smoothconf(self.x, self.y, value)
                         for value in x])

    def var(self, x):
        # Point-wise smoothed variance estimate at each requested x.
        return np.array([self.Kernel.smoothvar(self.x, self.y, value)
                         for value in x])

    def std(self, x):
        # Point-wise smoothed standard deviation (sqrt of var).
        return np.sqrt(self.var(x))
class PolySmoother(object):
    """
    Polynomial smoother up to a given order.

    Fit based on weighted least squares.  The x values can be specified
    at instantiation or when called.

    This is a 3 liner with OLS or WLS, see test.
    It's here as a test smoother for GAM
    """
    #JP: heavily adjusted to work as plugin replacement for bspline
    #   smoother in gam.py  initalized by function default_smoother
    #   Only fixed exceptions, I didn't check whether it is statistically
    #   correctand I think it is not, there are still be some dimension
    #   problems, and there were some dimension problems initially.
    # TODO: undo adjustments and fix dimensions correctly
    # comment: this is just like polyfit with initialization options
    #          and additional results (OLS on polynomial of x (x is 1d?))
    def __init__(self, order, x=None):
        # order: highest polynomial degree; the design matrix, when built,
        # has one column per degree 0..order.
        #order = 4 # set this because we get knots instead of order
        self.order = order
        #print order, x.shape
        # coefficient vector, one entry per polynomial degree
        self.coef = np.zeros((order+1,), np.float64)
        if x is not None:
            if x.ndim > 1:
                print('Warning: 2d x detected in PolySmoother init, shape:', x.shape)
                x=x[0,:] #check orientation
            # Vandermonde-style design matrix: column i holds x**i
            self.X = np.array([x**i for i in range(order+1)]).T
    def df_fit(self):
        '''alias of df_model for backwards compatibility
        '''
        return self.df_model()
    def df_model(self):
        """
        Degrees of freedom used in the fit (order + 1 coefficients).
        """
        return self.order + 1
    def gram(self, d=None):
        # fake for spline imitation: BSpline smoothers expose gram();
        # nothing to compute for a plain polynomial basis
        pass
    def smooth(self,*args, **kwds):
        '''alias for fit, for backwards compatibility,

        do we need it with different behavior than fit?
        '''
        return self.fit(*args, **kwds)
    def df_resid(self):
        """
        Residual degrees of freedom from last fit.

        NOTE(review): self.N is set in fit(); calling this before fit()
        raises AttributeError.
        """
        return self.N - self.order - 1
    def __call__(self, x=None):
        # evaluating the smoother is the same as predicting
        return self.predict(x=x)
    def predict(self, x=None):
        """Evaluate the fitted polynomial at x (or at the x supplied at
        instantiation when x is None)."""
        if x is not None:
            #if x.ndim > 1: x=x[0,:] #why this this should select column not row
            if x.ndim > 1:
                print('Warning: 2d x detected in PolySmoother predict, shape:', x.shape)
                x=x[:,0] #TODO: check and clean this up
            X = np.array([(x**i) for i in range(self.order+1)])
        else: X = self.X
        #return np.squeeze(np.dot(X.T, self.coef))
        #need to check what dimension this is supposed to be
        # The orientation of X differs between the two branches above, so
        # contract along whichever axis matches the coefficient vector.
        if X.shape[1] == self.coef.shape[0]:
            return np.squeeze(np.dot(X, self.coef))#[0]
        else:
            return np.squeeze(np.dot(X.T, self.coef))#[0]
    def fit(self, y, x=None, weights=None):
        """Fit the polynomial to y by (weighted) least squares.

        Sets self.coef (column vector) and self.params (squeezed copy).
        """
        self.N = y.shape[0]
        if y.ndim == 1:
            y = y[:,None]
        # missing or all-NaN weights fall back to an unweighted fit
        if weights is None or np.isnan(weights).all():
            weights = 1
            _w = 1
        else:
            # sqrt-weights applied to both sides of the LS problem below
            _w = np.sqrt(weights)[:,None]
        if x is None:
            if not hasattr(self, "X"):
                raise ValueError("x needed to fit PolySmoother")
        else:
            if x.ndim > 1:
                print('Warning: 2d x detected in PolySmoother predict, shape:', x.shape)
                #x=x[0,:] #TODO: check orientation, row or col
            self.X = np.array([(x**i) for i in range(self.order+1)]).T
        #print _w.shape
        X = self.X * _w
        _y = y * _w#[:,None]
        #self.coef = np.dot(L.pinv(X).T, _y[:,None])
        #self.coef = np.dot(L.pinv(X), _y)
        self.coef = np.linalg.lstsq(X, _y)[0]
        self.params = np.squeeze(self.coef)
# comment out for now to remove dependency on _hbspline
##class SmoothingSpline(BSpline):
##
## penmax = 30.
##
## def fit(self, y, x=None, weights=None, pen=0.):
## banded = True
##
## if x is None:
## x = self.tau[(self.M-1):-(self.M-1)] # internal knots
##
## if pen == 0.: # can't use cholesky for singular matrices
## banded = False
##
## if x.shape != y.shape:
## raise ValueError('x and y shape do not agree, by default x are the Bspline\'s internal knots')
##
## bt = self.basis(x)
## if pen >= self.penmax:
## pen = self.penmax
##
## if weights is None:
## weights = np.array(1.)
##
## wmean = weights.mean()
## _w = np.sqrt(weights / wmean)
## bt *= _w
##
## # throw out rows with zeros (this happens at boundary points!)
##
## mask = np.flatnonzero(1 - np.alltrue(np.equal(bt, 0), axis=0))
##
## bt = bt[:, mask]
## y = y[mask]
##
## self.df_total = y.shape[0]
##
## if bt.shape[1] != y.shape[0]:
## raise ValueError("some x values are outside range of B-spline knots")
## bty = np.dot(bt, _w * y)
## self.N = y.shape[0]
## if not banded:
## self.btb = np.dot(bt, bt.T)
## _g = _band2array(self.g, lower=1, symmetric=True)
## self.coef, _, self.rank = L.lstsq(self.btb + pen*_g, bty)[0:3]
## self.rank = min(self.rank, self.btb.shape[0])
## else:
## self.btb = np.zeros(self.g.shape, np.float64)
## nband, nbasis = self.g.shape
## for i in range(nbasis):
## for k in range(min(nband, nbasis-i)):
## self.btb[k, i] = (bt[i] * bt[i+k]).sum()
##
## bty.shape = (1, bty.shape[0])
## self.chol, self.coef = solveh_banded(self.btb +
## pen*self.g,
## bty, lower=1)
##
## self.coef = np.squeeze(self.coef)
## self.resid = np.sqrt(wmean) * (y * _w - np.dot(self.coef, bt))
## self.pen = pen
##
## def gcv(self):
## """
## Generalized cross-validation score of current fit.
## """
##
## norm_resid = (self.resid**2).sum()
## return norm_resid / (self.df_total - self.trace())
##
## def df_resid(self):
## """
## self.N - self.trace()
##
## where self.N is the number of observations of last fit.
## """
##
## return self.N - self.trace()
##
## def df_fit(self):
## """
## = self.trace()
##
## How many degrees of freedom used in the fit?
## """
## return self.trace()
##
## def trace(self):
## """
## Trace of the smoothing matrix S(pen)
## """
##
## if self.pen > 0:
## _invband = _hbspline.invband(self.chol.copy())
## tr = _trace_symbanded(_invband, self.btb, lower=1)
## return tr
## else:
## return self.rank
##
##class SmoothingSplineFixedDF(SmoothingSpline):
## """
## Fit smoothing spline with approximately df degrees of freedom
## used in the fit, i.e. so that self.trace() is approximately df.
##
## In general, df must be greater than the dimension of the null space
## of the Gram inner product. For cubic smoothing splines, this means
## that df > 2.
## """
##
## target_df = 5
##
## def __init__(self, knots, order=4, coef=None, M=None, target_df=None):
## if target_df is not None:
## self.target_df = target_df
## BSpline.__init__(self, knots, order=order, coef=coef, M=M)
## self.target_reached = False
##
## def fit(self, y, x=None, df=None, weights=None, tol=1.0e-03):
##
## df = df or self.target_df
##
## apen, bpen = 0, 1.0e-03
## olddf = y.shape[0] - self.m
##
## if not self.target_reached:
## while True:
## curpen = 0.5 * (apen + bpen)
## SmoothingSpline.fit(self, y, x=x, weights=weights, pen=curpen)
## curdf = self.trace()
## if curdf > df:
## apen, bpen = curpen, 2 * curpen
## else:
## apen, bpen = apen, curpen
## if apen >= self.penmax:
## raise ValueError("penalty too large, try setting penmax higher or decreasing df")
## if np.fabs(curdf - df) / df < tol:
## self.target_reached = True
## break
## else:
## SmoothingSpline.fit(self, y, x=x, weights=weights, pen=self.pen)
##
##class SmoothingSplineGCV(SmoothingSpline):
##
## """
## Fit smoothing spline trying to optimize GCV.
##
## Try to find a bracketing interval for scipy.optimize.golden
## based on bracket.
##
## It is probably best to use target_df instead, as it is
## sometimes difficult to find a bracketing interval.
##
## """
##
## def fit(self, y, x=None, weights=None, tol=1.0e-03,
## bracket=(0,1.0e-03)):
##
## def _gcv(pen, y, x):
## SmoothingSpline.fit(y, x=x, pen=np.exp(pen), weights=weights)
## a = self.gcv()
## return a
##
## a = golden(_gcv, args=(y,x), brack=(-100,20), tol=tol)
##
##def _trace_symbanded(a,b, lower=0):
## """
## Compute the trace(a*b) for two upper or lower banded real symmetric matrices.
## """
##
## if lower:
## t = _zero_triband(a * b, lower=1)
## return t[0].sum() + 2 * t[1:].sum()
## else:
## t = _zero_triband(a * b, lower=0)
## return t[-1].sum() + 2 * t[:-1].sum()
##
##
##
##def _zero_triband(a, lower=0):
## """
## Zero out unnecessary elements of a real symmetric banded matrix.
## """
##
## nrow, ncol = a.shape
## if lower:
## for i in range(nrow): a[i,(ncol-i):] = 0.
## else:
## for i in range(nrow): a[i,0:i] = 0.
## return a
| bsd-3-clause |
EnviroCentre/jython-upgrade | jython/lib/test/test_pep292.py | 103 | 7682 | # Copyright (C) 2004 Python Software Foundation
# Author: barry@python.org (Barry Warsaw)
# License: http://www.opensource.org/licenses/PythonSoftFoundation.php
import unittest
from string import Template
class Bag:
    """Empty namespace object; tests hang arbitrary attributes on instances."""
    pass
class Mapping:
    """Mapping whose keys are dotted attribute paths resolved on itself.

    Looking up ``m['a.b']`` walks ``m.a.b``; a missing attribute anywhere
    along the path raises KeyError with the full requested name.
    """
    def __getitem__(self, name):
        node = self
        for part in name.split('.'):
            if not hasattr(node, part):
                raise KeyError(name)
            node = getattr(node, part)
        return node
class TestTemplate(unittest.TestCase):
    """Exercises string.Template (PEP 292) substitution behaviour."""
    def test_regular_templates(self):
        s = Template('$who likes to eat a bag of $what worth $$100')
        self.assertEqual(s.substitute(dict(who='tim', what='ham')),
                         'tim likes to eat a bag of ham worth $100')
        self.assertRaises(KeyError, s.substitute, dict(who='tim'))
    def test_regular_templates_with_braces(self):
        s = Template('$who likes ${what} for ${meal}')
        d = dict(who='tim', what='ham', meal='dinner')
        self.assertEqual(s.substitute(d), 'tim likes ham for dinner')
        self.assertRaises(KeyError, s.substitute,
                          dict(who='tim', what='ham'))
    def test_escapes(self):
        # $$ is the escape for a literal delimiter character.
        eq = self.assertEqual
        s = Template('$who likes to eat a bag of $$what worth $$100')
        eq(s.substitute(dict(who='tim', what='ham')),
           'tim likes to eat a bag of $what worth $100')
        s = Template('$who likes $$')
        eq(s.substitute(dict(who='tim', what='ham')), 'tim likes $')
    def test_percents(self):
        # %(...)s is plain text to Template; only $-placeholders substitute.
        eq = self.assertEqual
        s = Template('%(foo)s $foo ${foo}')
        d = dict(foo='baz')
        eq(s.substitute(d), '%(foo)s baz baz')
        eq(s.safe_substitute(d), '%(foo)s baz baz')
    def test_stringification(self):
        # Non-string values are converted with str() during substitution.
        eq = self.assertEqual
        s = Template('tim has eaten $count bags of ham today')
        d = dict(count=7)
        eq(s.substitute(d), 'tim has eaten 7 bags of ham today')
        eq(s.safe_substitute(d), 'tim has eaten 7 bags of ham today')
        s = Template('tim has eaten ${count} bags of ham today')
        eq(s.substitute(d), 'tim has eaten 7 bags of ham today')
    def test_tupleargs(self):
        eq = self.assertEqual
        s = Template('$who ate ${meal}')
        d = dict(who=('tim', 'fred'), meal=('ham', 'kung pao'))
        eq(s.substitute(d), "('tim', 'fred') ate ('ham', 'kung pao')")
        eq(s.safe_substitute(d), "('tim', 'fred') ate ('ham', 'kung pao')")
    def test_SafeTemplate(self):
        # safe_substitute leaves unknown placeholders intact instead of raising.
        eq = self.assertEqual
        s = Template('$who likes ${what} for ${meal}')
        eq(s.safe_substitute(dict(who='tim')), 'tim likes ${what} for ${meal}')
        eq(s.safe_substitute(dict(what='ham')), '$who likes ham for ${meal}')
        eq(s.safe_substitute(dict(what='ham', meal='dinner')),
           '$who likes ham for dinner')
        eq(s.safe_substitute(dict(who='tim', what='ham')),
           'tim likes ham for ${meal}')
        eq(s.safe_substitute(dict(who='tim', what='ham', meal='dinner')),
           'tim likes ham for dinner')
    def test_invalid_placeholders(self):
        # A bare or malformed delimiter raises ValueError on substitute().
        raises = self.assertRaises
        s = Template('$who likes $')
        raises(ValueError, s.substitute, dict(who='tim'))
        s = Template('$who likes ${what)')
        raises(ValueError, s.substitute, dict(who='tim'))
        s = Template('$who likes $100')
        raises(ValueError, s.substitute, dict(who='tim'))
    def test_idpattern_override(self):
        # idpattern may be overridden to allow dotted attribute paths.
        class PathPattern(Template):
            idpattern = r'[_a-z][._a-z0-9]*'
        m = Mapping()
        m.bag = Bag()
        m.bag.foo = Bag()
        m.bag.foo.who = 'tim'
        m.bag.what = 'ham'
        s = PathPattern('$bag.foo.who likes to eat a bag of $bag.what')
        self.assertEqual(s.substitute(m), 'tim likes to eat a bag of ham')
    def test_pattern_override(self):
        # The whole pattern can be replaced, as long as it defines the
        # escaped/named/braced/invalid groups.
        class MyPattern(Template):
            pattern = r"""
            (?P<escaped>@{2}) |
            @(?P<named>[_a-z][._a-z0-9]*) |
            @{(?P<braced>[_a-z][._a-z0-9]*)} |
            (?P<invalid>@)
            """
        m = Mapping()
        m.bag = Bag()
        m.bag.foo = Bag()
        m.bag.foo.who = 'tim'
        m.bag.what = 'ham'
        s = MyPattern('@bag.foo.who likes to eat a bag of @bag.what')
        self.assertEqual(s.substitute(m), 'tim likes to eat a bag of ham')
        # A pattern with an unrecognized group name must be rejected.
        class BadPattern(Template):
            pattern = r"""
            (?P<badname>.*) |
            (?P<escaped>@{2}) |
            @(?P<named>[_a-z][._a-z0-9]*) |
            @{(?P<braced>[_a-z][._a-z0-9]*)} |
            (?P<invalid>@) |
            """
        s = BadPattern('@bag.foo.who likes to eat a bag of @bag.what')
        self.assertRaises(ValueError, s.substitute, {})
        self.assertRaises(ValueError, s.safe_substitute, {})
    def test_unicode_values(self):
        s = Template('$who likes $what')
        d = dict(who=u't\xffm', what=u'f\xfe\fed')
        self.assertEqual(s.substitute(d), u't\xffm likes f\xfe\x0ced')
    def test_keyword_arguments(self):
        # Keyword arguments take precedence over the positional mapping.
        eq = self.assertEqual
        s = Template('$who likes $what')
        eq(s.substitute(who='tim', what='ham'), 'tim likes ham')
        eq(s.substitute(dict(who='tim'), what='ham'), 'tim likes ham')
        eq(s.substitute(dict(who='fred', what='kung pao'),
                        who='tim', what='ham'),
           'tim likes ham')
        s = Template('the mapping is $mapping')
        eq(s.substitute(dict(foo='none'), mapping='bozo'),
           'the mapping is bozo')
        eq(s.substitute(dict(mapping='one'), mapping='two'),
           'the mapping is two')
    def test_keyword_arguments_safe(self):
        eq = self.assertEqual
        raises = self.assertRaises
        s = Template('$who likes $what')
        eq(s.safe_substitute(who='tim', what='ham'), 'tim likes ham')
        eq(s.safe_substitute(dict(who='tim'), what='ham'), 'tim likes ham')
        eq(s.safe_substitute(dict(who='fred', what='kung pao'),
                             who='tim', what='ham'),
           'tim likes ham')
        s = Template('the mapping is $mapping')
        eq(s.safe_substitute(dict(foo='none'), mapping='bozo'),
           'the mapping is bozo')
        eq(s.safe_substitute(dict(mapping='one'), mapping='two'),
           'the mapping is two')
        # At most one positional mapping is accepted.
        d = dict(mapping='one')
        raises(TypeError, s.substitute, d, {})
        raises(TypeError, s.safe_substitute, d, {})
    def test_delimiter_override(self):
        eq = self.assertEqual
        raises = self.assertRaises
        class AmpersandTemplate(Template):
            delimiter = '&'
        s = AmpersandTemplate('this &gift is for &{who} &&')
        eq(s.substitute(gift='bud', who='you'), 'this bud is for you &')
        raises(KeyError, s.substitute)
        eq(s.safe_substitute(gift='bud', who='you'), 'this bud is for you &')
        eq(s.safe_substitute(), 'this &gift is for &{who} &')
        s = AmpersandTemplate('this &gift is for &{who} &')
        raises(ValueError, s.substitute, dict(gift='bud', who='you'))
        eq(s.safe_substitute(), 'this &gift is for &{who} &')
        class PieDelims(Template):
            delimiter = '@'
        s = PieDelims('@who likes to eat a bag of @{what} worth $100')
        self.assertEqual(s.substitute(dict(who='tim', what='ham')),
                         'tim likes to eat a bag of ham worth $100')
def test_main():
    # Run all test classes through the stdlib regression-test harness.
    from test import test_support
    test_classes = [TestTemplate,]
    test_support.run_unittest(*test_classes)
if __name__ == '__main__':
    test_main()
| mit |
repotvsupertuga/tvsupertuga.repository | instal/script.module.liveresolver/lib/liveresolver/modules/f4mproxy/flvlib/astypes.py | 98 | 8332 | import os
import calendar
import datetime
import logging
from primitives import *
from constants import *
from helpers import OrderedAttrDict, utc
"""
The AS types and their FLV representations.
"""
log = logging.getLogger('flvlib.astypes')
class MalformedFLV(Exception):
    """Raised when the stream violates the expected FLV/AMF0 serialization."""
    pass
# Number
def get_number(f, max_offset=None):
    # AS Numbers are serialized as doubles; max_offset is accepted for
    # interface uniformity with the container readers but unused here.
    return get_double(f)
def make_number(num):
    return make_double(num)
# Boolean
def get_boolean(f, max_offset=None):
    # A single byte; any non-zero value is true.
    value = get_ui8(f)
    return bool(value)
def make_boolean(value):
    return make_ui8((value and 1) or 0)
# String
def get_string(f, max_offset=None):
    # First 16 bits are the string's length
    length = get_ui16(f)
    # Then comes the string itself
    ret = f.read(length)
    return ret
def make_string(string):
    if isinstance(string, unicode):
        # We need a blob, not unicode. Arbitrarily choose UTF-8
        string = string.encode('UTF-8')
    length = make_ui16(len(string))
    return length + string
# Longstring
def get_longstring(f, max_offset=None):
    # First 32 bits are the string's length
    length = get_ui32(f)
    # Then comes the string itself
    ret = f.read(length)
    return ret
def make_longstring(string):
    if isinstance(string, unicode):
        # We need a blob, not unicode. Arbitrarily choose UTF-8
        string = string.encode('UTF-8')
    length = make_ui32(len(string))
    return length + string
# ECMA Array
class ECMAArray(OrderedAttrDict):
    """An AS ECMA (associative) array; an ordered attribute dictionary."""
    pass
def get_ecma_array(f, max_offset=None):
    # The declared length is only approximate, so keep reading variables
    # until the 0x000009 end marker (or max_offset) is reached.
    length = get_ui32(f)
    log.debug("The ECMA array has approximately %d elements", length)
    array = ECMAArray()
    while True:
        if max_offset and (f.tell() == max_offset):
            log.debug("Prematurely terminating reading an ECMA array")
            break
        marker = get_ui24(f)
        if marker == 9:
            log.debug("Marker!")
            break
        else:
            # Not the end marker: rewind the 3 bytes and parse a variable.
            f.seek(-3, os.SEEK_CUR)
            name, value = get_script_data_variable(f, max_offset=max_offset)
            array[name] = value
    return array
def make_ecma_array(d):
    # Serialized as a UI32 length, the name/value pairs, then the end marker.
    length = make_ui32(len(d))
    rest = ''.join([make_script_data_variable(name, value)
                    for name, value in d.iteritems()])
    marker = make_ui24(9)
    return length + rest + marker
# Strict Array
def get_strict_array(f, max_offset=None):
    # A strict array carries an exact element count, unlike an ECMA array.
    length = get_ui32(f)
    log.debug("The length is %d", length)
    elements = [get_script_data_value(f, max_offset=max_offset)
                for _ in xrange(length)]
    return elements
def make_strict_array(l):
    ret = make_ui32(len(l))
    rest = ''.join([make_script_data_value(value) for value in l])
    return ret + rest
# Date
def get_date(f, max_offset=None):
    # Stored as milliseconds since the epoch, so scale down to seconds.
    timestamp = get_number(f) / 1000.0
    # From the following document:
    # http://opensource.adobe.com/wiki/download/
    # attachments/1114283/amf0_spec_121207.pdf
    #
    # Section 2.13 Date Type
    #
    # (...) While the design of this type reserves room for time zone offset
    # information, it should not be filled in, nor used (...)
    _ignored = get_si16(f)
    return datetime.datetime.fromtimestamp(timestamp, utc)
def make_date(date):
    # Naive datetimes are assumed to be UTC, per the AMF0 spec note above.
    if date.tzinfo:
        utc_date = date.astimezone(utc)
    else:
        # assume it's UTC
        utc_date = date.replace(tzinfo=utc)
    ret = make_number(calendar.timegm(utc_date.timetuple()) * 1000)
    offset = 0
    return ret + make_si16(offset)
# Null
def get_null(f, max_offset=None):
    # Null has no payload; nothing is consumed from the stream.
    return None
def make_null(none):
    # The type tag alone identifies a Null, so the body is empty.
    return ''
# Object
class FLVObject(OrderedAttrDict):
    """A generic AS object; an ordered attribute dictionary."""
    pass
def get_object(f, max_offset=None):
    # Objects are name/value pairs terminated by the 0x000009 marker.
    ret = FLVObject()
    while True:
        if max_offset and (f.tell() == max_offset):
            log.debug("Prematurely terminating reading an object")
            break
        marker = get_ui24(f)
        if marker == 9:
            log.debug("Marker!")
            break
        else:
            # Not the end marker: rewind the 3 bytes and parse a variable.
            f.seek(-3, os.SEEK_CUR)
            name, value = get_script_data_variable(f)
            setattr(ret, name, value)
    return ret
def make_object(obj):
    # If the object is iterable, serialize keys/values. If not, fall
    # back on iterating over __dict__.
    # This makes sure that make_object(get_object(StringIO(blob))) == blob
    try:
        iterator = obj.iteritems()
    except AttributeError:
        iterator = obj.__dict__.iteritems()
    ret = ''.join([make_script_data_variable(name, value)
                   for name, value in iterator])
    marker = make_ui24(9)
    return ret + marker
# MovieClip
class MovieClip(object):
    """AS MovieClip value: a thin wrapper around the clip's path."""

    def __init__(self, path):
        self.path = path

    def __eq__(self, other):
        if not isinstance(other, MovieClip):
            return False
        return self.path == other.path

    def __repr__(self):
        return "<MovieClip at %s>" % self.path
def get_movieclip(f, max_offset=None):
    # The payload is just the clip's path, encoded as a String.
    ret = get_string(f)
    return MovieClip(ret)
def make_movieclip(clip):
    return make_string(clip.path)
# Undefined
class Undefined(object):
    """Sentinel for the AS 'undefined' value; all instances compare equal."""
    def __eq__(self, other):
        return isinstance(other, Undefined)
    def __repr__(self):
        return '<Undefined>'
def get_undefined(f, max_offset=None):
    # No payload; the type tag alone marks an undefined value.
    return Undefined()
def make_undefined(undefined):
    return ''
# Reference
class Reference(object):
    """AS Reference value: an index into the AMF reference table."""

    def __init__(self, ref):
        self.ref = ref

    def __eq__(self, other):
        if not isinstance(other, Reference):
            return False
        return self.ref == other.ref

    def __repr__(self):
        return "<Reference to %d>" % self.ref
def get_reference(f, max_offset=None):
    # The payload is a UI16 index into the reference table.
    ret = get_ui16(f)
    return Reference(ret)
def make_reference(reference):
    return make_ui16(reference.ref)
# Maps each AMF0 value-type tag to its (reader, writer) function pair.
as_type_to_getter_and_maker = {
    VALUE_TYPE_NUMBER: (get_number, make_number),
    VALUE_TYPE_BOOLEAN: (get_boolean, make_boolean),
    VALUE_TYPE_STRING: (get_string, make_string),
    VALUE_TYPE_OBJECT: (get_object, make_object),
    VALUE_TYPE_MOVIECLIP: (get_movieclip, make_movieclip),
    VALUE_TYPE_NULL: (get_null, make_null),
    VALUE_TYPE_UNDEFINED: (get_undefined, make_undefined),
    VALUE_TYPE_REFERENCE: (get_reference, make_reference),
    VALUE_TYPE_ECMA_ARRAY: (get_ecma_array, make_ecma_array),
    VALUE_TYPE_STRICT_ARRAY: (get_strict_array, make_strict_array),
    VALUE_TYPE_DATE: (get_date, make_date),
    VALUE_TYPE_LONGSTRING: (get_longstring, make_longstring)
}
# Maps native Python types to the AMF0 tag used when serializing them.
type_to_as_type = {
    bool: VALUE_TYPE_BOOLEAN,
    int: VALUE_TYPE_NUMBER,
    long: VALUE_TYPE_NUMBER,
    float: VALUE_TYPE_NUMBER,
    # WARNING: not supporting Longstrings here.
    # With a max length of 65535 chars, noone will notice.
    str: VALUE_TYPE_STRING,
    unicode: VALUE_TYPE_STRING,
    list: VALUE_TYPE_STRICT_ARRAY,
    dict: VALUE_TYPE_ECMA_ARRAY,
    ECMAArray: VALUE_TYPE_ECMA_ARRAY,
    datetime.datetime: VALUE_TYPE_DATE,
    Undefined: VALUE_TYPE_UNDEFINED,
    MovieClip: VALUE_TYPE_MOVIECLIP,
    Reference: VALUE_TYPE_REFERENCE,
    type(None): VALUE_TYPE_NULL
}
# SCRIPTDATAVARIABLE
def get_script_data_variable(f, max_offset=None):
    """Read one name/value pair: a String name followed by a typed value."""
    name = get_string(f)
    log.debug("The name is %s", name)
    value = get_script_data_value(f, max_offset=max_offset)
    log.debug("The value is %r", value)
    return (name, value)
def make_script_data_variable(name, value):
    """Serialize one name/value pair as a String name plus a typed value."""
    log.debug("The name is %s", name)
    log.debug("The value is %r", value)
    ret = make_string(name) + make_script_data_value(value)
    return ret
# SCRIPTDATAVALUE
def get_script_data_value(f, max_offset=None):
    """Read one typed AS value: a UI8 type tag followed by its payload.

    Raises MalformedFLV if the type tag is unknown.
    """
    value_type = get_ui8(f)
    log.debug("The value type is %r", value_type)
    try:
        get_value = as_type_to_getter_and_maker[value_type][0]
    except KeyError:
        # Interpolate the offending tag into the message. Previously the
        # format string and the value were passed as two separate exception
        # arguments, so the message was never actually formatted.
        raise MalformedFLV("Invalid script data value type: %d" % value_type)
    log.debug("The getter function is %r", get_value)
    value = get_value(f, max_offset=max_offset)
    return value
def make_script_data_value(value):
    """Serialize a native value as a UI8 type tag followed by its payload."""
    value_type = type_to_as_type.get(value.__class__, VALUE_TYPE_OBJECT)
    log.debug("The value type is %r", value_type)
    # KeyError can't happen here, because we always fall back on
    # VALUE_TYPE_OBJECT when determining value_type
    make_value = as_type_to_getter_and_maker[value_type][1]
    log.debug("The maker function is %r", make_value)
    type_tag = make_ui8(value_type)
    ret = make_value(value)
    return type_tag + ret
| gpl-2.0 |
gregswift/ansible | lib/ansible/plugins/action/unarchive.py | 8 | 4318 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
class ActionModule(ActionBase):
    """Action plugin for unarchive: optionally stages a local archive on
    the target host, validates the destination directory, then delegates
    extraction to the remote unarchive module."""
    TRANSFERS_FILES = True
    def run(self, tmp=None, task_vars=None):
        ''' handler for unarchive operations '''
        if task_vars is None:
            task_vars = dict()
        result = super(ActionModule, self).run(tmp, task_vars)
        # Task arguments: src/dest are mandatory; copy controls whether the
        # archive is transferred from the controller; creates short-circuits.
        source = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        copy = boolean(self._task.args.get('copy', True))
        creates = self._task.args.get('creates', None)
        if source is None or dest is None:
            result['failed'] = True
            result['msg'] = "src (or content) and dest are required"
            return result
        remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
        if not tmp:
            tmp = self._make_tmp_path(remote_user)
        if creates:
            # do not run the command if the line contains creates=filename
            # and the filename already exists. This allows idempotence
            # of command executions.
            result = self._execute_module(module_name='stat', module_args=dict(path=creates), task_vars=task_vars)
            stat = result.get('stat', None)
            if stat and stat.get('exists', False):
                result['skipped'] = True
                result['msg'] = "skipped, since %s exists" % creates
                return result
        dest = self._remote_expand_user(dest) # CCTODO: Fix path for Windows hosts.
        source = os.path.expanduser(source)
        if copy:
            # Resolve the local source relative to the role's files/ dir
            # when running inside a role, else relative to the playbook.
            if self._task._role is not None:
                source = self._loader.path_dwim_relative(self._task._role._role_path, 'files', source)
            else:
                source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', source)
        remote_checksum = self._remote_checksum(dest, all_vars=task_vars)
        # _remote_checksum sentinel codes (per the messages below): '4'
        # means no Python on the remote; '3' marks a directory, the only
        # acceptable dest here.
        if remote_checksum == '4':
            result['failed'] = True
            result['msg'] = "python isn't present on the system. Unable to compute checksum"
            return result
        elif remote_checksum != '3':
            result['failed'] = True
            result['msg'] = "dest '%s' must be an existing dir" % dest
            return result
        if copy:
            # transfer the file to a remote tmp location
            tmp_src = self._connection._shell.join_path(tmp, 'source')
            self._transfer_file(source, tmp_src)
        # handle diff mode client side
        # handle check mode client side
        if copy:
            # fix file permissions when the copy is done as a different user
            self._fixup_perms(tmp, remote_user, recursive=True)
            # Build temporary module_args: point src at the staged copy.
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(
                    src=tmp_src,
                    original_basename=os.path.basename(source),
                ),
            )
        else:
            # Remote-source case: keep src as given, only record basename.
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(
                    original_basename=os.path.basename(source),
                ),
            )
        # execute the unarchive module now, with the updated args
        result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))
        return result
| gpl-3.0 |
bocaaust/FreshLife | django_project/env/lib/python2.7/site-packages/pip/req/req_set.py | 79 | 32764 | from __future__ import absolute_import
from collections import defaultdict
from itertools import chain
import logging
import os
from pip._vendor import pkg_resources
from pip._vendor import requests
from pip.compat import expanduser
from pip.download import (is_file_url, is_dir_url, is_vcs_url, url_to_path,
unpack_url)
from pip.exceptions import (InstallationError, BestVersionAlreadyInstalled,
DistributionNotFound, PreviousBuildDirError,
HashError, HashErrors, HashUnpinned,
DirectoryUrlHashUnsupported, VcsHashUnsupported)
from pip.req.req_install import InstallRequirement
from pip.utils import (
display_path, dist_in_usersite, ensure_dir, normalize_path)
from pip.utils.hashes import MissingHashes
from pip.utils.logging import indent_log
from pip.vcs import vcs
from pip.wheel import Wheel
logger = logging.getLogger(__name__)
class Requirements(object):
    """An insertion-ordered mapping of requirement name -> requirement.

    Preserves the order in which names were first added while offering
    dict-like access by name.
    """
    def __init__(self):
        # _keys records insertion order; _dict holds the actual mapping.
        self._keys = []
        self._dict = {}
    def keys(self):
        """Return requirement names in insertion order."""
        return self._keys
    def values(self):
        """Return requirements in insertion order."""
        return [self._dict[key] for key in self._keys]
    def __contains__(self, item):
        # Membership via the dict is O(1); scanning the key list was O(n).
        return item in self._dict
    def __setitem__(self, key, value):
        # First assignment records insertion order; later ones overwrite
        # the value but keep the original position.
        if key not in self._dict:
            self._keys.append(key)
        self._dict[key] = value
    def __getitem__(self, key):
        return self._dict[key]
    def __repr__(self):
        values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()]
        return 'Requirements({%s})' % ', '.join(values)
class DistAbstraction(object):
    """Abstracts out the wheel vs non-wheel prepare_files logic.

    The requirements for anything installable are as follows:
     - we must be able to determine the requirement name
       (or we can't correctly handle the non-upgrade case).
     - we must be able to generate a list of run-time dependencies
       without installing any additional packages (or we would
       have to either burn time by doing temporary isolated installs
       or alternatively violate pips 'don't start installing unless
       all requirements are available' rule - neither of which are
       desirable).
     - for packages with setup requirements, we must also be able
       to determine their requirements without installing additional
       packages (for the same reason as run-time dependencies)
     - we must be able to create a Distribution object exposing the
       above metadata.
    """
    def __init__(self, req_to_install):
        self.req_to_install = req_to_install
    def dist(self, finder):
        """Return a setuptools Dist object."""
        raise NotImplementedError(self.dist)
    def prep_for_dist(self):
        """Ensure that we can get a Dist for this requirement."""
        # Bug fix: previously this raised NotImplementedError(self.dist),
        # mislabelling which abstract method the subclass failed to supply.
        raise NotImplementedError(self.prep_for_dist)
def make_abstract_dist(req_to_install):
    """Factory to make an abstract dist object.

    Preconditions: Either an editable req with a source_dir, or satisfied_by
    or a wheel link, or a non-editable req with a source_dir.

    :return: A concrete DistAbstraction.
    """
    # Only a non-editable requirement pointing at a wheel link gets the
    # wheel abstraction; everything else is treated as an sdist.
    wheel_backed = (not req_to_install.editable and
                    req_to_install.link and
                    req_to_install.link.is_wheel)
    if wheel_backed:
        return IsWheel(req_to_install)
    return IsSDist(req_to_install)
class IsWheel(DistAbstraction):
    # Abstraction for a requirement backed by an unpacked wheel.
    def dist(self, finder):
        return list(pkg_resources.find_distributions(
            self.req_to_install.source_dir))[0]
    def prep_for_dist(self):
        # FIXME:https://github.com/pypa/pip/issues/1112
        pass
class IsSDist(DistAbstraction):
    # Abstraction for a requirement backed by an sdist or editable checkout.
    def dist(self, finder):
        dist = self.req_to_install.get_dist()
        # FIXME: shouldn't be globally added:
        if dist.has_metadata('dependency_links.txt'):
            finder.add_dependency_links(
                dist.get_metadata_lines('dependency_links.txt')
            )
        return dist
    def prep_for_dist(self):
        # Generate egg_info so name/version/dependencies become available.
        self.req_to_install.run_egg_info()
        self.req_to_install.assert_source_matches_version()
class Installed(DistAbstraction):
    # Abstraction for a requirement already satisfied by an installed dist.
    def dist(self, finder):
        return self.req_to_install.satisfied_by
    def prep_for_dist(self):
        pass
class RequirementSet(object):
def __init__(self, build_dir, src_dir, download_dir, upgrade=False,
ignore_installed=False, as_egg=False, target_dir=None,
ignore_dependencies=False, force_reinstall=False,
use_user_site=False, session=None, pycompile=True,
isolated=False, wheel_download_dir=None,
wheel_cache=None, require_hashes=False):
"""Create a RequirementSet.
:param wheel_download_dir: Where still-packed .whl files should be
written to. If None they are written to the download_dir parameter.
Separate to download_dir to permit only keeping wheel archives for
pip wheel.
:param download_dir: Where still packed archives should be written to.
If None they are not saved, and are deleted immediately after
unpacking.
:param wheel_cache: The pip wheel cache, for passing to
InstallRequirement.
"""
if session is None:
raise TypeError(
"RequirementSet() missing 1 required keyword argument: "
"'session'"
)
self.build_dir = build_dir
self.src_dir = src_dir
# XXX: download_dir and wheel_download_dir overlap semantically and may
# be combined if we're willing to have non-wheel archives present in
# the wheelhouse output by 'pip wheel'.
self.download_dir = download_dir
self.upgrade = upgrade
self.ignore_installed = ignore_installed
self.force_reinstall = force_reinstall
self.requirements = Requirements()
# Mapping of alias: real_name
self.requirement_aliases = {}
self.unnamed_requirements = []
self.ignore_dependencies = ignore_dependencies
self.successfully_downloaded = []
self.successfully_installed = []
self.reqs_to_cleanup = []
self.as_egg = as_egg
self.use_user_site = use_user_site
self.target_dir = target_dir # set from --target option
self.session = session
self.pycompile = pycompile
self.isolated = isolated
if wheel_download_dir:
wheel_download_dir = normalize_path(wheel_download_dir)
self.wheel_download_dir = wheel_download_dir
self._wheel_cache = wheel_cache
self.require_hashes = require_hashes
# Maps from install_req -> dependencies_of_install_req
self._dependencies = defaultdict(list)
def __str__(self):
reqs = [req for req in self.requirements.values()
if not req.comes_from]
reqs.sort(key=lambda req: req.name.lower())
return ' '.join([str(req.req) for req in reqs])
def __repr__(self):
reqs = [req for req in self.requirements.values()]
reqs.sort(key=lambda req: req.name.lower())
reqs_str = ', '.join([str(req.req) for req in reqs])
return ('<%s object; %d requirement(s): %s>'
% (self.__class__.__name__, len(reqs), reqs_str))
def add_requirement(self, install_req, parent_req_name=None):
"""Add install_req as a requirement to install.
:param parent_req_name: The name of the requirement that needed this
added. The name is used because when multiple unnamed requirements
resolve to the same name, we could otherwise end up with dependency
links that point outside the Requirements set. parent_req must
already be added. Note that None implies that this is a user
supplied requirement, vs an inferred one.
:return: Additional requirements to scan. That is either [] if
the requirement is not applicable, or [install_req] if the
requirement is applicable and has just been added.
"""
name = install_req.name
if not install_req.match_markers():
logger.warning("Ignoring %s: markers %r don't match your "
"environment", install_req.name,
install_req.markers)
return []
# This check has to come after we filter requirements with the
# environment markers.
if install_req.link and install_req.link.is_wheel:
wheel = Wheel(install_req.link.filename)
if not wheel.supported():
raise InstallationError(
"%s is not a supported wheel on this platform." %
wheel.filename
)
install_req.as_egg = self.as_egg
install_req.use_user_site = self.use_user_site
install_req.target_dir = self.target_dir
install_req.pycompile = self.pycompile
if not name:
# url or path requirement w/o an egg fragment
self.unnamed_requirements.append(install_req)
return [install_req]
else:
try:
existing_req = self.get_requirement(name)
except KeyError:
existing_req = None
if (parent_req_name is None and existing_req and not
existing_req.constraint and
existing_req.extras == install_req.extras and not
existing_req.req.specifier == install_req.req.specifier):
raise InstallationError(
'Double requirement given: %s (already in %s, name=%r)'
% (install_req, existing_req, name))
if not existing_req:
# Add requirement
self.requirements[name] = install_req
# FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
result = [install_req]
else:
# Assume there's no need to scan, and that we've already
# encountered this for scanning.
result = []
if not install_req.constraint and existing_req.constraint:
if (install_req.link and not (existing_req.link and
install_req.link.path == existing_req.link.path)):
self.reqs_to_cleanup.append(install_req)
raise InstallationError(
"Could not satisfy constraints for '%s': "
"installation from path or url cannot be "
"constrained to a version" % name)
# If we're now installing a constraint, mark the existing
# object for real installation.
existing_req.constraint = False
existing_req.extras = tuple(
sorted(set(existing_req.extras).union(
set(install_req.extras))))
logger.debug("Setting %s extras to: %s",
existing_req, existing_req.extras)
# And now we need to scan this.
result = [existing_req]
# Canonicalise to the already-added object for the backref
# check below.
install_req = existing_req
if parent_req_name:
parent_req = self.get_requirement(parent_req_name)
self._dependencies[parent_req].append(install_req)
return result
def has_requirement(self, project_name):
name = project_name.lower()
if (name in self.requirements and
not self.requirements[name].constraint or
name in self.requirement_aliases and
not self.requirements[self.requirement_aliases[name]].constraint):
return True
return False
@property
def has_requirements(self):
return list(req for req in self.requirements.values() if not
req.constraint) or self.unnamed_requirements
@property
def is_download(self):
if self.download_dir:
self.download_dir = expanduser(self.download_dir)
if os.path.exists(self.download_dir):
return True
else:
logger.critical('Could not find download directory')
raise InstallationError(
"Could not find or access download directory '%s'"
% display_path(self.download_dir))
return False
def get_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements:
return self.requirements[name]
if name in self.requirement_aliases:
return self.requirements[self.requirement_aliases[name]]
raise KeyError("No project with the name %r" % project_name)
def uninstall(self, auto_confirm=False):
for req in self.requirements.values():
if req.constraint:
continue
req.uninstall(auto_confirm=auto_confirm)
req.commit_uninstall()
def prepare_files(self, finder):
"""
Prepare process. Create temp directories, download and/or unpack files.
"""
# make the wheelhouse
if self.wheel_download_dir:
ensure_dir(self.wheel_download_dir)
# If any top-level requirement has a hash specified, enter
# hash-checking mode, which requires hashes from all.
root_reqs = self.unnamed_requirements + self.requirements.values()
require_hashes = (self.require_hashes or
any(req.has_hash_options for req in root_reqs))
if require_hashes and self.as_egg:
raise InstallationError(
'--egg is not allowed with --require-hashes mode, since it '
'delegates dependency resolution to setuptools and could thus '
'result in installation of unhashed packages.')
# Actually prepare the files, and collect any exceptions. Most hash
# exceptions cannot be checked ahead of time, because
# req.populate_link() needs to be called before we can make decisions
# based on link type.
discovered_reqs = []
hash_errors = HashErrors()
for req in chain(root_reqs, discovered_reqs):
try:
discovered_reqs.extend(self._prepare_file(
finder,
req,
require_hashes=require_hashes,
ignore_dependencies=self.ignore_dependencies))
except HashError as exc:
exc.req = req
hash_errors.append(exc)
if hash_errors:
raise hash_errors
def _check_skip_installed(self, req_to_install, finder):
"""Check if req_to_install should be skipped.
This will check if the req is installed, and whether we should upgrade
or reinstall it, taking into account all the relevant user options.
After calling this req_to_install will only have satisfied_by set to
None if the req_to_install is to be upgraded/reinstalled etc. Any
other value will be a dist recording the current thing installed that
satisfies the requirement.
Note that for vcs urls and the like we can't assess skipping in this
routine - we simply identify that we need to pull the thing down,
then later on it is pulled down and introspected to assess upgrade/
reinstalls etc.
:return: A text reason for why it was skipped, or None.
"""
# Check whether to upgrade/reinstall this req or not.
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
skip_reason = 'satisfied (use --upgrade to upgrade)'
if self.upgrade:
best_installed = False
# For link based requirements we have to pull the
# tree down and inspect to assess the version #, so
# its handled way down.
if not (self.force_reinstall or req_to_install.link):
try:
finder.find_requirement(req_to_install, self.upgrade)
except BestVersionAlreadyInstalled:
skip_reason = 'up-to-date'
best_installed = True
except DistributionNotFound:
# No distribution found, so we squash the
# error - it will be raised later when we
# re-try later to do the install.
# Why don't we just raise here?
pass
if not best_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and not
dist_in_usersite(req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
return skip_reason
else:
return None
    def _prepare_file(self,
                      finder,
                      req_to_install,
                      require_hashes=False,
                      ignore_dependencies=False):
        """Prepare a single requirements file.

        Resolves *req_to_install* to a concrete distribution (editable
        checkout, already-installed dist, or downloaded/unpacked archive),
        registers it in this requirement set, and collects its dependencies.

        :param finder: PackageFinder used to locate candidate links/versions.
        :param req_to_install: the InstallRequirement to prepare.
        :param require_hashes: when True, enforce hash-checking mode.
        :param ignore_dependencies: when True, do not recurse into the
            distribution's own requirements.
        :return: A list of additional InstallRequirements to also install.
        """
        # Tell user what we are doing for this requirement:
        # obtain (editable), skipping, processing (local url), collecting
        # (remote url or package name)
        # Constraints are not installed by themselves; `prepared` guards
        # against preparing the same requirement twice.
        if req_to_install.constraint or req_to_install.prepared:
            return []
        req_to_install.prepared = True
        # ###################### #
        # # print log messages # #
        # ###################### #
        if req_to_install.editable:
            logger.info('Obtaining %s', req_to_install)
        else:
            # satisfied_by is only evaluated by calling _check_skip_installed,
            # so it must be None here.
            assert req_to_install.satisfied_by is None
            if not self.ignore_installed:
                skip_reason = self._check_skip_installed(
                    req_to_install, finder)
            if req_to_install.satisfied_by:
                assert skip_reason is not None, (
                    '_check_skip_installed returned None but '
                    'req_to_install.satisfied_by is set to %r'
                    % (req_to_install.satisfied_by,))
                logger.info(
                    'Requirement already %s: %s', skip_reason,
                    req_to_install)
            else:
                if (req_to_install.link and
                        req_to_install.link.scheme == 'file'):
                    path = url_to_path(req_to_install.link.url)
                    logger.info('Processing %s', display_path(path))
                else:
                    logger.info('Collecting %s', req_to_install)
        with indent_log():
            # ################################ #
            # # vcs update or unpack archive # #
            # ################################ #
            # Three mutually exclusive sources: editable checkout,
            # already-installed distribution, or a link to download/unpack.
            if req_to_install.editable:
                if require_hashes:
                    raise InstallationError(
                        'The editable requirement %s cannot be installed when '
                        'requiring hashes, because there is no single file to '
                        'hash.' % req_to_install)
                req_to_install.ensure_has_source_dir(self.src_dir)
                req_to_install.update_editable(not self.is_download)
                abstract_dist = make_abstract_dist(req_to_install)
                abstract_dist.prep_for_dist()
                if self.is_download:
                    req_to_install.archive(self.download_dir)
            elif req_to_install.satisfied_by:
                if require_hashes:
                    logger.debug(
                        'Since it is already installed, we are trusting this '
                        'package without checking its hash. To ensure a '
                        'completely repeatable environment, install into an '
                        'empty virtualenv.')
                abstract_dist = Installed(req_to_install)
            else:
                # @@ if filesystem packages are not marked
                # editable in a req, a non deterministic error
                # occurs when the script attempts to unpack the
                # build directory
                req_to_install.ensure_has_source_dir(self.build_dir)
                # If a checkout exists, it's unwise to keep going. version
                # inconsistencies are logged later, but do not fail the
                # installation.
                # FIXME: this won't upgrade when there's an existing
                # package unpacked in `req_to_install.source_dir`
                if os.path.exists(
                        os.path.join(req_to_install.source_dir, 'setup.py')):
                    raise PreviousBuildDirError(
                        "pip can't proceed with requirements '%s' due to a"
                        " pre-existing build directory (%s). This is "
                        "likely due to a previous installation that failed"
                        ". pip is being responsible and not assuming it "
                        "can delete this. Please delete it and try again."
                        % (req_to_install, req_to_install.source_dir)
                    )
                req_to_install.populate_link(
                    finder, self.upgrade, require_hashes)
                # We can't hit this spot and have populate_link return None.
                # req_to_install.satisfied_by is None here (because we're
                # guarded) and upgrade has no impact except when satisfied_by
                # is not None.
                # Then inside find_requirement existing_applicable -> False
                # If no new versions are found, DistributionNotFound is raised,
                # otherwise a result is guaranteed.
                assert req_to_install.link
                link = req_to_install.link
                # Now that we have the real link, we can tell what kind of
                # requirements we have and raise some more informative errors
                # than otherwise. (For example, we can raise VcsHashUnsupported
                # for a VCS URL rather than HashMissing.)
                if require_hashes:
                    # We could check these first 2 conditions inside
                    # unpack_url and save repetition of conditions, but then
                    # we would report less-useful error messages for
                    # unhashable requirements, complaining that there's no
                    # hash provided.
                    if is_vcs_url(link):
                        raise VcsHashUnsupported()
                    elif is_file_url(link) and is_dir_url(link):
                        raise DirectoryUrlHashUnsupported()
                    if (not req_to_install.original_link and
                            not req_to_install.is_pinned):
                        # Unpinned packages are asking for trouble when a new
                        # version is uploaded. This isn't a security check, but
                        # it saves users a surprising hash mismatch in the
                        # future.
                        #
                        # file:/// URLs aren't pinnable, so don't complain
                        # about them not being pinned.
                        raise HashUnpinned()
                hashes = req_to_install.hashes(
                    trust_internet=not require_hashes)
                if require_hashes and not hashes:
                    # Known-good hashes are missing for this requirement, so
                    # shim it with a facade object that will provoke hash
                    # computation and then raise a HashMissing exception
                    # showing the user what the hash should be.
                    hashes = MissingHashes()
                try:
                    download_dir = self.download_dir
                    # We always delete unpacked sdists after pip ran.
                    autodelete_unpacked = True
                    if req_to_install.link.is_wheel \
                            and self.wheel_download_dir:
                        # when doing 'pip wheel` we download wheels to a
                        # dedicated dir.
                        download_dir = self.wheel_download_dir
                    if req_to_install.link.is_wheel:
                        if download_dir:
                            # When downloading, we only unpack wheels to get
                            # metadata.
                            autodelete_unpacked = True
                        else:
                            # When installing a wheel, we use the unpacked
                            # wheel.
                            autodelete_unpacked = False
                    unpack_url(
                        req_to_install.link, req_to_install.source_dir,
                        download_dir, autodelete_unpacked,
                        session=self.session, hashes=hashes)
                except requests.HTTPError as exc:
                    logger.critical(
                        'Could not install requirement %s because '
                        'of error %s',
                        req_to_install,
                        exc,
                    )
                    raise InstallationError(
                        'Could not install requirement %s because '
                        'of HTTP error %s for URL %s' %
                        (req_to_install, exc, req_to_install.link)
                    )
                abstract_dist = make_abstract_dist(req_to_install)
                abstract_dist.prep_for_dist()
                if self.is_download:
                    # Make a .zip of the source_dir we already created.
                    if req_to_install.link.scheme in vcs.all_schemes:
                        req_to_install.archive(self.download_dir)
                # req_to_install.req is only avail after unpack for URL
                # pkgs repeat check_if_exists to uninstall-on-upgrade
                # (#14)
                if not self.ignore_installed:
                    req_to_install.check_if_exists()
                if req_to_install.satisfied_by:
                    if self.upgrade or self.ignore_installed:
                        # don't uninstall conflict if user install and
                        # conflict is not user install
                        if not (self.use_user_site and not
                                dist_in_usersite(
                                    req_to_install.satisfied_by)):
                            req_to_install.conflicts_with = \
                                req_to_install.satisfied_by
                        req_to_install.satisfied_by = None
                    else:
                        logger.info(
                            'Requirement already satisfied (use '
                            '--upgrade to upgrade): %s',
                            req_to_install,
                        )
            # ###################### #
            # # parse dependencies # #
            # ###################### #
            dist = abstract_dist.dist(finder)
            more_reqs = []
            def add_req(subreq):
                # Wrap a dependency in an InstallRequirement and register it,
                # collecting anything newly added into more_reqs.
                sub_install_req = InstallRequirement(
                    str(subreq),
                    req_to_install,
                    isolated=self.isolated,
                    wheel_cache=self._wheel_cache,
                )
                more_reqs.extend(self.add_requirement(
                    sub_install_req, req_to_install.name))
            # We add req_to_install before its dependencies, so that we
            # can refer to it when adding dependencies.
            if not self.has_requirement(req_to_install.name):
                # 'unnamed' requirements will get added here
                self.add_requirement(req_to_install, None)
            if not ignore_dependencies:
                if (req_to_install.extras):
                    logger.debug(
                        "Installing extra requirements: %r",
                        ','.join(req_to_install.extras),
                    )
                # Warn about requested extras the dist doesn't declare.
                missing_requested = sorted(
                    set(req_to_install.extras) - set(dist.extras)
                )
                for missing in missing_requested:
                    logger.warning(
                        '%s does not provide the extra \'%s\'',
                        dist, missing
                    )
                available_requested = sorted(
                    set(dist.extras) & set(req_to_install.extras)
                )
                for subreq in dist.requires(available_requested):
                    add_req(subreq)
            # cleanup tmp src
            self.reqs_to_cleanup.append(req_to_install)
        if not req_to_install.editable and not req_to_install.satisfied_by:
            # XXX: --no-install leads this to report 'Successfully
            # downloaded' for only non-editable reqs, even though we took
            # action on them.
            self.successfully_downloaded.append(req_to_install)
        return more_reqs
def cleanup_files(self):
"""Clean up files, remove builds."""
logger.debug('Cleaning up...')
with indent_log():
for req in self.reqs_to_cleanup:
req.remove_temporary_source()
def _to_install(self):
"""Create the installation order.
The installation order is topological - requirements are installed
before the requiring thing. We break cycles at an arbitrary point,
and make no other guarantees.
"""
# The current implementation, which we may change at any point
# installs the user specified things in the order given, except when
# dependencies must come earlier to achieve topological order.
order = []
ordered_reqs = set()
def schedule(req):
if req.satisfied_by or req in ordered_reqs:
return
if req.constraint:
return
ordered_reqs.add(req)
for dep in self._dependencies[req]:
schedule(dep)
order.append(req)
for install_req in self.requirements.values():
schedule(install_req)
return order
    def install(self, install_options, global_options=(), *args, **kwargs):
        """
        Install everything in this set (after having downloaded and unpacked
        the packages)

        Requirements are installed in topological order (see _to_install).
        When a requirement replaces an existing installation, the old
        distribution is uninstalled first; that uninstall is rolled back if
        the new install fails and committed once it succeeds.
        """
        to_install = self._to_install()
        if to_install:
            logger.info(
                'Installing collected packages: %s',
                ', '.join([req.name for req in to_install]),
            )
        with indent_log():
            for requirement in to_install:
                if requirement.conflicts_with:
                    # An installed version is being replaced: remove it first.
                    # The uninstall is kept around so it can be rolled back.
                    logger.info(
                        'Found existing installation: %s',
                        requirement.conflicts_with,
                    )
                    with indent_log():
                        requirement.uninstall(auto_confirm=True)
                try:
                    requirement.install(
                        install_options,
                        global_options,
                        *args,
                        **kwargs
                    )
                except:
                    # Intentionally bare: even on KeyboardInterrupt the
                    # previously uninstalled dist must be restored before
                    # the exception propagates.
                    # if install did not succeed, rollback previous uninstall
                    if (requirement.conflicts_with and not
                            requirement.install_succeeded):
                        requirement.rollback_uninstall()
                    raise
                else:
                    if (requirement.conflicts_with and
                            requirement.install_succeeded):
                        requirement.commit_uninstall()
                requirement.remove_temporary_source()
        self.successfully_installed = to_install
| apache-2.0 |
Akshay0724/scikit-learn | sklearn/multioutput.py | 23 | 12834 | """
This module implements multioutput regression and classification.
The estimators provided in this module are meta-estimators: they require
a base estimator to be provided in their constructor. The meta-estimator
extends single output estimators to multioutput estimators.
"""
# Author: Tim Head <betatim@gmail.com>
# Author: Hugo Bowne-Anderson <hugobowne@gmail.com>
# Author: Chris Rivera <chris.richard.rivera@gmail.com>
# Author: Michael Williamson
# Author: James Ashton Nichols <james.ashton.nichols@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from abc import ABCMeta
from .base import BaseEstimator, clone
from .base import RegressorMixin, ClassifierMixin
from .utils import check_array, check_X_y
from .utils.fixes import parallel_helper
from .utils.validation import check_is_fitted, has_fit_parameter
from .utils.metaestimators import if_delegate_has_method
from .externals.joblib import Parallel, delayed
from .externals import six
__all__ = ["MultiOutputRegressor", "MultiOutputClassifier"]
def _fit_estimator(estimator, X, y, sample_weight=None):
    """Clone ``estimator`` and fit the clone on (X, y).

    The input estimator is left untouched; the fitted clone is returned.
    ``sample_weight`` is only forwarded when given, so estimators that do
    not accept it keep working.
    """
    fitted = clone(estimator)
    if sample_weight is None:
        fitted.fit(X, y)
    else:
        fitted.fit(X, y, sample_weight=sample_weight)
    return fitted
def _partial_fit_estimator(estimator, X, y, classes=None, sample_weight=None,
first_time=True):
if first_time:
estimator = clone(estimator)
if sample_weight is not None:
if classes is not None:
estimator.partial_fit(X, y, classes=classes,
sample_weight=sample_weight)
else:
estimator.partial_fit(X, y, sample_weight=sample_weight)
else:
if classes is not None:
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
return estimator
class MultiOutputEstimator(six.with_metaclass(ABCMeta, BaseEstimator)):
    """Base class for multi-output meta-estimators.

    Fits one clone of ``estimator`` per column of ``y`` (in parallel via
    joblib) and stores the fitted clones in ``estimators_``.
    """
    def __init__(self, estimator, n_jobs=1):
        self.estimator = estimator
        self.n_jobs = n_jobs
    @if_delegate_has_method('estimator')
    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incrementally fit the model to data.
        Fit a separate model for each output variable.
        Parameters
        ----------
        X : (sparse) array-like, shape (n_samples, n_features)
            Data.
        y : (sparse) array-like, shape (n_samples, n_outputs)
            Multi-output targets.
        classes : list of numpy arrays, shape (n_outputs)
            Each array is unique classes for one output in str/int
            Can be obtained by via
            ``[np.unique(y[:, i]) for i in range(y.shape[1])]``, where y is the
            target matrix of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.
        sample_weight : array-like, shape = (n_samples) or None
            Sample weights. If None, then samples are equally weighted.
            Only supported if the underlying regressor supports sample
            weights.
        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y,
                         multi_output=True,
                         accept_sparse=True)
        if y.ndim == 1:
            raise ValueError("y must have at least two dimensions for "
                             "multi-output regression but has only one.")
        if (sample_weight is not None and
                not has_fit_parameter(self.estimator, 'sample_weight')):
            raise ValueError("Underlying estimator does not support"
                             " sample weights.")
        # The very first call trains fresh clones; later calls update the
        # per-column estimators fitted previously.
        first_time = not hasattr(self, 'estimators_')
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_partial_fit_estimator)(
                self.estimators_[i] if not first_time else self.estimator,
                X, y[:, i],
                classes[i] if classes is not None else None,
                sample_weight, first_time) for i in range(y.shape[1]))
        return self
    def fit(self, X, y, sample_weight=None):
        """ Fit the model to data.
        Fit a separate model for each output variable.
        Parameters
        ----------
        X : (sparse) array-like, shape (n_samples, n_features)
            Data.
        y : (sparse) array-like, shape (n_samples, n_outputs)
            Multi-output targets. An indicator matrix turns on multilabel
            estimation.
        sample_weight : array-like, shape = (n_samples) or None
            Sample weights. If None, then samples are equally weighted.
            Only supported if the underlying regressor supports sample
            weights.
        Returns
        -------
        self : object
            Returns self.
        """
        if not hasattr(self.estimator, "fit"):
            raise ValueError("The base estimator should implement a fit method")
        X, y = check_X_y(X, y,
                         multi_output=True,
                         accept_sparse=True)
        if y.ndim == 1:
            raise ValueError("y must have at least two dimensions for "
                             "multi-output regression but has only one.")
        if (sample_weight is not None and
                not has_fit_parameter(self.estimator, 'sample_weight')):
            raise ValueError("Underlying estimator does not support"
                             " sample weights.")
        # One clone of the base estimator is fitted per output column.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_estimator)(
                self.estimator, X, y[:, i], sample_weight)
            for i in range(y.shape[1]))
        return self
    def predict(self, X):
        """Predict multi-output variable using a model
        trained for each target variable.
        Parameters
        ----------
        X : (sparse) array-like, shape (n_samples, n_features)
            Data.
        Returns
        -------
        y : (sparse) array-like, shape (n_samples, n_outputs)
            Multi-output targets predicted across multiple predictors.
            Note: Separate models are generated for each predictor.
        """
        check_is_fitted(self, 'estimators_')
        if not hasattr(self.estimator, "predict"):
            raise ValueError("The base estimator should implement a predict method")
        X = check_array(X, accept_sparse=True)
        # Column-wise predictions are stacked, then transposed back to
        # (n_samples, n_outputs).
        y = Parallel(n_jobs=self.n_jobs)(
            delayed(parallel_helper)(e, 'predict', X)
            for e in self.estimators_)
        return np.asarray(y).T
class MultiOutputRegressor(MultiOutputEstimator, RegressorMixin):
    """Multi target regression

    This strategy consists of fitting one regressor per target. This is a
    simple strategy for extending regressors that do not natively support
    multi-target regression.

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit` and `predict`.

    n_jobs : int, optional, default=1
        The number of jobs to run in parallel for `fit`. If -1,
        then the number of jobs is set to the number of cores.
        When individual estimators are fast to train or predict
        using `n_jobs>1` can result in slower performance due
        to the overhead of spawning processes.
    """

    def __init__(self, estimator, n_jobs=1):
        super(MultiOutputRegressor, self).__init__(estimator, n_jobs)

    def partial_fit(self, X, y, sample_weight=None):
        """Incrementally fit the model to data.
        Fit a separate model for each output variable.

        Parameters
        ----------
        X : (sparse) array-like, shape (n_samples, n_features)
            Data.

        y : (sparse) array-like, shape (n_samples, n_outputs)
            Multi-output targets.

        sample_weight : array-like, shape = (n_samples) or None
            Sample weights. If None, then samples are equally weighted.
            Only supported if the underlying regressor supports sample
            weights.

        Returns
        -------
        self : object
            Returns self.
        """
        # BUGFIX: the parent implementation returns ``self``, but the
        # previous code discarded that value and implicitly returned None,
        # contradicting the docstring and the scikit-learn estimator API.
        return super(MultiOutputRegressor, self).partial_fit(
            X, y, sample_weight=sample_weight)

    def score(self, X, y, sample_weight=None):
        """Returns the coefficient of determination R^2 of the prediction.

        The coefficient R^2 is defined as (1 - u/v), where u is the regression
        sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual
        sum of squares ((y_true - y_true.mean()) ** 2).sum().
        Best possible score is 1.0 and it can be negative (because the
        model can be arbitrarily worse). A constant model that always
        predicts the expected value of y, disregarding the input features,
        would get a R^2 score of 0.0.

        Notes
        -----
        R^2 is calculated by weighting all the targets equally using
        `multioutput='uniform_average'`.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Test samples.

        y : array-like, shape (n_samples) or (n_samples, n_outputs)
            True values for X.

        sample_weight : array-like, shape [n_samples], optional
            Sample weights.

        Returns
        -------
        score : float
            R^2 of self.predict(X) wrt. y.
        """
        # XXX remove in 0.19 when r2_score default for multioutput changes
        from .metrics import r2_score
        return r2_score(y, self.predict(X), sample_weight=sample_weight,
                        multioutput='uniform_average')
class MultiOutputClassifier(MultiOutputEstimator, ClassifierMixin):
    """Multi target classification

    This strategy consists of fitting one classifier per target. This is a
    simple strategy for extending classifiers that do not natively support
    multi-target classification

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing `fit`, `score` and `predict_proba`.

    n_jobs : int, optional, default=1
        The number of jobs to use for the computation. If -1 all CPUs are used.
        If 1 is given, no parallel computing code is used at all, which is
        useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
        used. Thus for n_jobs = -2, all CPUs but one are used.
        The number of jobs to use for the computation.
        It does each target variable in y in parallel.

    Attributes
    ----------
    estimators_ : list of `n_output` estimators
        Estimators used for predictions.
    """

    def __init__(self, estimator, n_jobs=1):
        super(MultiOutputClassifier, self).__init__(estimator, n_jobs)

    def predict_proba(self, X):
        """Probability estimates.
        Returns prediction probabilities for each class of each output.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Data

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs \
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        check_is_fitted(self, 'estimators_')
        if not hasattr(self.estimator, "predict_proba"):
            # BUGFIX: the adjacent string literals previously concatenated to
            # "implementpredict_proba method" -- keep the separating space.
            raise ValueError("The base estimator should implement "
                             "predict_proba method")
        results = [estimator.predict_proba(X) for estimator in
                   self.estimators_]
        return results

    def score(self, X, y):
        """Returns the mean accuracy on the given test data and labels.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Test samples

        y : array-like, shape [n_samples, n_outputs]
            True values for X

        Returns
        -------
        scores : float
            accuracy_score of self.predict(X) versus y
        """
        check_is_fitted(self, 'estimators_')
        n_outputs_ = len(self.estimators_)
        if y.ndim == 1:
            raise ValueError("y must have at least two dimensions for "
                             "multi target classification but has only one")
        if y.shape[1] != n_outputs_:
            raise ValueError("The number of outputs of Y for fit {0} and"
                             " score {1} should be same".
                             format(n_outputs_, y.shape[1]))
        y_pred = self.predict(X)
        # A sample counts as correct only if *every* output label matches.
        return np.mean(np.all(y == y_pred, axis=1))
| bsd-3-clause |
archf/ansible | test/units/modules/network/cumulus/test_nclu.py | 55 | 9561 | # -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys
import time
import unittest
from ansible.module_utils.basic import *
from ansible.modules.network.cumulus import nclu
class FakeModule(object):
    """In-memory stand-in for the Ansible module object used by the nclu tests.

    It records every command passed to ``run_command`` and emulates just
    enough of the ``net pending`` / ``net commit`` / ``net abort`` state
    machine for the module logic to be exercised without NCLU installed.
    Canned (rc, stdout, stderr) triples for arbitrary commands can be
    registered via ``mock_output``.
    """

    def __init__(self, **kwargs):
        self.reset()

    def exit_json(self, **kwargs):
        # Remember how the module would have exited successfully.
        self.exit_code = kwargs

    def fail_json(self, **kwargs):
        # Remember how the module would have failed.
        self.fail_code = kwargs

    def run_command(self, command):
        """Emulate one NCLU invocation; return an (rc, stdout, stderr) triple."""
        self.command_history.append(command)
        if command == "/usr/bin/net pending":
            return (0, self.pending, "")
        if command == "/usr/bin/net abort":
            self.pending = ""
            return (0, "", "")
        if command.startswith("/usr/bin/net commit"):
            if not self.pending:
                return (0, "commit ignored...there were no pending changes", "")
            self.last_commit = self.pending
            self.pending = ""
            return (0, "", "")
        if command == "/usr/bin/net show commit last":
            return (0, self.last_commit, "")
        # Any other command counts as a pending configuration change.
        self.pending += command
        return self.mocks.get(command, (0, "", ""))

    def mock_output(self, command, _rc, output, _err):
        """Register a canned (rc, stdout, stderr) triple for *command*."""
        self.mocks[command] = (_rc, output, _err)

    def reset(self):
        """Return the fake to its pristine, just-constructed state."""
        self.params = {}
        self.exit_code = {}
        self.fail_code = {}
        self.command_history = []
        self.mocks = {}
        self.pending = ""
        self.last_commit = ""
def skipUnlessNcluInstalled(original_function):
    """Decorator: run the test only when the NCLU binary is present.

    When ``/usr/bin/net`` exists the function is returned unchanged;
    otherwise it is marked as skipped.
    """
    if os.path.isfile('/usr/bin/net'):
        return original_function
    # BUGFIX: apply the unittest.skip decorator to the function instead of
    # returning the bare decorator object.  Previously the "test" attribute
    # became the decorator itself, so the test silently passed without
    # running rather than being reported as skipped.
    return unittest.skip('only run if nclu is installed')(original_function)
class TestNclu(unittest.TestCase):
    """Unit tests for the nclu Ansible module, driven through FakeModule.

    NOTE(review): the positional arguments of nclu.run_nclu appear to be
    (module, commands, template, commit, atomic, abort_first, description)
    -- confirm against the nclu module's signature.
    """
    def test_command_helper(self):
        module = FakeModule()
        module.mock_output("/usr/bin/net add int swp1", 0, "", "")
        result = nclu.command_helper(module, 'add int swp1', 'error out')
        self.assertEqual(module.command_history[-1], "/usr/bin/net add int swp1")
        self.assertEqual(result, "")
    def test_command_helper_error_code(self):
        module = FakeModule()
        module.mock_output("/usr/bin/net fake fail command", 1, "", "")
        result = nclu.command_helper(module, 'fake fail command', 'error out')
        self.assertEqual(module.fail_code, {'msg': "error out"})
    def test_command_helper_error_msg(self):
        module = FakeModule()
        module.mock_output("/usr/bin/net fake fail command", 0,
                           "ERROR: Command not found", "")
        result = nclu.command_helper(module, 'fake fail command', 'error out')
        self.assertEqual(module.fail_code, {'msg': "error out"})
    def test_command_helper_no_error_msg(self):
        # Without an explicit error message, the raw NCLU error is reported.
        module = FakeModule()
        module.mock_output("/usr/bin/net fake fail command", 0,
                           "ERROR: Command not found", "")
        result = nclu.command_helper(module, 'fake fail command')
        self.assertEqual(module.fail_code, {'msg': "ERROR: Command not found"})
    def test_empty_run(self):
        # No commands at all: only the two pending checks run, nothing changes.
        module = FakeModule()
        changed, output = nclu.run_nclu(module, None, None, False, False, False, "")
        self.assertEqual(module.command_history, ['/usr/bin/net pending',
                                                  '/usr/bin/net pending'])
        self.assertEqual(module.fail_code, {})
        self.assertEqual(changed, False)
    def test_command_list(self):
        module = FakeModule()
        changed, output = nclu.run_nclu(module, ['add int swp1', 'add int swp2'],
                                        None, False, False, False, "")
        self.assertEqual(module.command_history, ['/usr/bin/net pending',
                                                  '/usr/bin/net add int swp1',
                                                  '/usr/bin/net add int swp2',
                                                  '/usr/bin/net pending'])
        self.assertNotEqual(len(module.pending), 0)
        self.assertEqual(module.fail_code, {})
        self.assertEqual(changed, True)
    def test_command_list_commit(self):
        module = FakeModule()
        changed, output = nclu.run_nclu(module,
                                        ['add int swp1', 'add int swp2'],
                                        None, True, False, False, "committed")
        self.assertEqual(module.command_history, ['/usr/bin/net pending',
                                                  '/usr/bin/net add int swp1',
                                                  '/usr/bin/net add int swp2',
                                                  '/usr/bin/net pending',
                                                  "/usr/bin/net commit description 'committed'",
                                                  '/usr/bin/net show commit last'])
        self.assertEqual(len(module.pending), 0)
        self.assertEqual(module.fail_code, {})
        self.assertEqual(changed, True)
    def test_command_atomic(self):
        # atomic=True must abort any stale pending changes before starting.
        module = FakeModule()
        changed, output = nclu.run_nclu(module,
                                        ['add int swp1', 'add int swp2'],
                                        None, False, True, False, "atomically")
        self.assertEqual(module.command_history, ['/usr/bin/net abort',
                                                  '/usr/bin/net pending',
                                                  '/usr/bin/net add int swp1',
                                                  '/usr/bin/net add int swp2',
                                                  '/usr/bin/net pending',
                                                  "/usr/bin/net commit description 'atomically'",
                                                  '/usr/bin/net show commit last'])
        self.assertEqual(len(module.pending), 0)
        self.assertEqual(module.fail_code, {})
        self.assertEqual(changed, True)
    def test_command_abort_first(self):
        module = FakeModule()
        module.pending = "dirty"
        nclu.run_nclu(module, None, None, False, False, True, "")
        self.assertEqual(len(module.pending), 0)
    def test_command_template_commit(self):
        # Commands supplied as a multi-line template instead of a list.
        module = FakeModule()
        changed, output = nclu.run_nclu(module, None,
                                        " add int swp1\n add int swp2",
                                        True, False, False, "committed")
        self.assertEqual(module.command_history, ['/usr/bin/net pending',
                                                  '/usr/bin/net add int swp1',
                                                  '/usr/bin/net add int swp2',
                                                  '/usr/bin/net pending',
                                                  "/usr/bin/net commit description 'committed'",
                                                  '/usr/bin/net show commit last'])
        self.assertEqual(len(module.pending), 0)
        self.assertEqual(module.fail_code, {})
        self.assertEqual(changed, True)
    def test_commit_ignored(self):
        # Committing with nothing pending is ignored and followed by an abort.
        module = FakeModule()
        changed, output = nclu.run_nclu(module, None, None, True, False, False, "ignore me")
        self.assertEqual(module.command_history, ['/usr/bin/net pending',
                                                  '/usr/bin/net pending',
                                                  "/usr/bin/net commit description 'ignore me'",
                                                  '/usr/bin/net abort'])
        self.assertEqual(len(module.pending), 0)
        self.assertEqual(module.fail_code, {})
        self.assertEqual(changed, False)
| gpl-3.0 |
papouso/odoo | addons/share/wizard/share_wizard.py | 182 | 51223 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import random
import time
import uuid
from openerp import SUPERUSER_ID
import simplejson
from openerp import api
from openerp import tools
from openerp.osv import fields, osv
from openerp.osv import expression
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval
import openerp
_logger = logging.getLogger(__name__)
FULL_ACCESS = ('perm_read', 'perm_write', 'perm_create', 'perm_unlink')
READ_WRITE_ACCESS = ('perm_read', 'perm_write')
READ_ONLY_ACCESS = ('perm_read',)
UID_ROOT = 1
# Pseudo-domain to represent an empty filter, constructed using
# osv.expression's DUMMY_LEAF
DOMAIN_ALL = [(1, '=', 1)]
# A good selection of easy to read password characters (e.g. no '0' vs 'O', etc.)
RANDOM_PASS_CHARACTERS = 'aaaabcdeeeefghjkmnpqrstuvwxyzAAAABCDEEEEFGHJKLMNPQRSTUVWXYZ23456789'
def generate_random_pass():
    """Return a 10-character password sampled (without replacement) from the
    easy-to-read alphabet above, using a cryptographically strong RNG."""
    rng = random.SystemRandom()
    return ''.join(rng.sample(RANDOM_PASS_CHARACTERS, 10))
class share_wizard(osv.TransientModel):
_name = 'share.wizard'
_description = 'Share Wizard'
def _assert(self, condition, error_message, context=None):
"""Raise a user error with the given message if condition is not met.
The error_message should have been translated with _().
"""
if not condition:
raise osv.except_osv(_('Sharing access cannot be created.'), error_message)
def has_group(self, cr, uid, module, group_xml_id, context=None):
"""Returns True if current user is a member of the group identified by the module, group_xml_id pair."""
# if the group was deleted or does not exist, we say NO (better safe than sorry)
try:
model, group_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, module, group_xml_id)
except ValueError:
return False
return group_id in self.pool.get('res.users').read(cr, uid, [uid], ['groups_id'], context=context)[0]['groups_id']
def has_share(self, cr, uid, unused_param, context=None):
return self.has_group(cr, uid, module='share', group_xml_id='group_share_user', context=context)
def _user_type_selection(self, cr, uid, context=None):
"""Selection values may be easily overridden/extended via inheritance"""
return [('embedded', _('Direct link or embed code')), ('emails',_('Emails')), ]
"""Override of create() to auto-compute the action name"""
def create(self, cr, uid, values, context=None):
if 'action_id' in values and not 'name' in values:
action = self.pool.get('ir.actions.actions').browse(cr, uid, values['action_id'], context=context)
values['name'] = action.name
return super(share_wizard,self).create(cr, uid, values, context=context)
@api.cr_uid_ids_context
def share_url_template(self, cr, uid, _ids, context=None):
# NOTE: take _ids in parameter to allow usage through browse_record objects
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default='', context=context)
if base_url:
base_url += '/login?db=%(dbname)s&login=%(login)s&key=%(password)s'
extra = context and context.get('share_url_template_extra_arguments')
if extra:
base_url += '&' + '&'.join('%s=%%(%s)s' % (x,x) for x in extra)
hash_ = context and context.get('share_url_template_hash_arguments')
if hash_:
base_url += '#' + '&'.join('%s=%%(%s)s' % (x,x) for x in hash_)
return base_url
def _share_root_url(self, cr, uid, ids, _fieldname, _args, context=None):
result = dict.fromkeys(ids, '')
data = dict(dbname=cr.dbname, login='', password='')
for this in self.browse(cr, uid, ids, context=context):
result[this.id] = this.share_url_template() % data
return result
    def _generate_embedded_code(self, wizard, options=None):
        """Return the HTML/JS snippet that embeds the shared view in an
        external web page.

        :param wizard: share.wizard browse record
        :param options: optional dict overriding the 'title'/'search'
                        display options (falls back to the wizard's
                        embed_option_* fields when a key is absent)
        """
        cr, uid, context = wizard.env.args
        if options is None:
            options = {}
        js_options = {}
        title = options['title'] if 'title' in options else wizard.embed_option_title
        # The search view is never exposed for read-only shares.
        search = (options['search'] if 'search' in options else wizard.embed_option_search) if wizard.access_mode != 'readonly' else False
        if not title:
            js_options['display_title'] = False
        if search:
            js_options['search_view'] = True
        js_options_str = (', ' + simplejson.dumps(js_options)) if js_options else ''
        base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default=None, context=context)
        # Credentials of the first share-result line are embedded in the
        # snippet so the external page can log in automatically.
        user = wizard.result_line_ids[0]
        return """
<script type="text/javascript" src="%(base_url)s/web/webclient/js"></script>
<script type="text/javascript">
    new openerp.init(%(init)s).web.embed(%(server)s, %(dbname)s, %(login)s, %(password)s,%(action)d%(options)s);
</script> """ % {
            'init': simplejson.dumps(openerp.conf.server_wide_modules),
            'base_url': base_url or '',
            'server': simplejson.dumps(base_url),
            'dbname': simplejson.dumps(cr.dbname),
            'login': simplejson.dumps(user.login),
            'password': simplejson.dumps(user.password),
            'action': user.user_id.action_id.id,
            'options': js_options_str,
        }
def _embed_code(self, cr, uid, ids, _fn, _args, context=None):
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
result[this.id] = self._generate_embedded_code(this)
return result
def _embed_url(self, cr, uid, ids, _fn, _args, context=None):
if context is None:
context = {}
result = dict.fromkeys(ids, '')
for this in self.browse(cr, uid, ids, context=context):
if this.result_line_ids:
ctx = dict(context, share_url_template_hash_arguments=['action'])
user = this.result_line_ids[0]
data = dict(dbname=cr.dbname, login=user.login, password=user.password, action=this.action_id.id)
result[this.id] = this.share_url_template(context=ctx) % data
return result
_columns = {
'action_id': fields.many2one('ir.actions.act_window', 'Action to share', required=True,
help="The action that opens the screen containing the data you wish to share."),
'view_type': fields.char('Current View Type', required=True),
'domain': fields.char('Domain', help="Optional domain for further data filtering"),
'user_type': fields.selection(lambda s, *a, **k: s._user_type_selection(*a, **k),'Sharing method', required=True,
help="Select the type of user(s) you would like to share data with."),
'new_users': fields.text("Emails"),
'email_1': fields.char('New user email', size=64),
'email_2': fields.char('New user email', size=64),
'email_3': fields.char('New user email', size=64),
'invite': fields.boolean('Invite users to OpenSocial record'),
'access_mode': fields.selection([('readonly','Can view'),('readwrite','Can edit')],'Access Mode', required=True,
help="Access rights to be granted on the shared documents."),
'result_line_ids': fields.one2many('share.wizard.result.line', 'share_wizard_id', 'Summary', readonly=True),
'share_root_url': fields.function(_share_root_url, string='Share Access URL', type='char', readonly=True,
help='Main access page for users that are granted shared access'),
'name': fields.char('Share Title', required=True, help="Title for the share (displayed to users as menu and shortcut name)"),
'record_name': fields.char('Record name', help="Name of the shared record, if sharing a precise record"),
'message': fields.text("Personal Message", help="An optional personal message, to be included in the email notification."),
'embed_code': fields.function(_embed_code, type='text', string='Code',
help="Embed this code in your documents to provide a link to the "\
"shared document."),
'embed_option_title': fields.boolean('Display title'),
'embed_option_search': fields.boolean('Display search view'),
'embed_url': fields.function(_embed_url, string='Share URL', size=512, type='char', readonly=True),
}
_defaults = {
'view_type': 'page',
'user_type' : 'embedded',
'invite': False,
'domain': lambda self, cr, uid, context, *a: context.get('domain', '[]'),
'action_id': lambda self, cr, uid, context, *a: context.get('action_id'),
'access_mode': 'readwrite',
'embed_option_title': True,
'embed_option_search': True,
}
def has_email(self, cr, uid, context=None):
return bool(self.pool.get('res.users').browse(cr, uid, uid, context=context).email)
def go_step_1(self, cr, uid, ids, context=None):
wizard_data = self.browse(cr,uid,ids,context)[0]
if wizard_data.user_type == 'emails' and not self.has_email(cr, uid, context=context):
raise osv.except_osv(_('No email address configured'),
_('You must configure your email address in the user preferences before using the Share button.'))
model, res_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'share', 'action_share_wizard_step1')
action = self.pool[model].read(cr, uid, [res_id], context=context)[0]
action['res_id'] = ids[0]
action.pop('context', '')
return action
def _create_share_group(self, cr, uid, wizard_data, context=None):
group_obj = self.pool.get('res.groups')
share_group_name = '%s: %s (%d-%s)' %('Shared', wizard_data.name, uid, time.time())
values = {'name': share_group_name, 'share': True}
try:
implied_group_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'share', 'group_shared')[1]
except ValueError:
implied_group_id = None
if implied_group_id:
values['implied_ids'] = [(4, implied_group_id)]
# create share group without putting admin in it
return group_obj.create(cr, UID_ROOT, values, {'noadmin': True})
    def _create_new_share_users(self, cr, uid, wizard_data, group_id, context=None):
        """Create one new res.users record for each email address provided in
           wizard_data.new_users, ignoring already existing users.

           Populates wizard_data.result_line_ids with one new line for
           each user (existing or not). New users will also have a value
           for the password field, so they can receive it by email.

           In 'embedded' mode a single anonymous 'embedded-<uuid>' user is
           created instead.

           Returns the ids of the created users, and the ids of the
           ignored, existing ones."""
        context = dict(context or {})
        user_obj = self.pool.get('res.users')
        current_user = user_obj.browse(cr, UID_ROOT, uid, context=context)
        # modify context to disable shortcuts when creating share users
        context['noshortcut'] = True
        context['no_reset_password'] = True
        created_ids = []
        existing_ids = []
        if wizard_data.user_type == 'emails':
            # get new user list from email data (free-text field + the
            # three individual email fields)
            new_users = (wizard_data.new_users or '').split('\n')
            new_users += [wizard_data.email_1 or '', wizard_data.email_2 or '', wizard_data.email_3 or '']
            for new_user in new_users:
                # Ignore blank lines
                new_user = new_user.strip()
                if not new_user: continue
                # Ignore the user if it already exists.
                # Invite mode matches by email, normal mode by login.
                if not wizard_data.invite:
                    existing = user_obj.search(cr, UID_ROOT, [('login', '=', new_user)])
                else:
                    existing = user_obj.search(cr, UID_ROOT, [('email', '=', new_user)])
                existing_ids.extend(existing)
                if existing:
                    new_line = { 'user_id': existing[0],
                                 'newly_created': False}
                    wizard_data.write({'result_line_ids': [(0,0,new_line)]})
                    continue
                new_pass = generate_random_pass()
                # Users are created as UID_ROOT; they inherit the sharer's company.
                user_id = user_obj.create(cr, UID_ROOT, {
                        'login': new_user,
                        'password': new_pass,
                        'name': new_user,
                        'email': new_user,
                        'groups_id': [(6,0,[group_id])],
                        'company_id': current_user.company_id.id,
                        'company_ids': [(6, 0, [current_user.company_id.id])],
                }, context)
                # Keep the generated password on the result line so it can
                # be sent to the user by email.
                new_line = { 'user_id': user_id,
                             'password': new_pass,
                             'newly_created': True}
                wizard_data.write({'result_line_ids': [(0,0,new_line)]})
                created_ids.append(user_id)
        elif wizard_data.user_type == 'embedded':
            # One anonymous technical user per embedded share.
            new_login = 'embedded-%s' % (uuid.uuid4().hex,)
            new_pass = generate_random_pass()
            user_id = user_obj.create(cr, UID_ROOT, {
                'login': new_login,
                'password': new_pass,
                'name': new_login,
                'groups_id': [(6,0,[group_id])],
                'company_id': current_user.company_id.id,
                'company_ids': [(6, 0, [current_user.company_id.id])],
            }, context)
            new_line = { 'user_id': user_id,
                         'password': new_pass,
                         'newly_created': True}
            wizard_data.write({'result_line_ids': [(0,0,new_line)]})
            created_ids.append(user_id)
        return created_ids, existing_ids
def _create_action(self, cr, uid, values, context=None):
if context is None:
context = {}
new_context = context.copy()
for key in context:
if key.startswith('default_'):
del new_context[key]
action_id = self.pool.get('ir.actions.act_window').create(cr, UID_ROOT, values, new_context)
return action_id
def _cleanup_action_context(self, context_str, user_id):
"""Returns a dict representing the context_str evaluated (safe_eval) as
a dict where items that are not useful for shared actions
have been removed. If the evaluation of context_str as a
dict fails, context_str is returned unaltered.
:param user_id: the integer uid to be passed as 'uid' in the
evaluation context
"""
result = False
if context_str:
try:
context = safe_eval(context_str, tools.UnquoteEvalContext(), nocopy=True)
result = dict(context)
for key in context:
# Remove all context keys that seem to toggle default
# filters based on the current user, as it makes no sense
# for shared users, who would not see any data by default.
if key and key.startswith('search_default_') and 'user_id' in key:
result.pop(key)
except Exception:
# Note: must catch all exceptions, as UnquoteEvalContext may cause many
# different exceptions, as it shadows builtins.
_logger.debug("Failed to cleanup action context as it does not parse server-side", exc_info=True)
result = context_str
return result
    def _shared_action_def(self, cr, uid, wizard_data, context=None):
        """Build the values dict of the ir.actions.act_window that share
        users will receive, derived from the action being shared.

        For read-only shares only the wizard's current view type is kept
        (and the search view is disabled); otherwise the copied action's
        own view configuration is reused.
        """
        copied_action = wizard_data.action_id
        if wizard_data.access_mode == 'readonly':
            view_mode = wizard_data.view_type
            view_id = copied_action.view_id.id if copied_action.view_id.type == wizard_data.view_type else False
        else:
            view_mode = copied_action.view_mode
            view_id = copied_action.view_id.id
        action_def = {
            'name': wizard_data.name,
            'domain': copied_action.domain,
            # Strip user-specific default filters from the context.
            'context': self._cleanup_action_context(wizard_data.action_id.context, uid),
            'res_model': copied_action.res_model,
            'view_mode': view_mode,
            'view_type': copied_action.view_type,
            'search_view_id': copied_action.search_view_id.id if wizard_data.access_mode != 'readonly' else False,
            'view_id': view_id,
            'auto_search': True,
        }
        if copied_action.view_ids:
            # Copy explicit view assignments; restrict to the shared view
            # type in read-only mode.
            action_def['view_ids'] = [(0,0,{'sequence': x.sequence,
                                            'view_mode': x.view_mode,
                                            'view_id': x.view_id.id })
                                      for x in copied_action.view_ids
                                      if (wizard_data.access_mode != 'readonly' or x.view_mode == wizard_data.view_type)
                                     ]
        return action_def
    def _setup_action_and_shortcut(self, cr, uid, wizard_data, user_ids, make_home, context=None):
        """Create a shortcut to reach the shared data, as well as the corresponding action, for
           each user in ``user_ids``, and assign it as their home action if ``make_home`` is True.

           Meant to be overridden for special cases.
        """
        # NOTE(review): the caller's context is discarded here (context=None)
        # and is also not forwarded to _create_action below — looks
        # intentional but TODO confirm before changing.
        values = self._shared_action_def(cr, uid, wizard_data, context=None)
        user_obj = self.pool.get('res.users')
        for user_id in user_ids:
            # Create the action as the target user (uid = user_id).
            action_id = self._create_action(cr, user_id, values)
            if make_home:
                # We do this only for new share users, as existing ones already have their initial home
                # action. Resetting to the default menu does not work well as the menu is rather empty
                # and does not contain the shortcuts in most cases.
                user_obj.write(cr, UID_ROOT, [user_id], {'action_id': action_id})
    def _get_recursive_relations(self, cr, uid, model, ttypes, relation_fields=None, suffix=None, context=None):
        """Returns list of tuples representing recursive relationships of type ``ttypes`` starting from
           the given model.

           Each tuple is (reverse_field_path_or_None, ir.model browse_record).

           :param model: browsable ir.model record to start loading relationships from
           :param ttypes: list of relationship types to follow (e.g: ['one2many','many2many'])
           :param relation_fields: list of previously followed relationship tuples - to avoid duplicates
                                   during recursion
           :param suffix: optional suffix to append to the field path to reach the main object
        """
        if relation_fields is None:
            relation_fields = []
        local_rel_fields = []
        # Models already covered — never visit a model twice.
        models = [x[1].model for x in relation_fields]
        model_obj = self.pool.get('ir.model')
        model_osv = self.pool[model.model]
        for field in model_osv._fields.itervalues():
            ftype = field.type
            relation_field = None
            if ftype in ttypes and field.comodel_name not in models:
                relation_model_id = model_obj.search(cr, UID_ROOT, [('model','=',field.comodel_name)])[0]
                relation_model_browse = model_obj.browse(cr, UID_ROOT, relation_model_id, context=context)
                relation_osv = self.pool[field.comodel_name]
                #skip virtual one2many fields (related, ...) as there is no reverse relationship
                if ftype == 'one2many' and field.inverse_name:
                    # don't record reverse path if it's not a real m2o (that happens, but rarely)
                    dest_fields = relation_osv._fields
                    reverse_rel = field.inverse_name
                    if reverse_rel in dest_fields and dest_fields[reverse_rel].type == 'many2one':
                        relation_field = ('%s.%s'%(reverse_rel, suffix)) if suffix else reverse_rel
                local_rel_fields.append((relation_field, relation_model_browse))
                # Also include _inherits parents of the related model.
                for parent in relation_osv._inherits:
                    if parent not in models:
                        parent_model = self.pool[parent]
                        parent_fields = parent_model._fields
                        parent_model_browse = model_obj.browse(cr, UID_ROOT,
                                                               model_obj.search(cr, UID_ROOT, [('model','=',parent)]))[0]
                        if relation_field and field.inverse_name in parent_fields:
                            # inverse relationship is available in the parent
                            local_rel_fields.append((relation_field, parent_model_browse))
                        else:
                            # TODO: can we setup a proper rule to restrict inherited models
                            # in case the parent does not contain the reverse m2o?
                            local_rel_fields.append((None, parent_model_browse))
                # Recurse only through x2many links, never back into self.
                if relation_model_id != model.id and ftype in ['one2many', 'many2many']:
                    local_rel_fields += self._get_recursive_relations(cr, uid, relation_model_browse,
                        [ftype], relation_fields + local_rel_fields, suffix=relation_field, context=context)
        return local_rel_fields
    def _get_relationship_classes(self, cr, uid, model, context=None):
        """Computes the *relationship classes* reachable from the given
           model. The 4 relationship classes are:
           - [obj0]: the given model itself (and its parents via _inherits, if any)
           - [obj1]: obj0 and all other models recursively accessible from
                     obj0 via one2many relationships
           - [obj2]: obj0 and all other models recursively accessible from
                     obj0 via one2many and many2many relationships
           - [obj3]: all models recursively accessible from obj1 via many2one
                     relationships

           Each class is returned as a list of pairs [(field,model_browse)], where
           ``model`` is the browse_record of a reachable ir.model, and ``field`` is
           the dot-notation reverse relationship path coming from that model to obj0,
           or None if there is no reverse path.

           :return: ([obj0], [obj1], [obj2], [obj3])
        """
        # obj0 class and its parents
        obj0 = [(None, model)]
        model_obj = self.pool[model.model]
        ir_model_obj = self.pool.get('ir.model')
        for parent in model_obj._inherits:
            parent_model_browse = ir_model_obj.browse(cr, UID_ROOT,
                ir_model_obj.search(cr, UID_ROOT, [('model','=',parent)]))[0]
            obj0 += [(None, parent_model_browse)]
        obj1 = self._get_recursive_relations(cr, uid, model, ['one2many'], relation_fields=obj0, context=context)
        obj2 = self._get_recursive_relations(cr, uid, model, ['one2many', 'many2many'], relation_fields=obj0, context=context)
        obj3 = self._get_recursive_relations(cr, uid, model, ['many2one'], relation_fields=obj0, context=context)
        # NOTE: the loop variable shadows the ``model`` parameter, which is
        # not used afterwards — harmless but worth knowing when editing.
        for dummy, model in obj1:
            obj3 += self._get_recursive_relations(cr, uid, model, ['many2one'], relation_fields=obj0, context=context)
        return obj0, obj1, obj2, obj3
def _get_access_map_for_groups_and_models(self, cr, uid, group_ids, model_ids, context=None):
model_access_obj = self.pool.get('ir.model.access')
user_right_ids = model_access_obj.search(cr, uid,
[('group_id', 'in', group_ids), ('model_id', 'in', model_ids)],
context=context)
user_access_matrix = {}
if user_right_ids:
for access_right in model_access_obj.browse(cr, uid, user_right_ids, context=context):
access_line = user_access_matrix.setdefault(access_right.model_id.model, set())
for perm in FULL_ACCESS:
if getattr(access_right, perm, 0):
access_line.add(perm)
return user_access_matrix
    def _add_access_rights_for_share_group(self, cr, uid, group_id, mode, fields_relations, context=None):
        """Adds access rights to group_id on object models referenced in ``fields_relations``,
           intersecting with access rights of current user to avoid granting too much rights

           :param mode: 'readonly' grants READ_ONLY_ACCESS perms, anything
                        else grants READ_WRITE_ACCESS perms
           :param fields_relations: list of (field_path, ir.model browse) pairs
        """
        model_access_obj = self.pool.get('ir.model.access')
        user_obj = self.pool.get('res.users')
        target_model_ids = [x[1].id for x in fields_relations]
        perms_to_add = (mode == 'readonly') and READ_ONLY_ACCESS or READ_WRITE_ACCESS
        current_user = user_obj.browse(cr, uid, uid, context=context)
        # What the sharer can do vs. what the share group already has.
        current_user_access_map = self._get_access_map_for_groups_and_models(cr, uid,
            [x.id for x in current_user.groups_id], target_model_ids, context=context)
        group_access_map = self._get_access_map_for_groups_and_models(cr, uid,
            [group_id], target_model_ids, context=context)
        _logger.debug("Current user access matrix: %r", current_user_access_map)
        _logger.debug("New group current access matrix: %r", group_access_map)
        # Create required rights if allowed by current user rights and not
        # already granted
        for dummy, model in fields_relations:
            # mail.message is transversal: it should not received directly the access rights
            if model.model in ['mail.message', 'mail.notification', 'res.company']: continue
            values = {
                'name': _('Copied access for sharing'),
                'group_id': group_id,
                'model_id': model.id,
            }
            current_user_access_line = current_user_access_map.get(model.model,set())
            existing_group_access_line = group_access_map.get(model.model,set())
            need_creation = False
            for perm in perms_to_add:
                # Grant only perms the sharer has and the group lacks.
                if perm in current_user_access_line \
                   and perm not in existing_group_access_line:
                    values.update({perm:True})
                    group_access_map.setdefault(model.model, set()).add(perm)
                    need_creation = True
            if need_creation:
                model_access_obj.create(cr, UID_ROOT, values)
                _logger.debug("Creating access right for model %s with values: %r", model.model, values)
    def _link_or_copy_current_user_rules(self, cr, current_user, group_id, fields_relations, context=None):
        """Propagate the sharer's record rules to the share group.

        For each model in ``fields_relations``, every ir.rule attached to
        one of the sharer's groups is either linked to the share group
        (dynamic) or copied with its evaluated domain when it appears to
        reference the current user.
        """
        rule_obj = self.pool.get('ir.rule')
        rules_done = set()  # rule ids already processed across groups/models
        for group in current_user.groups_id:
            for dummy, model in fields_relations:
                for rule in group.rule_groups:
                    if rule.id in rules_done:
                        continue
                    rules_done.add(rule.id)
                    if rule.model_id.id == model.id:
                        if 'user.' in rule.domain_force:
                            # Above pattern means there is likely a condition
                            # specific to current user, so we must copy the rule using
                            # the evaluated version of the domain.
                            # And it's better to copy one time too much than too few
                            rule_obj.copy(cr, UID_ROOT, rule.id, default={
                                'name': '%s %s' %(rule.name, _('(Copy for sharing)')),
                                'groups': [(6,0,[group_id])],
                                'domain_force': rule.domain, # evaluated version!
                            })
                            _logger.debug("Copying rule %s (%s) on model %s with domain: %s", rule.name, rule.id, model.model, rule.domain_force)
                        else:
                            # otherwise we can simply link the rule to keep it dynamic
                            rule_obj.write(cr, SUPERUSER_ID, [rule.id], {
                                    'groups': [(4,group_id)]
                                })
                            _logger.debug("Linking rule %s (%s) on model %s with domain: %s", rule.name, rule.id, model.model, rule.domain_force)
    def _check_personal_rule_or_duplicate(self, cr, group_id, rule, context=None):
        """Verifies that the given rule only belongs to the given group_id, otherwise
           duplicate it for the current group, and unlink the previous one.
           The duplicated rule has the original domain copied verbatim, without
           any evaluation.
           Returns the final rule to use (browse_record), either the original one if it
           only belongs to this group, or the copy."""
        # Rule is private to this group already: safe to modify in place.
        if len(rule.groups) == 1:
            return rule
        # duplicate it first:
        rule_obj = self.pool.get('ir.rule')
        new_id = rule_obj.copy(cr, UID_ROOT, rule.id,
                               default={
                                       'name': '%s %s' %(rule.name, _('(Duplicated for modified sharing permissions)')),
                                       'groups': [(6,0,[group_id])],
                                       'domain_force': rule.domain_force, # non evaluated!
                               })
        _logger.debug("Duplicating rule %s (%s) (domain: %s) for modified access ", rule.name, rule.id, rule.domain_force)
        # then disconnect from group_id:
        rule.write({'groups':[(3,group_id)]}) # disconnects, does not delete!
        return rule_obj.browse(cr, UID_ROOT, new_id, context=context)
    def _create_or_combine_sharing_rule(self, cr, current_user, wizard_data, group_id, model_id, domain, restrict=False, rule_name=None, context=None):
        """Add a new ir.rule entry for model_id and domain on the target group_id.
           If ``restrict`` is True, instead of adding a rule, the domain is
           combined with AND operator with all existing rules in the group, to implement
           an additional restriction (as of 6.1, multiple rules in the same group are
           OR'ed by default, so a restriction must alter all existing rules)

           This is necessary because the personal rules of the user that is sharing
           are first copied to the new share group. Afterwards the filters used for
           sharing are applied as an additional layer of rules, which are likely to
           apply to the same model. The default rule algorithm would OR them (as of 6.1),
           which would result in a combined set of permission that could be larger
           than those of the user that is sharing! Hence we must forcefully AND the
           rules at this stage.

           One possibly undesirable effect can appear when sharing with a
           pre-existing group, in which case altering pre-existing rules would not
           be desired. This is addressed in the portal module.
           """
        if rule_name is None:
            rule_name = _('Sharing filter created by user %s (%s) for group %s') % \
                            (current_user.name, current_user.login, group_id)
        rule_obj = self.pool.get('ir.rule')
        rule_ids = rule_obj.search(cr, UID_ROOT, [('groups', 'in', group_id), ('model_id', '=', model_id)])
        if rule_ids:
            for rule in rule_obj.browse(cr, UID_ROOT, rule_ids, context=context):
                if rule.domain_force == domain:
                    # don't create it twice!
                    if restrict:
                        continue
                    else:
                        _logger.debug("Ignoring sharing rule on model %s with domain: %s the same rule exists already", model_id, domain)
                        return
                if restrict:
                    # restricting existing rules is done by adding the clause
                    # with an AND, but we can't alter the rule if it belongs to
                    # other groups, so we duplicate if needed
                    rule = self._check_personal_rule_or_duplicate(cr, group_id, rule, context=context)
                    eval_ctx = rule_obj._eval_context_for_combinations()
                    org_domain = expression.normalize_domain(safe_eval(rule.domain_force, eval_ctx))
                    new_clause = expression.normalize_domain(safe_eval(domain, eval_ctx))
                    combined_domain = expression.AND([new_clause, org_domain])
                    rule.write({'domain_force': combined_domain, 'name': rule.name + _('(Modified)')})
                    _logger.debug("Combining sharing rule %s on model %s with domain: %s", rule.id, model_id, domain)
        if not rule_ids or not restrict:
            # Adding the new rule in the group is ok for normal cases, because rules
            # in the same group and for the same model will be combined with OR
            # (as of v6.1), so the desired effect is achieved.
            rule_obj.create(cr, UID_ROOT, {
                'name': rule_name,
                'model_id': model_id,
                'domain_force': domain,
                'groups': [(4,group_id)]
            })
            _logger.debug("Created sharing rule on model %s with domain: %s", model_id, domain)
    def _create_indirect_sharing_rules(self, cr, current_user, wizard_data, group_id, fields_relations, context=None):
        """Create restricting ir.rules on the models *related* to the shared
        one, by prefixing each leaf of the sharing domain with the reverse
        relationship path (e.g. ('id','in',X) -> ('project_id.id','in',X)).
        """
        rule_name = _('Indirect sharing filter created by user %s (%s) for group %s') % \
                        (current_user.name, current_user.login, group_id)
        try:
            domain = safe_eval(wizard_data.domain)
            if domain:
                for rel_field, model in fields_relations:
                    # mail.message is transversal: it should not received directly the access rights
                    if model.model in ['mail.message', 'mail.notification', 'res.company']: continue
                    related_domain = []
                    # No reverse path to the shared model: nothing to restrict.
                    if not rel_field: continue
                    for element in domain:
                        if expression.is_leaf(element):
                            left, operator, right = element
                            left = '%s.%s'%(rel_field, left)
                            element = left, operator, right
                        related_domain.append(element)
                    self._create_or_combine_sharing_rule(cr, current_user, wizard_data,
                         group_id, model_id=model.id, domain=str(related_domain),
                         rule_name=rule_name, restrict=True, context=context)
        except Exception:
            # Any failure here aborts the whole share with a user-level error.
            _logger.exception('Failed to create share access')
            raise osv.except_osv(_('Sharing access cannot be created.'),
                                 _('Sorry, the current screen and filter you are trying to share are not supported at the moment.\nYou may want to try a simpler filter.'))
def _check_preconditions(self, cr, uid, wizard_data, context=None):
self._assert(wizard_data.action_id and wizard_data.access_mode,
_('Action and Access Mode are required to create a shared access.'),
context=context)
self._assert(self.has_share(cr, uid, wizard_data, context=context),
_('You must be a member of the Share/User group to use the share wizard.'),
context=context)
if wizard_data.user_type == 'emails':
self._assert((wizard_data.new_users or wizard_data.email_1 or wizard_data.email_2 or wizard_data.email_3),
_('Please indicate the emails of the persons to share with, one per line.'),
context=context)
    def _create_share_users_group(self, cr, uid, wizard_data, context=None):
        """Creates the appropriate share group and share users, and populates
           result_line_ids of wizard_data with one line for each user.

           :return: a tuple composed of the new group id (to which the shared access should be granted),
                the ids of the new share users that have been created and the ids of the existing share users
        """
        group_id = self._create_share_group(cr, uid, wizard_data, context=context)
        # First create any missing user, based on the email addresses provided
        new_ids, existing_ids = self._create_new_share_users(cr, uid, wizard_data, group_id, context=context)
        # Finally, setup the new action and shortcut for the users.
        if existing_ids:
            # existing users still need to join the new group
            self.pool.get('res.users').write(cr, UID_ROOT, existing_ids, {
                'groups_id': [(4,group_id)],
            })
            # existing user don't need their home action replaced, only a new shortcut
            self._setup_action_and_shortcut(cr, uid, wizard_data, existing_ids, make_home=False, context=context)
        if new_ids:
            # new users need a new shortcut AND a home action
            self._setup_action_and_shortcut(cr, uid, wizard_data, new_ids, make_home=True, context=context)
        return group_id, new_ids, existing_ids
    def go_step_2(self, cr, uid, ids, context=None):
        """Main sharing step: create the share group and users, grant
        access rights (ACLs) and record rules, send notifications, and
        return the closing action (summary form, or the shared record in
        invite mode).
        """
        wizard_data = self.browse(cr, uid, ids[0], context=context)
        self._check_preconditions(cr, uid, wizard_data, context=context)
        # Create shared group and users
        group_id, new_ids, existing_ids = self._create_share_users_group(cr, uid, wizard_data, context=context)
        current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        model_obj = self.pool.get('ir.model')
        model_id = model_obj.search(cr, uid, [('model','=', wizard_data.action_id.res_model)])[0]
        model = model_obj.browse(cr, uid, model_id, context=context)
        # ACCESS RIGHTS
        # We have several classes of objects that should receive different access rights:
        # Let:
        #   - [obj0] be the target model itself (and its parents via _inherits, if any)
        #   - [obj1] be the target model and all other models recursively accessible from
        #            obj0 via one2many relationships
        #   - [obj2] be the target model and all other models recursively accessible from
        #            obj0 via one2many and many2many relationships
        #   - [obj3] be all models recursively accessible from obj1 via many2one relationships
        #            (currently not used)
        obj0, obj1, obj2, obj3 = self._get_relationship_classes(cr, uid, model, context=context)
        mode = wizard_data.access_mode
        # Add access to [obj0] and [obj1] according to chosen mode
        self._add_access_rights_for_share_group(cr, uid, group_id, mode, obj0, context=context)
        self._add_access_rights_for_share_group(cr, uid, group_id, mode, obj1, context=context)
        # Add read-only access (always) to [obj2]
        self._add_access_rights_for_share_group(cr, uid, group_id, 'readonly', obj2, context=context)
        # IR.RULES
        #   A. On [obj0], [obj1], [obj2]: add all rules from all groups of
        #     the user that is sharing
        #     Warning: rules must be copied instead of linked if they contain a reference
        #     to uid or if the rule is shared with other groups (and it must be replaced correctly)
        #   B. On [obj0]: 1 rule with domain of shared action
        #   C. For each model in [obj1]: 1 rule in the form:
        #           many2one_rel.domain_of_obj0
        #        where many2one_rel is the many2one used in the definition of the
        #        one2many, and domain_of_obj0 is the sharing domain
        #        For example if [obj0] is project.project with a domain of
        #                ['id', 'in', [1,2]]
        #        then we will have project.task in [obj1] and we need to create this
        #        ir.rule on project.task:
        #                ['project_id.id', 'in', [1,2]]
        # A.
        all_relations = obj0 + obj1 + obj2
        self._link_or_copy_current_user_rules(cr, current_user, group_id, all_relations, context=context)
        # B.
        main_domain = wizard_data.domain if wizard_data.domain != '[]' else str(DOMAIN_ALL)
        self._create_or_combine_sharing_rule(cr, current_user, wizard_data,
            group_id, model_id=model.id, domain=main_domain,
            restrict=True, context=context)
        # C.
        self._create_indirect_sharing_rules(cr, current_user, wizard_data, group_id, obj1, context=context)
        # refresh wizard_data
        wizard_data = self.browse(cr, uid, ids[0], context=context)
        # EMAILS AND NOTIFICATIONS
        #  A. Not invite: as before
        #     -> send emails to destination users
        #  B. Invite (OpenSocial)
        #     -> subscribe all users (existing and new) to the record
        #     -> send a notification with a summary to the current record
        #     -> send a notification to all users; users allowing to receive
        #        emails in preferences will receive it
        #        new users by default receive all notifications by email
        # A.
        if not wizard_data.invite:
            self.send_emails(cr, uid, wizard_data, context=context)
        # B.
        else:
            # Invite (OpenSocial): automatically subscribe users to the record
            res_id = 0
            # NOTE(review): assumes an ('id', '=', <int>) leaf in the domain;
            # cond[2] would be a list for an 'in' operator — TODO confirm.
            for cond in safe_eval(main_domain):
                if cond[0] == 'id':
                    res_id = cond[2]
            # Record id not found: issue
            if res_id <= 0:
                raise osv.except_osv(_('Record id not found'), _('The share engine has not been able to fetch a record_id for your invitation.'))
            self.pool[model.model].message_subscribe(cr, uid, [res_id], new_ids + existing_ids, context=context)
            # self.send_invite_email(cr, uid, wizard_data, context=context)
            # self.send_invite_note(cr, uid, model.model, res_id, wizard_data, context=context)
        # CLOSE
        #  A. Not invite: as before
        #  B. Invite: skip summary screen, get back to the record
        # A.
        if not wizard_data.invite:
            dummy, step2_form_view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'share', 'share_step2_form')
            return {
                'name': _('Shared access created!'),
                'view_type': 'form',
                'view_mode': 'form',
                'res_model': 'share.wizard',
                'view_id': False,
                'res_id': ids[0],
                'views': [(step2_form_view_id, 'form'), (False, 'tree'), (False, 'calendar'), (False, 'graph')],
                'type': 'ir.actions.act_window',
                'target': 'new'
            }
        # B.
        else:
            return {
                'view_type': 'form',
                'view_mode': 'form',
                'res_model': model.model,
                'view_id': False,
                'res_id': res_id,
                'views': [(False, 'form'), (False, 'tree'), (False, 'calendar'), (False, 'graph')],
                'type': 'ir.actions.act_window',
            }
def send_invite_note(self, cr, uid, model_name, res_id, wizard_data, context=None):
subject = _('Invitation')
body = 'has been <b>shared</b> with'
tmp_idx = 0
for result_line in wizard_data.result_line_ids:
body += ' @%s' % (result_line.user_id.login)
if tmp_idx < len(wizard_data.result_line_ids)-2:
body += ','
elif tmp_idx == len(wizard_data.result_line_ids)-2:
body += ' and'
body += '.'
return self.pool[model_name].message_post(cr, uid, [res_id], body=body, context=context)
    def send_invite_email(self, cr, uid, wizard_data, context=None):
        """Send one invitation email per share-result line, with credentials
        for newly-created users, and create a mail.notification for each.

        Raises when the current (sending) user has no email configured.
        """
        # TDE Note: not updated because will disappear
        message_obj = self.pool.get('mail.message')
        notification_obj = self.pool.get('mail.notification')
        user = self.pool.get('res.users').browse(cr, UID_ROOT, uid)
        if not user.email:
            raise osv.except_osv(_('Email Required'), _('The current user must have an email address configured in User Preferences to be able to send outgoing emails.'))
        # TODO: also send an HTML version of this mail
        for result_line in wizard_data.result_line_ids:
            email_to = result_line.user_id.email
            # Silently skip recipients without an email address.
            if not email_to:
                continue
            subject = _('Invitation to collaborate about %s') % (wizard_data.record_name)
            body = _("Hello,\n\n")
            body += _("I have shared %s (%s) with you!\n\n") % (wizard_data.record_name, wizard_data.name)
            if wizard_data.message:
                body += "%s\n\n" % (wizard_data.message)
            if result_line.newly_created:
                # Include the generated credentials for brand-new users only.
                body += _("The documents are not attached, you can view them online directly on my Odoo server at:\n    %s\n\n") % (result_line.share_url)
                body += _("These are your credentials to access this protected area:\n")
                body += "%s: %s" % (_("Username"), result_line.user_id.login) + "\n"
                body += "%s: %s" % (_("Password"), result_line.password) + "\n"
                body += "%s: %s" % (_("Database"), cr.dbname) + "\n"
            body += _("The documents have been automatically added to your subscriptions.\n\n")
            body += '%s\n\n' % ((user.signature or ''))
            body += "--\n"
            body += _("Odoo is a powerful and user-friendly suite of Business Applications (CRM, Sales, HR, etc.)\n"
                      "It is open source and can be found on https://www.odoo.com.")
            msg_id = message_obj.schedule_with_attach(cr, uid, user.email, [email_to], subject, body, model='', context=context)
            notification_obj.create(cr, uid, {'user_id': result_line.user_id.id, 'message_id': msg_id}, context=context)
def send_emails(self, cr, uid, wizard_data, context=None):
    """Send share-notification e-mails for every result line of the wizard.

    Unlike :meth:`send_invite_email`, this creates ``mail.mail`` records
    directly (body wrapped in ``<pre>``) and forces immediate delivery.

    :param cr: database cursor
    :param uid: id of the current user (the sender)
    :param wizard_data: browse record of the share wizard
    :raises osv.except_osv: if the current user has no e-mail configured
    """
    _logger.info('Sending share notifications by email...')
    mail_mail = self.pool.get('mail.mail')
    # Browse as superuser so the sender's e-mail is readable regardless of ACLs.
    user = self.pool.get('res.users').browse(cr, UID_ROOT, uid)
    if not user.email:
        raise osv.except_osv(_('Email Required'), _('The current user must have an email address configured in User Preferences to be able to send outgoing emails.'))
    # TODO: also send an HTML version of this mail
    mail_ids = []
    for result_line in wizard_data.result_line_ids:
        email_to = result_line.user_id.email
        if not email_to:
            # Recipient has no address configured: skip silently.
            continue
        subject = wizard_data.name
        body = _("Hello,\n\n")
        body += _("I've shared %s with you!\n\n") % wizard_data.name
        body += _("The documents are not attached, you can view them online directly on my Odoo server at:\n %s\n\n") % (result_line.share_url)
        if wizard_data.message:
            body += '%s\n\n' % (wizard_data.message)
        if result_line.newly_created:
            # Freshly created external users get their credentials inline.
            body += _("These are your credentials to access this protected area:\n")
            body += "%s: %s\n" % (_("Username"), result_line.user_id.login)
            body += "%s: %s\n" % (_("Password"), result_line.password)
            body += "%s: %s\n" % (_("Database"), cr.dbname)
        else:
            # Existing users keep their current credentials.
            body += _("The documents have been automatically added to your current Odoo documents.\n")
            body += _("You may use your current login (%s) and password to view them.\n") % result_line.user_id.login
        body += "\n\n%s\n\n" % ( (user.signature or '') )
        body += "--\n"
        body += _("Odoo is a powerful and user-friendly suite of Business Applications (CRM, Sales, HR, etc.)\n"
                  "It is open source and can be found on https://www.odoo.com.")
        mail_ids.append(mail_mail.create(cr, uid, {
                'email_from': user.email,
                'email_to': email_to,
                'subject': subject,
                'body_html': '<pre>%s</pre>' % body}, context=context))
    # force direct delivery, as users expect instant notification
    mail_mail.send(cr, uid, mail_ids, context=context)
    _logger.info('%d share notification(s) sent.', len(mail_ids))
def onchange_embed_options(self, cr, uid, ids, opt_title, opt_search, context=None):
    """Recompute the embed-code preview whenever an embed option changes."""
    record = self.browse(cr, uid, ids[0], context)
    embed_options = {'title': opt_title, 'search': opt_search}
    embed_code = self._generate_embedded_code(record, embed_options)
    return {'value': {'embed_code': embed_code}}
class share_result_line(osv.osv_memory):
    """One result line of the share wizard: a (user, password, share URL) triple."""
    _name = 'share.wizard.result.line'
    _rec_name = 'user_id'

    def _share_url(self, cr, uid, ids, _fieldname, _args, context=None):
        # Function-field implementation: build the personalized share URL for
        # each line from the wizard's URL template and the line's credentials.
        result = dict.fromkeys(ids, '')
        for this in self.browse(cr, uid, ids, context=context):
            data = dict(dbname=cr.dbname, login=this.login, password=this.password)
            if this.share_wizard_id and this.share_wizard_id.action_id:
                # Include the action id in the URL fragment when one is set.
                data['action'] = this.share_wizard_id.action_id.id
                this = this.with_context(share_url_template_hash_arguments=['action'])
            result[this.id] = this.share_wizard_id.share_url_template() % data
        return result

    _columns = {
        # Shared-with user; also the record name via _rec_name.
        'user_id': fields.many2one('res.users', required=True, readonly=True),
        'login': fields.related('user_id', 'login', string='Login', type='char', size=64, required=True, readonly=True),
        'password': fields.char('Password', size=64, readonly=True),
        # Computed via _share_url above.
        'share_url': fields.function(_share_url, string='Share URL', type='char', size=512),
        'share_wizard_id': fields.many2one('share.wizard', 'Share Wizard', required=True, ondelete='cascade'),
        # True when the user account was created by this wizard run.
        'newly_created': fields.boolean('Newly created', readonly=True),
    }
    _defaults = {
        'newly_created': True,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jchodera/mdtraj | mdtraj/utils/contextmanagers.py | 16 | 1073 | from __future__ import print_function, division
import os
import time
import shutil
import tempfile
import contextlib
__all__ = ["timing", "enter_temp_directory"]
class timing(object):
    """Measure and report the wall-clock duration of a ``with`` block.

    On exit the elapsed time is printed and left available on the
    ``time`` attribute; exceptions raised inside the block propagate.

    Examples
    --------
    >>> long_function = lambda : None
    >>> with timing('long_function'):
    ...     long_function()
    long_function: 0.000 seconds
    """

    def __init__(self, name='block'):
        # Lifecycle state: entry/exit timestamps plus the elapsed duration,
        # which stays 0 until the block has been exited once.
        self.name = name
        self.start = None
        self.end = None
        self.time = 0

    def __enter__(self):
        # Record the entry timestamp; returning self lets callers keep a
        # handle on the timer and read .time after the block finishes.
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Compute and report the elapsed time.  Returning False ensures any
        # exception raised inside the block is re-raised, not swallowed.
        self.end = time.time()
        self.time = self.end - self.start
        print("%s: %0.3f seconds" % (self.name, self.time))
        return False
@contextlib.contextmanager
def enter_temp_directory():
    """Create and enter a temporary directory; used as context manager.

    Yields the path of the temporary directory.  The previous working
    directory is restored and the temporary directory deleted even when
    the body of the ``with`` block raises (the original implementation
    leaked the directory and left the process chdir'd into it on error,
    because the cleanup was not protected by try/finally).
    """
    temp_dir = tempfile.mkdtemp()
    cwd = os.getcwd()
    os.chdir(temp_dir)
    try:
        yield temp_dir
    finally:
        # Restore the old working directory before removing the temporary
        # one, so the deletion cannot fail on platforms that refuse to
        # remove the current directory.
        os.chdir(cwd)
        shutil.rmtree(temp_dir)
| lgpl-2.1 |
project-zerus/fbthrift | thrift/perf/py/test_server.py | 13 | 5946 | #!/usr/local/bin/python2.6 -tt
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import optparse
import sys
from apache.thrift.test.load import LoadTest
from apache.thrift.test.sync_load_handler import LoadHandler
from thrift.protocol.TBinaryProtocol import TBinaryProtocolAcceleratedFactory
from thrift.protocol.THeaderProtocol import THeaderProtocol
from thrift.protocol.THeaderProtocol import THeaderProtocolFactory
from thrift.transport.THeaderTransport import THeaderTransport
from thrift.transport import TSocket, TSSLSocket
from thrift.transport import TTransport
from thrift.server import TServer, TNonblockingServer, \
TProcessPoolServer, THttpServer, TCppServer
def main():
    """Parse command-line options and run the selected Thrift load-test server.

    Python 2 module: keeps legacy ``print`` statements.  Supported server
    types are TCppServer, TNonblockingServer, TProcessPoolServer,
    TGeventServer (the default) and anything resolvable on thrift's TServer.
    """
    op = optparse.OptionParser(usage='%prog [options]', add_help_option=False)
    op.add_option('-p', '--port',
                  action='store', type='int', dest='port', default=1234,
                  help='The server port')
    op.add_option('-s', '--servertype',
                  action='store', type='string', dest='servertype',
                  default='TGeventServer',
                  help='Type name of server')
    op.add_option('-w', '--num_workers',
                  action='store', type='int', dest='workers', default=4,
                  help='Number of worker processes/threads')
    op.add_option('-Q', '--max_queue_size',
                  action='store', type='int', dest='max_queue_size', default=0,
                  help='Max queue size, passed to TNonblockingServer')
    # -h is repurposed for --header, so the usual help shortcut becomes -?.
    op.add_option('-h', '--header',
                  action='store_true', help='Use the generated ContextIface')
    op.add_option('-?', '--help',
                  action='help',
                  help='Show this help message and exit')
    (options, args) = op.parse_args()
    if args:
        op.error('trailing arguments: ' + ' '.join(args))
    handler = LoadHandler()
    if options.servertype == 'TGeventServer':
        # only import TGeventServer when necessary. TGeventServer calls
        # monkey_patch, which breaks other servers
        from apache.thrift.test.sync_load_handler import GeventLoadHandler
        from thrift.server import TGeventServer
        handler = GeventLoadHandler()
    processor = LoadTest.Processor(handler)
    if options.header:
        # Header protocol accepts several legacy client transport types.
        pfactory = THeaderProtocolFactory(True, \
            [THeaderTransport.HEADERS_CLIENT_TYPE,
             THeaderTransport.FRAMED_DEPRECATED,
             THeaderTransport.UNFRAMED_DEPRECATED,
             THeaderTransport.HTTP_CLIENT_TYPE])
        if options.servertype == 'TCppServer':
            print 'C++ ThriftServer, Header transport, backwards compatible ' \
                'with all other types'
        elif options.servertype == 'TNonblockingServer':
            print 'Header transport, backwards compatible with framed'
        else:
            print 'Header transport, backwards compatible with ' + \
                'unframed, framed, http'
    else:
        if options.servertype == 'TCppServer':
            if not options.header:
                op.error('TCppServer cannot be used without header')
        if options.servertype == 'TNonblockingServer':
            print 'Framed transport'
        else:
            print 'Unframed transport'
        pfactory = TBinaryProtocolAcceleratedFactory()
    if options.servertype == 'TCppServer':
        # TCppServer manages its own transport and worker threads.
        server = TCppServer.TCppServer(processor)
        server.setPort(options.port)
        print 'Worker threads: ' + str(options.workers)
        server.setNWorkerThreads(options.workers)
    else:
        transport = TSocket.TServerSocket(options.port)
        tfactory = TTransport.TBufferedTransportFactory()
        if options.servertype == "TNonblockingServer":
            server = TNonblockingServer.TNonblockingServer(processor, transport,
                pfactory, maxQueueSize=options.max_queue_size)
        elif options.servertype == "TProcessPoolServer":
            server = TProcessPoolServer.TProcessPoolServer(processor, transport,
                                                           tfactory,
                                                           pfactory)
            print 'Worker processes: ' + str(options.workers)
            server.setNumWorkers(options.workers)
        elif options.servertype == "TGeventServer":
            print 'Worker processes: ' + str(options.workers)
            # Gevent makes its own server transport.
            server = TGeventServer.TGeventServer(options.port,
                                                 processor, None,
                                                 tfactory,
                                                 pfactory)
            server.setNumWorkers(options.workers)
        else:
            # Fall back to any server class exposed by thrift.server.TServer.
            ServerClass = getattr(TServer, options.servertype)
            server = ServerClass(processor, transport, tfactory, pfactory)
    print 'Serving ' + options.servertype + \
        ' requests on port %d...' % (options.port,)
    server.serve()
# main() serves forever and has no explicit return, so rc is normally None
# and sys.exit(None) reports success.
if __name__ == '__main__':
    rc = main()
    sys.exit(rc)
| apache-2.0 |
535521469/crawler_sth | scrapy/http/headers.py | 28 | 1971 | from w3lib.http import headers_dict_to_raw
from scrapy.utils.datatypes import CaselessDict
class Headers(CaselessDict):
    """Case insensitive http headers dictionary.

    Python 2 module (uses the ``unicode`` builtin).  Each header maps to a
    list of byte-string values; item access returns the most recent value.
    """

    def __init__(self, seq=None, encoding='utf-8'):
        # Encoding used to serialize unicode keys/values to byte strings.
        self.encoding = encoding
        super(Headers, self).__init__(seq)

    def normkey(self, key):
        """Headers must not be unicode"""
        # Title-case the header name ('content-type' -> 'Content-Type') and
        # encode unicode names to bytes.
        if isinstance(key, unicode):
            return key.title().encode(self.encoding)
        return key.title()

    def normvalue(self, value):
        """Headers must not be unicode"""
        # Values are always stored as a list of byte strings; scalars are
        # wrapped in a single-element list.
        if not hasattr(value, '__iter__'):
            value = [value]
        return [x.encode(self.encoding) if isinstance(x, unicode) else x \
            for x in value]

    def __getitem__(self, key):
        # Most recent value for the header, or None when the stored list is
        # empty (missing keys still raise through CaselessDict).
        try:
            return super(Headers, self).__getitem__(key)[-1]
        except IndexError:
            return None

    def get(self, key, def_val=None):
        # Like __getitem__ but returns def_val's last element semantics via
        # the parent get(); empty lists yield None.
        try:
            return super(Headers, self).get(key, def_val)[-1]
        except IndexError:
            return None

    def getlist(self, key, def_val=None):
        # All stored values for the header as a list; a normalized copy of
        # def_val (or []) when the header is absent.
        try:
            return super(Headers, self).__getitem__(key)
        except KeyError:
            if def_val is not None:
                return self.normvalue(def_val)
            return []

    def setlist(self, key, list_):
        self[key] = list_

    def setlistdefault(self, key, default_list=()):
        return self.setdefault(key, default_list)

    def appendlist(self, key, value):
        # Append value(s) to the header's existing value list.
        lst = self.getlist(key)
        lst.extend(self.normvalue(value))
        self[key] = lst

    def items(self):
        return list(self.iteritems())

    def iteritems(self):
        # Yields (key, value-list) pairs, unlike dict.iteritems.
        return ((k, self.getlist(k)) for k in self.keys())

    def values(self):
        # Most recent value of every stored header.
        return [self[k] for k in self.keys()]

    def to_string(self):
        # Serialize to a raw HTTP headers blob.
        return headers_dict_to_raw(self)

    def __copy__(self):
        return self.__class__(self)
    copy = __copy__
| bsd-3-clause |
MRigal/django | tests/template_tests/syntax_tests/test_numpy.py | 353 | 1429 | import warnings
from unittest import skipIf
from django.test import SimpleTestCase
from ..utils import setup
try:
import numpy
except ImportError:
numpy = False
@skipIf(numpy is False, "Numpy must be installed to run these tests.")
class NumpyTests(SimpleTestCase):
    # Ignore numpy deprecation warnings (#23890)
    # NOTE(review): this call runs at class-definition (import) time and
    # mutates the process-wide warnings filters, not just this test case -
    # confirm that is intended.
    warnings.filterwarnings(
        "ignore",
        "Using a non-integer number instead of an "
        "integer will result in an error in the future",
        DeprecationWarning
    )

    @setup({'numpy-array-index01': '{{ var.1 }}'})
    def test_numpy_array_index01(self):
        """
        Numpy's array-index syntax allows a template to access a certain
        item of a subscriptable object.
        """
        output = self.engine.render_to_string(
            'numpy-array-index01',
            {'var': numpy.array(["first item", "second item"])},
        )
        self.assertEqual(output, 'second item')

    @setup({'numpy-array-index02': '{{ var.5 }}'})
    def test_numpy_array_index02(self):
        """
        Fail silently when the array index is out of range.
        """
        output = self.engine.render_to_string(
            'numpy-array-index02',
            {'var': numpy.array(["first item", "second item"])},
        )
        # With string_if_invalid configured the engine substitutes it;
        # otherwise the invalid lookup renders as the empty string.
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')
loif/Grap | programming/python/reconeixement.py | 1 | 1189 | # python3 putada
# -*- coding: utf-8 -*-
import speech_recognition as sr
from time import sleep as delay
from subprocess import call
import subprocess
import RPi.GPIO as GPIO
# Status LED on BCM pin 21, lit while recording.
LED=21
GPIO.setmode(GPIO.BCM)
GPIO.setup(LED,GPIO.OUT)
GPIO.output(LED,False)
r = sr.Recognizer()
# NOTE(review): appears unused - the 4 s duration is hard-coded in the
# arecord call below; confirm before removing.
temps=4
delay(3)
call('clear')
delay(2)
GPIO.output(LED,True)
# "Recording" banner (Catalan, ANSI-colored) while arecord captures 4 s
# of CD-quality audio from ALSA device plughw:1 into lect.wav.
print("\33[38;2;0;0;255;48;2;100;100;100mGraban\x1B[0m")
delay(.3)
call(['arecord','-D','plughw:1','--duration=4','-f','cd','-vv','lect.wav'],stdout=subprocess.PIPE)
GPIO.output(LED,False)
# "Recording complete" banner (Catalan).
print ("\33[38;2;0;0;255;48;2;100;100;100mGravacio complerta\x1B[0m")
# Downmix the stereo capture to mono for the recognizer.
call(['sox','lect.wav','-c','1','lectura.wav'],stdout=subprocess.PIPE)
with sr.WavFile("lectura.wav") as source:  # open the recorded WAV file as the audio source
    audio = r.record(source)  # read the whole file into an AudioData object
try:
    # Recognize the audio and echo/speak the transcript.
    reconegut=r.recognize(audio)
    print("Has dit " + reconegut)
    call(['sh','/home/pi/programes/bash/TTS.sh','you have said: %s'%reconegut])
except IndexError:  # no internet connection
    print("No hi ha connexió a internet")
except LookupError:  # speech was unintelligible
    print("No s'ha entes l'audio")
GPIO.cleanup()
| mit |
bharling/python-socketio | setup.py | 1 | 1234 | """
python-socketio
---------------
Socket.IO server.
"""
from setuptools import setup
# Use the README as the long description shown on PyPI.
with open('README.rst', 'r') as f:
    long_description = f.read()

setup(
    name='python-socketio',
    version='0.5.0',
    url='http://github.com/miguelgrinberg/python-socketio/',
    license='MIT',
    author='Miguel Grinberg',
    author_email='miguelgrinberg50@gmail.com',
    description='Socket.IO server',
    long_description=long_description,
    packages=['socketio'],
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    install_requires=[
        'six>=1.9.0',
        'python-engineio>=0.6.0'
    ],
    tests_require=[
        'mock',
        'pbr<1.7.0',  # temporary, to workaround bug in 1.7.0
    ],
    test_suite='tests',
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
| mit |
lpsinger/astropy | astropy/constants/iau2015.py | 8 | 3567 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants in SI units. See :mod:`astropy.constants`
for a complete listing of constants defined in Astropy.
"""
import numpy as np
from .constant import Constant
from .config import codata
# ASTRONOMICAL CONSTANTS
class IAU2015(Constant):
    """Constant subclass for values adopted by IAU 2015 resolutions (SI)."""
    default_reference = 'IAU 2015'
    # Per-class registry and unit-compatibility cache, kept separate from
    # other Constant subclasses.
    _registry = {}
    _has_incompatible_units = set()


# DISTANCE

# Astronomical Unit (did not change from 2012)
au = IAU2015('au', "Astronomical Unit", 1.49597870700e11, 'm', 0.0,
             "IAU 2012 Resolution B2", system='si')

# Parsec
pc = IAU2015('pc', "Parsec", au.value / np.radians(1. / 3600.), 'm',
             au.uncertainty / np.radians(1. / 3600.),
             "Derived from au + IAU 2015 Resolution B 2 note [4]", system='si')

# Kiloparsec
kpc = IAU2015('kpc', "Kiloparsec",
              1000. * au.value / np.radians(1. / 3600.), 'm',
              1000. * au.uncertainty / np.radians(1. / 3600.),
              "Derived from au + IAU 2015 Resolution B 2 note [4]", system='si')

# Luminosity
L_bol0 = IAU2015('L_bol0', "Luminosity for absolute bolometric magnitude 0",
                 3.0128e28, "W", 0.0, "IAU 2015 Resolution B 2", system='si')


# SOLAR QUANTITIES

# Solar luminosity
L_sun = IAU2015('L_sun', "Nominal solar luminosity", 3.828e26,
                'W', 0.0, "IAU 2015 Resolution B 3", system='si')

# Solar mass parameter
GM_sun = IAU2015('GM_sun', 'Nominal solar mass parameter', 1.3271244e20,
                 'm3 / (s2)', 0.0, "IAU 2015 Resolution B 3", system='si')

# Solar mass (derived from mass parameter and gravitational constant)
# Uncertainty is propagated from G only, since GM is exact by definition.
M_sun = IAU2015('M_sun', "Solar mass", GM_sun.value / codata.G.value,
                'kg', ((codata.G.uncertainty / codata.G.value) *
                       (GM_sun.value / codata.G.value)),
                f"IAU 2015 Resolution B 3 + {codata.G.reference}",
                system='si')

# Solar radius
R_sun = IAU2015('R_sun', "Nominal solar radius", 6.957e8, 'm', 0.0,
                "IAU 2015 Resolution B 3", system='si')


# OTHER SOLAR SYSTEM QUANTITIES

# Jupiter mass parameter
GM_jup = IAU2015('GM_jup', 'Nominal Jupiter mass parameter', 1.2668653e17,
                 'm3 / (s2)', 0.0, "IAU 2015 Resolution B 3", system='si')

# Jupiter mass (derived from mass parameter and gravitational constant)
M_jup = IAU2015('M_jup', "Jupiter mass", GM_jup.value / codata.G.value,
                'kg', ((codata.G.uncertainty / codata.G.value) *
                       (GM_jup.value / codata.G.value)),
                f"IAU 2015 Resolution B 3 + {codata.G.reference}",
                system='si')

# Jupiter equatorial radius
R_jup = IAU2015('R_jup', "Nominal Jupiter equatorial radius", 7.1492e7,
                'm', 0.0, "IAU 2015 Resolution B 3", system='si')

# Earth mass parameter
GM_earth = IAU2015('GM_earth', 'Nominal Earth mass parameter', 3.986004e14,
                   'm3 / (s2)', 0.0, "IAU 2015 Resolution B 3", system='si')

# Earth mass (derived from mass parameter and gravitational constant)
M_earth = IAU2015('M_earth', "Earth mass",
                  GM_earth.value / codata.G.value,
                  'kg', ((codata.G.uncertainty / codata.G.value) *
                         (GM_earth.value / codata.G.value)),
                  f"IAU 2015 Resolution B 3 + {codata.G.reference}",
                  system='si')

# Earth equatorial radius
R_earth = IAU2015('R_earth', "Nominal Earth equatorial radius", 6.3781e6,
                  'm', 0.0, "IAU 2015 Resolution B 3", system='si')
| bsd-3-clause |
nicole-a-tesla/meetup.pizza | meetuppizza/settings/base.py | 1 | 4720 | """
Django settings for meetuppizza project on Heroku. Fore more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
from django.core.exceptions import ImproperlyConfigured
import os
import psycopg2
import dj_database_url
from unipath import Path
import requests
from requests_oauthlib import OAuth1
def get_env_variable(var_name):
    """Return the value of environment variable *var_name*.

    :raises ImproperlyConfigured: if the variable is not set.

    Bug fix: the original wrapped ``os.getenv`` in ``try/except KeyError``,
    but ``os.getenv`` never raises - it returns ``None`` - so missing
    variables silently produced ``None`` instead of a configuration error.
    Indexing ``os.environ`` raises ``KeyError`` as the handler expects.
    (Also fixes the "envifonment" typo in the message.)
    """
    try:
        return os.environ[var_name]
    except KeyError:
        error_msg = "Set the {} environment variable".format(var_name)
        raise ImproperlyConfigured(error_msg)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = Path(__file__).ancestor(3)
PROJECT_ROOT = BASE_DIR.child("meetuppizza")

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): hard-coded placeholder key - should come from the
# environment in any real deployment.
SECRET_KEY = 'secret!'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

MEETUP_KEY = get_env_variable("MEETUP_KEY")

# OAuth1 credentials for the Yelp API, all read from the environment.
YELP_OAUTH_OBJECT = OAuth1(
    get_env_variable('YELP_CONSUMER_KEY'),
    get_env_variable('YELP_CONSUMER_SECRET'),
    get_env_variable('YELP_TOKEN'),
    get_env_variable('YELP_TOKEN_SECRET'))

# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'super_user',
    'meetup',
    'pizzaplace'
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'meetuppizza.urls'

TEMPLATES = (
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR + '/templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
            'debug': DEBUG,
        },
    },
)

WSGI_APPLICATION = 'meetuppizza.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# NOTE(review): NAME is a filesystem path (sqlite style) while ENGINE is
# postgresql; dj_database_url below normally overrides this default -
# confirm that is relied upon.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': os.path.join(BASE_DIR, 'db.postgresql'),
    }
}

AUTH_PASSWORD_VALIDATORS = (
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
)

# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'US/Eastern'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)

# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# Allow all host headers
ALLOWED_HOSTS = ['*']

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/

STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'

# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
]

STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder'
)

# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
Russell-IO/ansible | test/units/module_utils/basic/test_exit_json.py | 123 | 5336 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import json
import pytest
EMPTY_INVOCATION = {u'module_args': {}}
class TestAnsibleModuleExitJson:
    """
    Test that various means of calling exitJson and FailJson return the messages they've been given
    """
    # (kwargs passed to exit_json/fail_json, expected JSON payload) pairs.
    DATA = (
        ({}, {'invocation': EMPTY_INVOCATION}),
        ({'msg': 'message'}, {'msg': 'message', 'invocation': EMPTY_INVOCATION}),
        ({'msg': 'success', 'changed': True},
         {'msg': 'success', 'changed': True, 'invocation': EMPTY_INVOCATION}),
        ({'msg': 'nochange', 'changed': False},
         {'msg': 'nochange', 'changed': False, 'invocation': EMPTY_INVOCATION}),
    )

    # pylint bug: https://github.com/PyCQA/pylint/issues/511
    # pylint: disable=undefined-variable
    @pytest.mark.parametrize('args, expected, stdin', ((a, e, {}) for a, e in DATA), indirect=['stdin'])
    def test_exit_json_exits(self, am, capfd, args, expected):
        # exit_json() must terminate the module with exit code 0 and print
        # its arguments (plus the invocation record) as JSON on stdout.
        with pytest.raises(SystemExit) as ctx:
            am.exit_json(**args)
        assert ctx.value.code == 0

        out, err = capfd.readouterr()
        return_val = json.loads(out)
        assert return_val == expected

    # Fail_json is only legal if it's called with a message
    # pylint bug: https://github.com/PyCQA/pylint/issues/511
    @pytest.mark.parametrize('args, expected, stdin',
                             ((a, e, {}) for a, e in DATA if 'msg' in a),  # pylint: disable=undefined-variable
                             indirect=['stdin'])
    def test_fail_json_exits(self, am, capfd, args, expected):
        # fail_json() must terminate with exit code 1 and mark the result
        # as failed in the emitted JSON.
        with pytest.raises(SystemExit) as ctx:
            am.fail_json(**args)
        assert ctx.value.code == 1

        out, err = capfd.readouterr()
        return_val = json.loads(out)
        # Fail_json should add failed=True
        expected['failed'] = True
        assert return_val == expected

    @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
    def test_fail_json_no_msg(self, am):
        # Calling fail_json() without msg= is a programming error and must
        # trip the internal assertion with its exact message.
        with pytest.raises(AssertionError) as ctx:
            am.fail_json()
        assert ctx.value.args[0] == "implementation error -- msg to explain the error is required"
class TestAnsibleModuleExitValuesRemoved:
    """
    Test that ExitJson and FailJson remove password-like values
    """
    OMIT = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'

    # (module args, kwargs passed to exit_json/fail_json, expected censored
    # JSON payload) triples covering no_log parameter values and URL-embedded
    # passwords.
    DATA = (
        (
            dict(username='person', password='$ecret k3y'),
            dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/',
                 not_secret='following the leader', msg='here'),
            dict(one=1, pwd=OMIT, url='https://username:password12345@foo.com/login/',
                 not_secret='following the leader', msg='here',
                 invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
        ),
        (
            dict(username='person', password='password12345'),
            dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/',
                 not_secret='following the leader', msg='here'),
            dict(one=1, pwd='$ecret k3y', url='https://username:********@foo.com/login/',
                 not_secret='following the leader', msg='here',
                 invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
        ),
        (
            dict(username='person', password='$ecret k3y'),
            dict(one=1, pwd='$ecret k3y', url='https://username:$ecret k3y@foo.com/login/',
                 not_secret='following the leader', msg='here'),
            dict(one=1, pwd=OMIT, url='https://username:********@foo.com/login/',
                 not_secret='following the leader', msg='here',
                 invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
        ),
    )

    # pylint bug: https://github.com/PyCQA/pylint/issues/511
    @pytest.mark.parametrize('am, stdin, return_val, expected',
                             (({'username': {}, 'password': {'no_log': True}, 'token': {'no_log': True}}, s, r, e)
                              for s, r, e in DATA),  # pylint: disable=undefined-variable
                             indirect=['am', 'stdin'])
    def test_exit_json_removes_values(self, am, capfd, return_val, expected):
        # no_log values must be censored from the JSON emitted by exit_json().
        with pytest.raises(SystemExit):
            am.exit_json(**return_val)
        out, err = capfd.readouterr()
        assert json.loads(out) == expected

    # pylint bug: https://github.com/PyCQA/pylint/issues/511
    @pytest.mark.parametrize('am, stdin, return_val, expected',
                             (({'username': {}, 'password': {'no_log': True}, 'token': {'no_log': True}}, s, r, e)
                              for s, r, e in DATA),  # pylint: disable=undefined-variable
                             indirect=['am', 'stdin'])
    def test_fail_json_removes_values(self, am, capfd, return_val, expected):
        expected['failed'] = True
        with pytest.raises(SystemExit):
            # Bug fix: the original read `am.fail_json(**return_val) == expected`,
            # a no-op comparison whose result was discarded (and which could
            # never even evaluate, since fail_json raises SystemExit first).
            am.fail_json(**return_val)
        out, err = capfd.readouterr()
        assert json.loads(out) == expected
| gpl-3.0 |
tkas/osmose-backend | plugins/Josm_transport.py | 4 | 39309 | #-*- coding: utf-8 -*-
import modules.mapcss_lib as mapcss
import regex as re # noqa
from plugins.Plugin import with_options # noqa
from plugins.PluginMapCSS import PluginMapCSS
class Josm_transport(PluginMapCSS):
MAPCSS_URL = 'https://github.com/Jungle-Bus/transport_mapcss/blob/master/transport.validator.mapcss'
def init(self, logger):
    """Register this validator's error classes and precompile its regexes.

    NOTE(review): this module appears to be machine-generated from the
    MapCSS ruleset referenced by MAPCSS_URL - if so, fix the ruleset
    rather than editing this file by hand.
    """
    super().init(logger)
    tags = capture_tags = {}  # noqa
    # Error classes 214xx: missing mandatory tags on public transport objects.
    self.errors[21401] = self.def_class(item = 2140, level = 3, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('Missing public_transport:version tag on a public_transport route relation'))
    self.errors[21402] = self.def_class(item = 2140, level = 3, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('Missing network tag on a public_transport relation'))
    self.errors[21403] = self.def_class(item = 2140, level = 3, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('Missing operator tag on a public_transport relation'))
    self.errors[21405] = self.def_class(item = 2140, level = 3, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('Missing from/to tag on a public_transport route relation'))
    self.errors[21411] = self.def_class(item = 2140, level = 3, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('Missing public_transport tag on a public transport stop'))
    self.errors[21412] = self.def_class(item = 2140, level = 3, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('Missing legacy tag on a public transport stop'))
    # Error classes 9014xxx: consistency/plausibility checks on stops and routes.
    self.errors[9014002] = self.def_class(item = 9014, level = 2, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('Is it a bus stop or a bus station?'))
    self.errors[9014006] = self.def_class(item = 9014, level = 3, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('Check if the note can be deleted'))
    self.errors[9014007] = self.def_class(item = 9014, level = 3, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('The network should be on the transport lines and not on the stops'))
    self.errors[9014008] = self.def_class(item = 9014, level = 3, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('The operator should be on the transport lines and not on the stops'))
    self.errors[9014009] = self.def_class(item = 9014, level = 2, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('Missing transportation mode, add a tag route = bus/coach/tram/etc'))
    self.errors[9014010] = self.def_class(item = 9014, level = 2, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('Missing transportation mode, change tag route to route_master'))
    self.errors[9014019] = self.def_class(item = 9014, level = 2, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('A bus stop is supposed to be a node'))
    self.errors[9014020] = self.def_class(item = 9014, level = 2, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('The color of the public transport line should be in a colour tag'))
    self.errors[9014021] = self.def_class(item = 9014, level = 2, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('The interval is invalid (try a number of minutes)'))
    self.errors[9014022] = self.def_class(item = 9014, level = 2, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('The duration is invalid (try a number of minutes)'))
    self.errors[9014023] = self.def_class(item = 9014, level = 2, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('Missing interval tag to specify the main interval'))
    self.errors[9014024] = self.def_class(item = 9014, level = 2, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('Missing opening_hours tag'))
    self.errors[9014025] = self.def_class(item = 9014, level = 2, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('Check the operator tag : this operator does not exist, it may be a typo'))
    self.errors[9014026] = self.def_class(item = 9014, level = 2, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('Check the network tag : this network does not exist, it may be a typo'))
    self.errors[9014027] = self.def_class(item = 9014, level = 2, tags = mapcss.list_('tag', 'public_transport'), title = mapcss.tr('The line variant does not belong to any line, add it to the route_master relation'))

    # Precompiled regexes referenced by the generated rule methods:
    # known network names, HH:MM(:SS)-or-minutes intervals, transport modes.
    self.re_25554804 = re.compile(r'STIF|Kéolis|Véolia')
    self.re_2fe0817d = re.compile(r'^([0-9][0-9]?[0-9]?|[0-2][0-9]:[0-5][0-9](:[0-5][0-9])?)$')
    self.re_6194d2a4 = re.compile(r'^(bus|coach|train|subway|monorail|trolleybus|aerialway|funicular|ferry|tram|share_taxi|light_rail|school_bus|walking_bus)$')
def node(self, data, tags):
capture_tags = {}
keys = tags.keys()
err = []
set_pt_route = set_pt_route_master = False
# node[highway=bus_stop][amenity=bus_station]
if ('amenity' in keys and 'highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'bus_stop') and mapcss._tag_capture(capture_tags, 1, tags, 'amenity') == mapcss._value_capture(capture_tags, 1, 'bus_station'))
except mapcss.RuleAbort: pass
if match:
# throwError:tr("Is it a bus stop or a bus station?")
# fixRemove:"amenity"
err.append({'class': 9014002, 'subclass': 1676203359, 'text': mapcss.tr('Is it a bus stop or a bus station?'), 'allow_fix_override': True, 'fix': {
'-': ([
'amenity'])
}})
# node[highway=bus_stop][!public_transport]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'bus_stop') and not mapcss._tag_capture(capture_tags, 1, tags, 'public_transport'))
except mapcss.RuleAbort: pass
if match:
# group:tr("Missing public_transport tag on a public transport stop")
# -osmoseItemClassLevel:"2140/21411:0/3"
# throwError:tr("Specify if it is a stop (platform) or a location on the road (stop_position)")
# fixAdd:"public_transport=platform"
# assertNoMatch:"node highway=bus_stop public_transport=platform"
# assertNoMatch:"node highway=bus_stop public_transport=stop_position"
# assertMatch:"node highway=bus_stop"
err.append({'class': 21411, 'subclass': 0, 'text': mapcss.tr('Specify if it is a stop (platform) or a location on the road (stop_position)'), 'allow_fix_override': True, 'fix': {
'+': dict([
['public_transport','platform']])
}})
# node[railway=tram_stop][!public_transport]
if ('railway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'railway') == mapcss._value_capture(capture_tags, 0, 'tram_stop') and not mapcss._tag_capture(capture_tags, 1, tags, 'public_transport'))
except mapcss.RuleAbort: pass
if match:
# group:tr("Missing public_transport tag on a public transport stop")
# -osmoseItemClassLevel:"2140/21411:1/3"
# throwError:tr("Specify if it is a stop (platform) or a location on the rails (stop_position)")
# fixAdd:"public_transport=stop_position"
# assertNoMatch:"node railway=tram_stop public_transport=platform"
# assertNoMatch:"node railway=tram_stop public_transport=stop_position"
# assertMatch:"node railway=tram_stop"
err.append({'class': 21411, 'subclass': 1, 'text': mapcss.tr('Specify if it is a stop (platform) or a location on the rails (stop_position)'), 'allow_fix_override': True, 'fix': {
'+': dict([
['public_transport','stop_position']])
}})
# node[public_transport=platform][!highway][!railway][!bus][!tram][!ferry][!walking_bus]
if ('public_transport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'public_transport') == mapcss._value_capture(capture_tags, 0, 'platform') and not mapcss._tag_capture(capture_tags, 1, tags, 'highway') and not mapcss._tag_capture(capture_tags, 2, tags, 'railway') and not mapcss._tag_capture(capture_tags, 3, tags, 'bus') and not mapcss._tag_capture(capture_tags, 4, tags, 'tram') and not mapcss._tag_capture(capture_tags, 5, tags, 'ferry') and not mapcss._tag_capture(capture_tags, 6, tags, 'walking_bus'))
except mapcss.RuleAbort: pass
if match:
# group:tr("Missing legacy tag on a public transport stop")
# -osmoseItemClassLevel:"2140/21412:1/3"
# throwError:tr("Is this a bus or tram stop ? Add a tag to precise the kind of platform")
err.append({'class': 21412, 'subclass': 1, 'text': mapcss.tr('Is this a bus or tram stop ? Add a tag to precise the kind of platform')})
# node[public_transport=platform][!highway][!railway][bus=yes]
if ('bus' in keys and 'public_transport' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'public_transport') == mapcss._value_capture(capture_tags, 0, 'platform') and not mapcss._tag_capture(capture_tags, 1, tags, 'highway') and not mapcss._tag_capture(capture_tags, 2, tags, 'railway') and mapcss._tag_capture(capture_tags, 3, tags, 'bus') == mapcss._value_capture(capture_tags, 3, 'yes'))
except mapcss.RuleAbort: pass
if match:
# group:tr("Missing legacy tag on a public transport stop")
# -osmoseItemClassLevel:"2140/21412:0/3"
# throwError:tr("Is this a bus stop? add the tag highway=bus_stop")
# fixAdd:"highway=bus_stop"
# assertMatch:"node public_transport=platform bus=yes"
err.append({'class': 21412, 'subclass': 0, 'text': mapcss.tr('Is this a bus stop? add the tag highway=bus_stop'), 'allow_fix_override': True, 'fix': {
'+': dict([
['highway','bus_stop']])
}})
# node[highway=bus_stop][note]
# node[highway=bus_stop][note:fr][inside("FR")]
if ('highway' in keys and 'note' in keys) or ('highway' in keys and 'note:fr' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'bus_stop') and mapcss._tag_capture(capture_tags, 1, tags, 'note'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'bus_stop') and mapcss._tag_capture(capture_tags, 1, tags, 'note:fr') and mapcss.inside(self.father.config.options, 'FR'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("Check if the note can be deleted")
err.append({'class': 9014006, 'subclass': 673170504, 'text': mapcss.tr('Check if the note can be deleted')})
# node[highway=bus_stop][network][inside("FR")]
if ('highway' in keys and 'network' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'bus_stop') and mapcss._tag_capture(capture_tags, 1, tags, 'network') and mapcss.inside(self.father.config.options, 'FR'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("The network should be on the transport lines and not on the stops")
# fixRemove:"network"
err.append({'class': 9014007, 'subclass': 1428913922, 'text': mapcss.tr('The network should be on the transport lines and not on the stops'), 'allow_fix_override': True, 'fix': {
'-': ([
'network'])
}})
# node[highway=bus_stop][operator][inside("FR")]
if ('highway' in keys and 'operator' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'bus_stop') and mapcss._tag_capture(capture_tags, 1, tags, 'operator') and mapcss.inside(self.father.config.options, 'FR'))
except mapcss.RuleAbort: pass
if match:
# throwWarning:tr("The operator should be on the transport lines and not on the stops")
# fixRemove:"operator"
err.append({'class': 9014008, 'subclass': 210603856, 'text': mapcss.tr('The operator should be on the transport lines and not on the stops'), 'allow_fix_override': True, 'fix': {
'-': ([
'operator'])
}})
return err
def way(self, data, tags, nds):
capture_tags = {}
keys = tags.keys()
err = []
set_pt_route = set_pt_route_master = False
# way[highway=bus_stop]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'bus_stop'))
except mapcss.RuleAbort: pass
if match:
# throwError:tr("A bus stop is supposed to be a node")
err.append({'class': 9014019, 'subclass': 1153984743, 'text': mapcss.tr('A bus stop is supposed to be a node')})
return err
def relation(self, data, tags, members):
capture_tags = {}
keys = tags.keys()
err = []
set_pt_route = set_pt_route_master = False
# relation[type=route][!route]
if ('type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'route') and not mapcss._tag_capture(capture_tags, 1, tags, 'route'))
except mapcss.RuleAbort: pass
if match:
# throwError:tr("Missing transportation mode, add a tag route = bus/coach/tram/etc")
err.append({'class': 9014009, 'subclass': 828849115, 'text': mapcss.tr('Missing transportation mode, add a tag route = bus/coach/tram/etc')})
# relation[type=route_master][!route_master][!route]
if ('type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'route_master') and not mapcss._tag_capture(capture_tags, 1, tags, 'route_master') and not mapcss._tag_capture(capture_tags, 2, tags, 'route'))
except mapcss.RuleAbort: pass
if match:
# throwError:tr("Missing transportation mode, add a tag route = bus/coach/tram/etc")
err.append({'class': 9014009, 'subclass': 607011337, 'text': mapcss.tr('Missing transportation mode, add a tag route = bus/coach/tram/etc')})
# relation[type=route_master][!route_master][route]
if ('route' in keys and 'type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'route_master') and not mapcss._tag_capture(capture_tags, 1, tags, 'route_master') and mapcss._tag_capture(capture_tags, 2, tags, 'route'))
except mapcss.RuleAbort: pass
if match:
# throwError:tr("Missing transportation mode, change tag route to route_master")
# fixChangeKey:"route=>route_master"
err.append({'class': 9014010, 'subclass': 3385524, 'text': mapcss.tr('Missing transportation mode, change tag route to route_master'), 'allow_fix_override': True, 'fix': {
'+': dict([
['route_master', mapcss.tag(tags, 'route')]]),
'-': ([
'route'])
}})
# relation[type=route][route=~/^(bus|coach|train|subway|monorail|trolleybus|aerialway|funicular|ferry|tram|share_taxi|light_rail|school_bus|walking_bus)$/]
if ('route' in keys and 'type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'route') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 1, self.re_6194d2a4), mapcss._tag_capture(capture_tags, 1, tags, 'route')))
except mapcss.RuleAbort: pass
if match:
# setpt_route
set_pt_route = True
# relation[type=route_master][route_master=~/^(bus|coach|train|subway|monorail|trolleybus|aerialway|funicular|ferry|tram|share_taxi|light_rail|school_bus|walking_bus)$/]
if ('route_master' in keys and 'type' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'type') == mapcss._value_capture(capture_tags, 0, 'route_master') and mapcss.regexp_test(mapcss._value_capture(capture_tags, 1, self.re_6194d2a4), mapcss._tag_capture(capture_tags, 1, tags, 'route_master')))
except mapcss.RuleAbort: pass
if match:
# setpt_route_master
set_pt_route_master = True
# relation.pt_route[!public_transport:version]
if True:
match = False
if not match:
capture_tags = {}
try: match = (set_pt_route and not mapcss._tag_capture(capture_tags, 0, tags, 'public_transport:version'))
except mapcss.RuleAbort: pass
if match:
# -osmoseItemClassLevel:"2140/21401/3"
# throwError:tr("Missing public_transport:version tag on a public_transport route relation")
# assertNoMatch:"relation type=route route=bus public_transport:version=1"
# assertMatch:"relation type=route route=bus"
err.append({'class': 21401, 'subclass': 0, 'text': mapcss.tr('Missing public_transport:version tag on a public_transport route relation')})
# relation.pt_route[!network]
# relation.pt_route_master[!network]
if True:
match = False
if not match:
capture_tags = {}
try: match = (set_pt_route and not mapcss._tag_capture(capture_tags, 0, tags, 'network'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (set_pt_route_master and not mapcss._tag_capture(capture_tags, 0, tags, 'network'))
except mapcss.RuleAbort: pass
if match:
# -osmoseItemClassLevel:"2140/21402/3"
# throwError:tr("Missing network tag on a public_transport relation")
# assertNoMatch:"relation type=route route=bus network=BiBiBus"
# assertMatch:"relation type=route route=bus"
err.append({'class': 21402, 'subclass': 0, 'text': mapcss.tr('Missing network tag on a public_transport relation')})
# relation.pt_route[!operator]
# relation.pt_route_master[!operator]
if True:
match = False
if not match:
capture_tags = {}
try: match = (set_pt_route and not mapcss._tag_capture(capture_tags, 0, tags, 'operator'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (set_pt_route_master and not mapcss._tag_capture(capture_tags, 0, tags, 'operator'))
except mapcss.RuleAbort: pass
if match:
# -osmoseItemClassLevel:"2140/21403/3"
# throwError:tr("Missing operator tag on a public_transport relation")
# assertNoMatch:"relation type=route route=bus operator=BiBiBus"
# assertMatch:"relation type=route route=bus"
err.append({'class': 21403, 'subclass': 0, 'text': mapcss.tr('Missing operator tag on a public_transport relation')})
# relation.pt_route[!from]
# relation.pt_route[!to]
if True:
match = False
if not match:
capture_tags = {}
try: match = (set_pt_route and not mapcss._tag_capture(capture_tags, 0, tags, 'from'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (set_pt_route and not mapcss._tag_capture(capture_tags, 0, tags, 'to'))
except mapcss.RuleAbort: pass
if match:
# -osmoseItemClassLevel:"2140/21405/3"
# throwError:tr("Missing from/to tag on a public_transport route relation")
# assertNoMatch:"relation type=route route=bus from=A to=B"
# assertMatch:"relation type=route route=bus from=A"
# assertMatch:"relation type=route route=bus to=B"
# assertMatch:"relation type=route route=bus"
err.append({'class': 21405, 'subclass': 0, 'text': mapcss.tr('Missing from/to tag on a public_transport route relation')})
# relation.pt_route[tag(network)!=parent_tag(network)]
# Part of rule not implemented
# relation.pt_route[tag(operator)!=parent_tag(operator)]
# Part of rule not implemented
# relation.pt_route[tag(ref)!=parent_tag(ref)]
# Part of rule not implemented
# relation.pt_route[tag(colour)!=parent_tag(colour)]
# Part of rule not implemented
# relation.pt_route[tag(route)!=parent_tag(route_master)]
# Part of rule not implemented
# relation.pt_route[!colour][color]
# relation.pt_route_master[!colour][color]
if ('color' in keys):
match = False
if not match:
capture_tags = {}
try: match = (set_pt_route and not mapcss._tag_capture(capture_tags, 0, tags, 'colour') and mapcss._tag_capture(capture_tags, 1, tags, 'color'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (set_pt_route_master and not mapcss._tag_capture(capture_tags, 0, tags, 'colour') and mapcss._tag_capture(capture_tags, 1, tags, 'color'))
except mapcss.RuleAbort: pass
if match:
# throwError:tr("The color of the public transport line should be in a colour tag")
# fixChangeKey:"color=>colour"
err.append({'class': 9014020, 'subclass': 218794881, 'text': mapcss.tr('The color of the public transport line should be in a colour tag'), 'allow_fix_override': True, 'fix': {
'+': dict([
['colour', mapcss.tag(tags, 'color')]]),
'-': ([
'color'])
}})
# relation.pt_route["operator"=~/STIF|Kéolis|Véolia/][inside("FR")]
# relation.pt_route_master["operator"=~/STIF|Kéolis|Véolia/][inside("FR")]
if ('operator' in keys):
match = False
if not match:
capture_tags = {}
try: match = (set_pt_route and mapcss.regexp_test(mapcss._value_capture(capture_tags, 0, self.re_25554804), mapcss._tag_capture(capture_tags, 0, tags, 'operator')) and mapcss.inside(self.father.config.options, 'FR'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (set_pt_route_master and mapcss.regexp_test(mapcss._value_capture(capture_tags, 0, self.re_25554804), mapcss._tag_capture(capture_tags, 0, tags, 'operator')) and mapcss.inside(self.father.config.options, 'FR'))
except mapcss.RuleAbort: pass
if match:
# throwError:tr("Check the operator tag : this operator does not exist, it may be a typo")
err.append({'class': 9014025, 'subclass': 286137008, 'text': mapcss.tr('Check the operator tag : this operator does not exist, it may be a typo')})
# relation.pt_route["network"=~/STIF|Kéolis|Véolia/][inside("FR")]
# relation.pt_route_master["network"=~/STIF|Kéolis|Véolia/][inside("FR")]
if ('network' in keys):
match = False
if not match:
capture_tags = {}
try: match = (set_pt_route and mapcss.regexp_test(mapcss._value_capture(capture_tags, 0, self.re_25554804), mapcss._tag_capture(capture_tags, 0, tags, 'network')) and mapcss.inside(self.father.config.options, 'FR'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (set_pt_route_master and mapcss.regexp_test(mapcss._value_capture(capture_tags, 0, self.re_25554804), mapcss._tag_capture(capture_tags, 0, tags, 'network')) and mapcss.inside(self.father.config.options, 'FR'))
except mapcss.RuleAbort: pass
if match:
# throwError:tr("Check the network tag : this network does not exist, it may be a typo")
err.append({'class': 9014026, 'subclass': 735027962, 'text': mapcss.tr('Check the network tag : this network does not exist, it may be a typo')})
# relation[highway=bus_stop]
if ('highway' in keys):
match = False
if not match:
capture_tags = {}
try: match = (mapcss._tag_capture(capture_tags, 0, tags, 'highway') == mapcss._value_capture(capture_tags, 0, 'bus_stop'))
except mapcss.RuleAbort: pass
if match:
# throwError:tr("A bus stop is supposed to be a node")
err.append({'class': 9014019, 'subclass': 1590282811, 'text': mapcss.tr('A bus stop is supposed to be a node')})
# relation.pt_route!.route_ok
if True:
match = False
# Skip selector using undeclared class pt_route, route_ok
if match:
# throwError:tr("The line variant does not belong to any line, add it to the route_master relation")
err.append({'class': 9014027, 'subclass': 1286525207, 'text': mapcss.tr('The line variant does not belong to any line, add it to the route_master relation')})
# relation.pt_route[interval][interval!~/^([0-9][0-9]?[0-9]?|[0-2][0-9]:[0-5][0-9](:[0-5][0-9])?)$/]
# relation.pt_route_master[interval][interval!~/^([0-9][0-9]?[0-9]?|[0-2][0-9]:[0-5][0-9](:[0-5][0-9])?)$/]
if ('interval' in keys):
match = False
if not match:
capture_tags = {}
try: match = (set_pt_route and mapcss._tag_capture(capture_tags, 0, tags, 'interval') and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 1, self.re_2fe0817d, '^([0-9][0-9]?[0-9]?|[0-2][0-9]:[0-5][0-9](:[0-5][0-9])?)$'), mapcss._tag_capture(capture_tags, 1, tags, 'interval')))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (set_pt_route_master and mapcss._tag_capture(capture_tags, 0, tags, 'interval') and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 1, self.re_2fe0817d, '^([0-9][0-9]?[0-9]?|[0-2][0-9]:[0-5][0-9](:[0-5][0-9])?)$'), mapcss._tag_capture(capture_tags, 1, tags, 'interval')))
except mapcss.RuleAbort: pass
if match:
# throwError:tr("The interval is invalid (try a number of minutes)")
# assertNoMatch:"relation type=route route=bus interval=00:05"
# assertNoMatch:"relation type=route route=bus interval=00:10:00"
# assertMatch:"relation type=route route=bus interval=00:70:00"
# assertNoMatch:"relation type=route route=bus interval=02:00:00"
# assertNoMatch:"relation type=route route=bus interval=10"
# assertNoMatch:"relation type=route route=bus interval=120"
# assertNoMatch:"relation type=route route=bus interval=5"
# assertMatch:"relation type=route route=bus interval=irregular"
# assertMatch:"relation type=route route=ferry interval=2heures"
# assertMatch:"relation type=route_master route_master=bus interval=1240"
err.append({'class': 9014021, 'subclass': 170114261, 'text': mapcss.tr('The interval is invalid (try a number of minutes)')})
# relation.pt_route[duration][duration!~/^([0-9][0-9]?[0-9]?|[0-2][0-9]:[0-5][0-9](:[0-5][0-9])?)$/]
# relation.pt_route_master[duration][duration!~/^([0-9][0-9]?[0-9]?|[0-2][0-9]:[0-5][0-9](:[0-5][0-9])?)$/]
if ('duration' in keys):
match = False
if not match:
capture_tags = {}
try: match = (set_pt_route and mapcss._tag_capture(capture_tags, 0, tags, 'duration') and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 1, self.re_2fe0817d, '^([0-9][0-9]?[0-9]?|[0-2][0-9]:[0-5][0-9](:[0-5][0-9])?)$'), mapcss._tag_capture(capture_tags, 1, tags, 'duration')))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (set_pt_route_master and mapcss._tag_capture(capture_tags, 0, tags, 'duration') and not mapcss.regexp_test(mapcss._value_const_capture(capture_tags, 1, self.re_2fe0817d, '^([0-9][0-9]?[0-9]?|[0-2][0-9]:[0-5][0-9](:[0-5][0-9])?)$'), mapcss._tag_capture(capture_tags, 1, tags, 'duration')))
except mapcss.RuleAbort: pass
if match:
# throwError:tr("The duration is invalid (try a number of minutes)")
# assertMatch:"relation type=route route=bus duration=20minutes"
# assertNoMatch:"relation type=route route=bus duration=25:00"
# assertNoMatch:"relation type=route route=ferry duration=120"
# assertMatch:"relation type=route route=ferry duration=1240"
# assertNoMatch:"relation type=route route=ferry duration=20"
# assertNoMatch:"relation type=route_master route=bus duration=02:00:00"
# assertNoMatch:"relation type=route_master route_master=bus duration=5"
err.append({'class': 9014022, 'subclass': 317647061, 'text': mapcss.tr('The duration is invalid (try a number of minutes)')})
# relation.pt_route["interval:conditional"][!interval]
# relation.pt_route_master["interval:conditional"][!interval]
if ('interval:conditional' in keys):
match = False
if not match:
capture_tags = {}
try: match = (set_pt_route and mapcss._tag_capture(capture_tags, 0, tags, 'interval:conditional') and not mapcss._tag_capture(capture_tags, 1, tags, 'interval'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (set_pt_route_master and mapcss._tag_capture(capture_tags, 0, tags, 'interval:conditional') and not mapcss._tag_capture(capture_tags, 1, tags, 'interval'))
except mapcss.RuleAbort: pass
if match:
# throwError:tr("Missing interval tag to specify the main interval")
err.append({'class': 9014023, 'subclass': 1710360237, 'text': mapcss.tr('Missing interval tag to specify the main interval')})
# relation.pt_route["interval:conditional"][!opening_hours]
# relation.pt_route_master["interval:conditional"][!opening_hours]
if ('interval:conditional' in keys):
match = False
if not match:
capture_tags = {}
try: match = (set_pt_route and mapcss._tag_capture(capture_tags, 0, tags, 'interval:conditional') and not mapcss._tag_capture(capture_tags, 1, tags, 'opening_hours'))
except mapcss.RuleAbort: pass
if not match:
capture_tags = {}
try: match = (set_pt_route_master and mapcss._tag_capture(capture_tags, 0, tags, 'interval:conditional') and not mapcss._tag_capture(capture_tags, 1, tags, 'opening_hours'))
except mapcss.RuleAbort: pass
if match:
# throwError:tr("Missing opening_hours tag")
err.append({'class': 9014024, 'subclass': 210081506, 'text': mapcss.tr('Missing opening_hours tag')})
return err
from plugins.Plugin import TestPluginCommon
class Test(TestPluginCommon):
def test(self):
n = Josm_transport(None)
class _config:
options = {"country": None, "language": None}
class father:
config = _config()
n.father = father()
n.init(None)
data = {'id': 0, 'lat': 0, 'lon': 0}
self.check_not_err(n.node(data, {'highway': 'bus_stop', 'public_transport': 'platform'}), expected={'class': 21411, 'subclass': 0})
self.check_not_err(n.node(data, {'highway': 'bus_stop', 'public_transport': 'stop_position'}), expected={'class': 21411, 'subclass': 0})
self.check_err(n.node(data, {'highway': 'bus_stop'}), expected={'class': 21411, 'subclass': 0})
self.check_not_err(n.node(data, {'public_transport': 'platform', 'railway': 'tram_stop'}), expected={'class': 21411, 'subclass': 1})
self.check_not_err(n.node(data, {'public_transport': 'stop_position', 'railway': 'tram_stop'}), expected={'class': 21411, 'subclass': 1})
self.check_err(n.node(data, {'railway': 'tram_stop'}), expected={'class': 21411, 'subclass': 1})
self.check_err(n.node(data, {'bus': 'yes', 'public_transport': 'platform'}), expected={'class': 21412, 'subclass': 0})
self.check_not_err(n.relation(data, {'public_transport:version': '1', 'route': 'bus', 'type': 'route'}, []), expected={'class': 21401, 'subclass': 0})
self.check_err(n.relation(data, {'route': 'bus', 'type': 'route'}, []), expected={'class': 21401, 'subclass': 0})
self.check_not_err(n.relation(data, {'network': 'BiBiBus', 'route': 'bus', 'type': 'route'}, []), expected={'class': 21402, 'subclass': 0})
self.check_err(n.relation(data, {'route': 'bus', 'type': 'route'}, []), expected={'class': 21402, 'subclass': 0})
self.check_not_err(n.relation(data, {'operator': 'BiBiBus', 'route': 'bus', 'type': 'route'}, []), expected={'class': 21403, 'subclass': 0})
self.check_err(n.relation(data, {'route': 'bus', 'type': 'route'}, []), expected={'class': 21403, 'subclass': 0})
self.check_not_err(n.relation(data, {'from': 'A', 'route': 'bus', 'to': 'B', 'type': 'route'}, []), expected={'class': 21405, 'subclass': 0})
self.check_err(n.relation(data, {'from': 'A', 'route': 'bus', 'type': 'route'}, []), expected={'class': 21405, 'subclass': 0})
self.check_err(n.relation(data, {'route': 'bus', 'to': 'B', 'type': 'route'}, []), expected={'class': 21405, 'subclass': 0})
self.check_err(n.relation(data, {'route': 'bus', 'type': 'route'}, []), expected={'class': 21405, 'subclass': 0})
self.check_not_err(n.relation(data, {'interval': '00:05', 'route': 'bus', 'type': 'route'}, []), expected={'class': 9014021, 'subclass': 170114261})
self.check_not_err(n.relation(data, {'interval': '00:10:00', 'route': 'bus', 'type': 'route'}, []), expected={'class': 9014021, 'subclass': 170114261})
self.check_err(n.relation(data, {'interval': '00:70:00', 'route': 'bus', 'type': 'route'}, []), expected={'class': 9014021, 'subclass': 170114261})
self.check_not_err(n.relation(data, {'interval': '02:00:00', 'route': 'bus', 'type': 'route'}, []), expected={'class': 9014021, 'subclass': 170114261})
self.check_not_err(n.relation(data, {'interval': '10', 'route': 'bus', 'type': 'route'}, []), expected={'class': 9014021, 'subclass': 170114261})
self.check_not_err(n.relation(data, {'interval': '120', 'route': 'bus', 'type': 'route'}, []), expected={'class': 9014021, 'subclass': 170114261})
self.check_not_err(n.relation(data, {'interval': '5', 'route': 'bus', 'type': 'route'}, []), expected={'class': 9014021, 'subclass': 170114261})
self.check_err(n.relation(data, {'interval': 'irregular', 'route': 'bus', 'type': 'route'}, []), expected={'class': 9014021, 'subclass': 170114261})
self.check_err(n.relation(data, {'interval': '2heures', 'route': 'ferry', 'type': 'route'}, []), expected={'class': 9014021, 'subclass': 170114261})
self.check_err(n.relation(data, {'interval': '1240', 'route_master': 'bus', 'type': 'route_master'}, []), expected={'class': 9014021, 'subclass': 170114261})
self.check_err(n.relation(data, {'duration': '20minutes', 'route': 'bus', 'type': 'route'}, []), expected={'class': 9014022, 'subclass': 317647061})
self.check_not_err(n.relation(data, {'duration': '25:00', 'route': 'bus', 'type': 'route'}, []), expected={'class': 9014022, 'subclass': 317647061})
self.check_not_err(n.relation(data, {'duration': '120', 'route': 'ferry', 'type': 'route'}, []), expected={'class': 9014022, 'subclass': 317647061})
self.check_err(n.relation(data, {'duration': '1240', 'route': 'ferry', 'type': 'route'}, []), expected={'class': 9014022, 'subclass': 317647061})
self.check_not_err(n.relation(data, {'duration': '20', 'route': 'ferry', 'type': 'route'}, []), expected={'class': 9014022, 'subclass': 317647061})
self.check_not_err(n.relation(data, {'duration': '02:00:00', 'route': 'bus', 'type': 'route_master'}, []), expected={'class': 9014022, 'subclass': 317647061})
self.check_not_err(n.relation(data, {'duration': '5', 'route_master': 'bus', 'type': 'route_master'}, []), expected={'class': 9014022, 'subclass': 317647061})
| gpl-3.0 |
deeplearning4j/libnd4j | tests_cpu/lib/googletest-release-1.8.0/googletest/test/gtest_color_test.py | 3259 | 4911 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name = 'nt'
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def UsesColor(term, color_env_var, color_flag):
"""Runs gtest_color_test_ and returns its exit code."""
SetEnvVar('TERM', term)
SetEnvVar(COLOR_ENV_VAR, color_env_var)
if color_flag is None:
args = []
else:
args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
p = gtest_test_utils.Subprocess([COMMAND] + args)
return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
  """Verifies when the Google Test binary produces colored output."""

  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    if not IS_WINDOWS:
      # Monochrome or unrecognized terminals must not get color by default.
      for term in ['dumb', 'emacs', 'xterm-mono', 'unknown', None]:
        self.assert_(not UsesColor(term, None, None))
    # Color-capable terminals get color by default.
    for term in ['linux', 'cygwin', 'xterm', 'xterm-color', 'xterm-256color']:
      self.assert_(UsesColor(term, None, None))

  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    # An explicit 'no' wins regardless of the terminal.
    self.assert_(not UsesColor('dumb', None, 'no'))
    self.assert_(not UsesColor('xterm-color', None, 'no'))
    if not IS_WINDOWS:
      # 'auto' defers to the terminal's capability.
      self.assert_(not UsesColor('emacs', None, 'auto'))
      self.assert_(UsesColor('xterm', None, 'auto'))
    # An explicit 'yes' wins regardless of the terminal.
    self.assert_(UsesColor('dumb', None, 'yes'))
    self.assert_(UsesColor('xterm', None, 'yes'))

  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    self.assert_(not UsesColor('dumb', 'no', None))
    self.assert_(not UsesColor('xterm-color', 'no', None))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', 'auto', None))
      self.assert_(UsesColor('xterm-color', 'auto', None))
    self.assert_(UsesColor('dumb', 'yes', None))
    self.assert_(UsesColor('xterm-color', 'yes', None))

  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color."""
    # The flag takes precedence over the environment variable.
    self.assert_(not UsesColor('xterm-color', 'no', 'no'))
    self.assert_(UsesColor('dumb', 'no', 'yes'))
    self.assert_(UsesColor('xterm-color', 'no', 'auto'))

  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    for yes_alias in ['true', 'YES', 'T', '1']:
      self.assert_(UsesColor('dumb', None, yes_alias))
    for no_alias in ['f', 'false', '0', 'unknown']:
      self.assert_(not UsesColor('xterm', None, no_alias))
if __name__ == '__main__':
  # Delegates to the shared gtest test runner (parses flags, runs the suite).
  gtest_test_utils.Main()
| apache-2.0 |
bhlzlx/ogre | Tools/Wings3DExporter/w2o.py | 34 | 1582 | #!/usr/bin/python
from erlang_ext import *
import types
import pprint
import io3d_wings
import io3d_ogre
import getopt
version = "0.93"
def conv(infile, outfile, writeImages, keepRotation, scaleFactor):
    """Converts a single Wings3D file into an OGRE XML mesh file.

    infile/outfile are file names.  writeImages and keepRotation are
    flags forwarded to the Wings reader; scaleFactor uniformly scales
    the mesh when it differs from 1.0.
    """
    mesh = io3d_wings.read_wings(infile, writeImages, keepRotation)
    if scaleFactor != 1.0:
        mesh.scale(scaleFactor)
    io3d_ogre.write_ogre(mesh, outfile)
if __name__ == "__main__":
try:
options, args = getopt.getopt(sys.argv[1:], "hviks:")
writeImages = 0
keepRotation = 0
scaleFactor = 1.0
for o in options:
option, value = o
if option == '-h':
print """Usage:
w2o [-iks] file1.wings file2.wings
Options:
-h This help
-v Print version
-i Export images from the wings file via PIL
-k Keep the coordinates as they are, do not correct them
to the OGRE coordinate system. Use this if you
want to rotate your objects around the X axis in
code or if you already rotated the objects in Wings.
-s n Scale the object uniformly using the given floating
point factor.
"""
sys.exit(1)
elif option == '-v':
print "w2o", version
sys.exit(1)
elif option == '-i': writeImages = 1
elif option == '-k': keepRotation = 1
elif option == '-s': scaleFactor = float(value)
for arg in args:
# process filename
if arg[-6:] == ".wings":
dstname = arg[:-6]
else:
dstname = arg
dstname += ".mesh.xml"
conv(arg, dstname, writeImages, keepRotation, scaleFactor)
except getopt.GetoptError, e:
print e
print "try -h for help"
sys.exit(1)
| mit |
B-UMMI/INNUca | src/SPAdes-3.13.0-Linux/share/spades/joblib2/disk.py | 19 | 3281 | """
Disk management utilities.
"""
# Authors: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Copyright (c) 2010 Gael Varoquaux
# License: BSD Style, 3 clauses.
import errno
import os
import shutil
import sys
import time
def disk_used(path):
    """ Return the disk usage in a directory, in kilobytes."""
    total = 0
    # Include '.' so the directory entry itself is counted as well.
    for entry in os.listdir(path) + ['.']:
        info = os.stat(os.path.join(path, entry))
        blocks = getattr(info, 'st_blocks', None)
        if blocks is not None:
            total += blocks * 512
        else:
            # st_blocks is not available on some platforms (e.g. Windows):
            # approximate by rounding up to the next multiple of 512.
            total += (info.st_size // 512 + 1) * 512
    # Convert to int to avoid having longs on some systems (we
    # don't want longs to avoid problems with SQLite).
    return int(total / 1024.)
def memstr_to_kbytes(text):
    """Converts a memory size string such as '10G', '500M' or '50K'
    to its value in kilobytes.

    Raises ValueError when the suffix or the numeric part is invalid.
    """
    kilo = 1024
    multipliers = {'K': 1, 'M': kilo, 'G': kilo ** 2}
    try:
        # The last character selects the unit; the rest is the amount.
        size = int(multipliers[text[-1]] * float(text[:-1]))
    except (KeyError, ValueError):
        raise ValueError(
            "Invalid literal for size give: %s (type %s) should be "
            "alike '10G', '500M', '50K'." % (text, type(text))
        )
    return size
def mkdirp(d):
    """Ensure directory d exists (like mkdir -p on Unix).

    No guarantee that the directory is writable.  Intermediate
    directories are created as needed; an already-existing directory
    is not an error.  Any other OSError (permissions, a file with the
    same name, ...) is re-raised.
    """
    try:
        os.makedirs(d)
    except OSError as e:
        # 'except OSError, e' is Python-2-only syntax; the 'as' form
        # works on Python 2.6+ and Python 3 with identical behavior.
        if e.errno != errno.EEXIST:
            raise
# if a rmtree operation fails in rm_subdirs, wait for this much time (in secs),
# then retry once. if it still fails, raise the exception
RM_SUBDIRS_RETRY_TIME = 0.1


def rm_subdirs(path, onerror=None):
    """Remove all subdirectories in this path.

    The directory indicated by `path` is left in place, and its
    subdirectories are erased.  Regular files directly under `path`
    are untouched.

    If onerror is set, it is called to handle the error with arguments
    (func, path, exc_info) where func is os.listdir, os.remove, or
    os.rmdir; path is the argument to that function that caused it to
    fail; and exc_info is a tuple returned by sys.exc_info().  If
    onerror is None, an exception is raised.
    """
    # NOTE this code is adapted from the one in shutil.rmtree, and is
    # just as fast
    names = []
    try:
        names = os.listdir(path)
    except os.error:
        # 'except os.error, err' is Python-2-only syntax; the unused
        # binding is dropped and the 'as'-free form works on 2.6+ and 3.
        if onerror is not None:
            onerror(os.listdir, path, sys.exc_info())
        else:
            raise

    for name in names:
        fullname = os.path.join(path, name)
        if os.path.isdir(fullname):
            if onerror is not None:
                shutil.rmtree(fullname, False, onerror)
            else:
                # allow the rmtree to fail once, wait and re-try.
                # if the error is raised again, fail
                err_count = 0
                while True:
                    try:
                        shutil.rmtree(fullname, False, None)
                        break
                    except os.error:
                        if err_count > 0:
                            raise
                        err_count += 1
                        time.sleep(RM_SUBDIRS_RETRY_TIME)
| gpl-3.0 |
kjc88/sl4a | python/gdata/src/gdata/tlslite/integration/SMTP_TLS.py | 319 | 4739 | """TLS Lite + smtplib."""
from smtplib import SMTP
from gdata.tlslite.TLSConnection import TLSConnection
from gdata.tlslite.integration.ClientHelper import ClientHelper
class SMTP_TLS(SMTP):
    """This class extends L{smtplib.SMTP} with TLS support."""

    def starttls(self,
                 username=None, password=None, sharedKey=None,
                 certChain=None, privateKey=None,
                 cryptoID=None, protocol=None,
                 x509Fingerprint=None,
                 x509TrustList=None, x509CommonName=None,
                 settings=None):
        """Puts the connection to the SMTP server into TLS mode.

        If the server supports TLS, this will encrypt the rest of the
        SMTP session.

        For client authentication, use one of these argument
        combinations:
         - username, password (SRP)
         - username, sharedKey (shared-key)
         - certChain, privateKey (certificate)

        For server authentication, you can either rely on the implicit
        mutual authentication performed by SRP or shared-keys, or you
        can do certificate-based server authentication with one of
        these argument combinations:
         - cryptoID[, protocol] (requires cryptoIDlib)
         - x509Fingerprint
         - x509TrustList[, x509CommonName] (requires cryptlib_py)

        Certificate-based server authentication is compatible with SRP
        or certificate-based client authentication; it is not
        compatible with shared-keys.

        The caller should be prepared to handle TLS-specific
        exceptions.  See the client handshake functions in
        L{tlslite.TLSConnection.TLSConnection} for details on which
        exceptions might be raised.

        @type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
        @param settings: Various settings which can be used to control
        the ciphersuites, certificate types, and SSL/TLS versions
        offered by the client.

        @rtype: tuple
        @return: The (response code, reply) pair returned by the
        STARTTLS command; the TLS handshake is only performed when the
        response code is 220.
        """
        (resp, reply) = self.docmd("STARTTLS")
        if resp == 220:
            # Bundle the authentication arguments; ClientHelper decides
            # which handshake variant they select.
            auth_helper = ClientHelper(
                username, password, sharedKey,
                certChain, privateKey,
                cryptoID, protocol,
                x509Fingerprint,
                x509TrustList, x509CommonName,
                settings)
            tls_conn = TLSConnection(self.sock)
            tls_conn.closeSocket = True
            auth_helper._handshake(tls_conn)
            # From here on, smtplib talks through the TLS wrapper.
            self.sock = tls_conn
            self.file = tls_conn.makefile('rb')
        return (resp, reply)
| apache-2.0 |
Dellware78/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/hard_dependency/gyptest-exported-hard-dependency.py | 350 | 1332 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that a hard_dependency that is exported is pulled in as a dependency
for a target if the target is a static library and if the generator will
remove dependencies between static libraries.
"""
import TestGyp
test = TestGyp.TestGyp()
# The dump_dependency_json generator does not prune dependencies between
# static libraries, so the scenario under test cannot occur there.
if test.format == 'dump_dependency_json':
  test.skip_test('Skipping test; dependency JSON does not adjust ' \
                 'static libraries.\n')
test.run_gyp('hard_dependency.gyp', chdir='src')
chdir = 'relocate/src'
test.relocate('src', chdir)
# Build only target 'c'; which other targets get built is what we verify.
test.build('hard_dependency.gyp', 'c', chdir=chdir)
# The 'a' static library should be built, as it has actions with side-effects
# that are necessary to compile 'c'. Even though 'c' does not directly depend
# on 'a', because 'a' is a hard_dependency that 'b' exports, 'c' should import
# it as a hard_dependency and ensure it is built before building 'c'.
test.built_file_must_exist('a', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_not_exist('b', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_exist('c', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_not_exist('d', type=test.STATIC_LIB, chdir=chdir)
test.pass_test()
daniel-fanjul-alcuten/ice | lib/gmock-1.6.0/gtest/scripts/gen_gtest_pred_impl.py | 412 | 21984 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
import time
# Where this script is.  The output paths below are resolved relative to it.
SCRIPT_DIR = os.path.dirname(sys.argv[0])
# Where to store the generated header.
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')
# Where to store the generated unit test.
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
  """Returns the preamble for the header file.

  Args:
    n:  the maximum arity of the predicate macros to be generated.
  """
  # A map that defines the values used in the preamble template.
  # 'today' and 'command' are embedded in the output so readers can see
  # how (and when) the file was generated.
  DEFS = {
    'today' : time.strftime('%m/%d/%Y'),
    'year' : time.strftime('%Y'),
    'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
    'n' : n
    }
  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'.  DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
#error Do not include gtest_pred_impl.h directly.  Include gtest.h instead.
#endif  // GTEST_INCLUDE_GTEST_GTEST_H_
// This header implements a family of generic predicate assertion
// macros:
//
//   ASSERT_PRED_FORMAT1(pred_format, v1)
//   ASSERT_PRED_FORMAT2(pred_format, v1, v2)
//   ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult.  See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
//   ASSERT_PRED1(pred, v1)
//   ASSERT_PRED2(pred, v1, v2)
//   ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email googletestframework@googlegroups.com if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce.  Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
  if (const ::testing::AssertionResult gtest_ar = (expression)) \\
    ; \\
  else \\
    on_failure(gtest_ar.failure_message())
""" % DEFS)
def Arity(n):
  """Returns the English name of the given arity (None for negative n)."""
  # The first four arities have special names; beyond that, fall back to
  # the generic 'n-ary' form.
  named_arities = ['nullary', 'unary', 'binary', 'ternary']
  if n < 0:
    return None
  if n < len(named_arities):
    return named_arities[n]
  return '%s-ary' % n
def Title(word):
  """Capitalizes only the first character of the given word.

  Unlike str.title(), this leaves the rest of the word untouched, so
  Title('4-ary') is '4-ary' while '4-ary'.title() is '4-Ary'.
  """
  head, tail = word[0], word[1:]
  return head.upper() + tail
def OneTo(n):
  """Returns the 1-based sequence [1, 2, 3, ..., n]."""
  return [k + 1 for k in range(n)]
def Iter(n, format, sep=''):
  """Joins n formatted strings, numbered 1 through n.

  Each string is produced by applying 'format' (which may contain 0 or
  more '%s' specs) to the current index; every spec in a given string
  receives the same index.  The results are joined with 'sep'.

  Example:

    Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
  """
  # Every '%s' in the format receives the same index, so each index is
  # repeated once per spec.
  spec_count = format.count('%s')
  return sep.join([format % (spec_count * (i,)) for i in OneTo(n)])
def ImplementationForArity(n):
  """Returns the implementation of n-ary predicate assertions.

  The returned C++ source contains the AssertPredNHelper function, the
  internal GTEST_PRED* macros, and the public {EXPECT,ASSERT}_PRED[_FORMAT]N
  macros for the given arity n.
  """
  # A map that defines the values used in the implementation template.
  DEFS = {
    'n' : str(n),
    'vs' : Iter(n, 'v%s', sep=', '),
    'vts' : Iter(n, '#v%s', sep=', '),
    'arity' : Arity(n),
    'Arity' : Title(Arity(n))
    }
  impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s.  Don't use
// this in your code.
template <typename Pred""" % DEFS
  impl += Iter(n, """,
          typename T%s""")
  impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS
  impl += Iter(n, """,
                                  const char* e%s""")
  impl += """,
                                  Pred pred"""
  impl += Iter(n, """,
                                  const T%s& v%s""")
  impl += """) {
  if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS
  impl += '  return AssertionFailure() << pred_text << "("'
  impl += Iter(n, """
                            << e%s""", sep=' << ", "')
  impl += ' << ") evaluates to false, where"'
  impl += Iter(n, """
      << "\\n" << e%s << " evaluates to " << v%s""")
  impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
  GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s),\\
                on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s.  Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
  GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS
  impl += Iter(n, """, \\
                                             #v%s""")
  impl += """, \\
                                             pred"""
  impl += Iter(n, """, \\
                                             v%s""")
  impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
  GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
  GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
  GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
  GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS
  return impl
def HeaderPostamble():
  """Returns the postamble for the header file."""
  # Closes the include guard opened in HeaderPreamble().
  return """
#endif  // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
def GenerateFile(path, content):
  """Overwrites the file at the given path with the given content string."""
  # Parenthesized single-argument prints behave the same on Python 2 and 3;
  # the bare 'print x' statement form is Python-2-only.
  print('Updating file %s . . .' % path)
  # 'with' guarantees the file is closed even if the write raises; the
  # original used the 'file' builtin, which no longer exists in Python 3,
  # and closed the handle only on the success path.
  with open(path, 'w+') as f:
    f.write(content)
  print('File %s has been updated.' % path)
def GenerateHeader(n):
  """Given the maximum arity n, updates the header file that implements
  the predicate assertions."""
  # One implementation section per arity, wrapped in the shared pre/postamble.
  body = ''.join(ImplementationForArity(i) for i in OneTo(n))
  GenerateFile(HEADER, HeaderPreamble(n) + body + HeaderPostamble())
def UnitTestPreamble():
  """Returns the preamble for the unit test file.

  The preamble carries the license header, the shared includes, and the
  user-defined Bool type exercised by the generated tests.
  """
  # A map that defines the values used in the preamble template.
  DEFS = {
    'today' : time.strftime('%m/%d/%Y'),
    'year' : time.strftime('%Y'),
    'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
    }
  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'.  DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long.  If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions.  We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon.  In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
  explicit Bool(int val) : value(val != 0) {}
  bool operator>(int n) const { return value > Bool(n).value; }
  Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
  bool operator==(const Bool& rhs) const { return value == rhs.value; }
  bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
  return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
  """Returns the tests for n-ary predicate assertions.

  Generates sample predicates (function/functor, formatter/non-formatter,
  int/Bool arguments), a fixture that verifies each argument is evaluated
  exactly once, and one TEST_F per combination of assertion options.
  """
  # A map that defines the values used in the template for the tests.
  DEFS = {
    'n' : n,
    'es' : Iter(n, 'e%s', sep=', '),
    'vs' : Iter(n, 'v%s', sep=', '),
    'vts' : Iter(n, '#v%s', sep=', '),
    'tvs' : Iter(n, 'T%s v%s', sep=', '),
    'int_vs' : Iter(n, 'int v%s', sep=', '),
    'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
    'types' : Iter(n, 'typename T%s', sep=', '),
    'v_sum' : Iter(n, 'v%s', sep=' + '),
    'arity' : Arity(n),
    'Arity' : Title(Arity(n)),
    }
  tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
  return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problem with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
  return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
  return %(v_sum)s > 0;
}
""" % DEFS)
  tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
  template <%(types)s>
  bool operator()(""" % DEFS
  tests += Iter(n, 'const T%s& v%s', sep=""",
                  """)
  tests += """) {
    return %(v_sum)s > 0;
  }
};
""" % DEFS
  tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
  tests += Iter(n, 'const char* e%s', sep=""",
                                             """)
  tests += Iter(n, """,
                                             const T%s& v%s""")
  tests += """) {
  if (PredFunction%(n)s(%(vs)s))
    return testing::AssertionSuccess();
  return testing::AssertionFailure()
      << """ % DEFS
  tests += Iter(n, 'e%s', sep=' << " + " << ')
  tests += """
      << " is expected to be positive, but evaluates to "
      << %(v_sum)s << ".";
}
""" % DEFS
  tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
  template <%(types)s>
  testing::AssertionResult operator()(""" % DEFS
  tests += Iter(n, 'const char* e%s', sep=""",
                                      """)
  tests += Iter(n, """,
                                      const T%s& v%s""")
  tests += """) const {
    return PredFormatFunction%(n)s(%(es)s, %(vs)s);
  }
};
""" % DEFS
  tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
 protected:
  virtual void SetUp() {
    expected_to_finish_ = true;
    finished_ = false;""" % DEFS
  tests += """
    """ + Iter(n, 'n%s_ = ') + """0;
  }
"""
  tests += """
  virtual void TearDown() {
    // Verifies that each of the predicate's arguments was evaluated
    // exactly once."""
  tests += ''.join(["""
    EXPECT_EQ(1, n%s_) <<
        "The predicate assertion didn't evaluate argument %s "
        "exactly once.";""" % (i, i + 1) for i in OneTo(n)])
  tests += """
    // Verifies that the control flow in the test function is expected.
    if (expected_to_finish_ && !finished_) {
      FAIL() << "The predicate assertion unexpactedly aborted the test.";
    } else if (!expected_to_finish_ && finished_) {
      FAIL() << "The failed predicate assertion didn't abort the test "
                "as expected.";
    }
  }
  // true iff the test function is expected to run to finish.
  static bool expected_to_finish_;
  // true iff the test function did run to finish.
  static bool finished_;
""" % DEFS
  tests += Iter(n, """
  static int n%s_;""")
  tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS
  tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS
  tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS
  def GenTest(use_format, use_assert, expect_failure,
              use_functor, use_user_type):
    """Returns the test for a predicate assertion macro.

    Args:
      use_format:     true iff the assertion is a *_PRED_FORMAT*.
      use_assert:     true iff the assertion is a ASSERT_*.
      expect_failure: true iff the assertion is expected to fail.
      use_functor:    true iff the first argument of the assertion is
                      a functor (as opposed to a function)
      use_user_type:  true iff the predicate functor/function takes
                      argument(s) of a user-defined type.

    Example:

      GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
      of a successful EXPECT_PRED_FORMATn() that takes a functor
      whose arguments have built-in types."""
    if use_assert:
      assrt = 'ASSERT'  # 'assert' is reserved, so we cannot use
                        # that identifier here.
    else:
      assrt = 'EXPECT'
    assertion = assrt + '_PRED'
    if use_format:
      pred_format = 'PredFormat'
      assertion += '_FORMAT'
    else:
      pred_format = 'Pred'
    assertion += '%(n)s' % DEFS
    if use_functor:
      pred_format_type = 'functor'
      pred_format += 'Functor%(n)s()'
    else:
      pred_format_type = 'function'
      pred_format += 'Function%(n)s'
      if not use_format:
        if use_user_type:
          pred_format += 'Bool'
        else:
          pred_format += 'Int'
    test_name = pred_format_type.title()
    if use_user_type:
      arg_type = 'user-defined type (Bool)'
      test_name += 'OnUserType'
      if expect_failure:
        arg = 'Bool(n%s_++)'
      else:
        arg = 'Bool(++n%s_)'
    else:
      arg_type = 'built-in type (int)'
      test_name += 'OnBuiltInType'
      if expect_failure:
        arg = 'n%s_++'
      else:
        arg = '++n%s_'
    if expect_failure:
      successful_or_failed = 'failed'
      expected_or_not = 'expected.'
      test_name += 'Failure'
    else:
      successful_or_failed = 'successful'
      expected_or_not = 'UNEXPECTED!'
      test_name += 'Success'
    # A map that defines the values used in the test template.
    defs = DEFS.copy()
    defs.update({
      'assert' : assrt,
      'assertion' : assertion,
      'test_name' : test_name,
      'pf_type' : pred_format_type,
      'pf' : pred_format,
      'arg_type' : arg_type,
      'arg' : arg,
      'successful' : successful_or_failed,
      'expected' : expected_or_not,
      })
    test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs
    indent = (len(assertion) + 3)*' '
    extra_indent = ''
    if expect_failure:
      extra_indent = '  '
      if use_assert:
        test += """
  expected_to_finish_ = false;
  EXPECT_FATAL_FAILURE({  // NOLINT"""
      else:
        test += """
  EXPECT_NONFATAL_FAILURE({  // NOLINT"""
    test += '\n' + extra_indent + """  %(assertion)s(%(pf)s""" % defs
    test = test % defs
    test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
    test += ');\n' + extra_indent + '  finished_ = true;\n'
    if expect_failure:
      test += '  }, "");\n'
    test += '}\n'
    return test
  # Generates tests for all 2**5 = 32 combinations of the five options.
  tests += ''.join([GenTest(use_format, use_assert, expect_failure,
                            use_functor, use_user_type)
                    for use_format in [0, 1]
                    for use_assert in [0, 1]
                    for expect_failure in [0, 1]
                    for use_functor in [0, 1]
                    for use_user_type in [0, 1]
                    ])
  return tests
def UnitTestPostamble():
  """Returns the postamble for the tests."""
  # Nothing needs to follow the generated tests.
  return ''
def GenerateUnitTest(n):
  """Given the maximum arity n, updates the file that regression-tests
  the predicate assertions of arities 1 through n."""
  test_sections = ''.join(TestsForArity(i) for i in OneTo(n))
  GenerateFile(UNIT_TEST,
               UnitTestPreamble() + test_sections + UnitTestPostamble())
def _Main():
  """The entry point of the script.  Generates the header file and its
  unit test."""
  # The single command-line argument is the maximum arity to generate;
  # with anything else, print the usage (the module docstring) and quit.
  if len(sys.argv) != 2:
    print __doc__
    print 'Author: ' + __author__
    sys.exit(1)
  n = int(sys.argv[1])
  GenerateHeader(n)
  GenerateUnitTest(n)
if __name__ == '__main__':
  # Running as a script: regenerate both output files for the requested arity.
  _Main()
| gpl-3.0 |
def-/commandergenius | project/jni/python/src/Lib/lib2to3/fixes/fix_sys_exc.py | 53 | 1030 | """Fixer for sys.exc_{type, value, traceback}
sys.exc_type -> sys.exc_info()[0]
sys.exc_value -> sys.exc_info()[1]
sys.exc_traceback -> sys.exc_info()[2]
"""
# By Jeff Balogh and Benjamin Peterson
# Local imports
from .. import fixer_base
from ..fixer_util import Attr, Call, Name, Number, Subscript, Node, syms
class FixSysExc(fixer_base.BaseFix):
    """Rewrites sys.exc_{type,value,traceback} as sys.exc_info()[i]."""

    # This order matches the ordering of sys.exc_info().
    exc_info = ["exc_type", "exc_value", "exc_traceback"]
    PATTERN = """
              power< 'sys' trailer< dot='.' attribute=(%s) > >
              """ % '|'.join("'%s'" % e for e in exc_info)

    def transform(self, node, results):
        attr_leaf = results["attribute"][0]
        # The position of the attribute in exc_info picks the exc_info() slot.
        slot = Number(self.exc_info.index(attr_leaf.value))
        info_call = Call(Name("exc_info"), prefix=attr_leaf.get_prefix())
        new_power = Attr(Name("sys"), info_call)
        # Preserve the whitespace that preceded the original '.'.
        new_power[1].children[0].set_prefix(results["dot"].get_prefix())
        new_power.append(Subscript(slot))
        return Node(syms.power, new_power, prefix=node.get_prefix())
| lgpl-2.1 |
GoogleCloudPlatform/storage-signedurls-python | gcs-signed-url-example.py | 2 | 6727 | # Copyright 2013 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains an example of using Google Cloud Storage Signed URLs."""
import base64
import datetime
import hashlib
import md5
import sys
import time

import Crypto.Hash.SHA256 as SHA256
import Crypto.PublicKey.RSA as RSA
import Crypto.Signature.PKCS1_v1_5 as PKCS1_v1_5
import requests
try:
import conf
except ImportError:
sys.exit('Configuration module not found. You must create a conf.py file. '
'See the example in conf.example.py.')
# The Google Cloud Storage API endpoint. You should not need to change this.
GCS_API_ENDPOINT = 'https://storage.googleapis.com'
class CloudStorageURLSigner(object):
  """Contains methods for generating signed URLs for Google Cloud Storage.

  Implements the (V2) signed-URL scheme: a canonical string of the HTTP verb,
  content hash/type, expiration timestamp and resource path is signed with a
  service account's RSA key and attached to the request as query parameters.
  """

  def __init__(self, key, client_id_email, gcs_api_endpoint, expiration=None,
               session=None):
    """Creates a CloudStorageURLSigner that can be used to access signed URLs.

    Args:
      key: A PyCrypto private key.
      client_id_email: GCS service account email.
      gcs_api_endpoint: Base URL for GCS API.
      expiration: An instance of datetime.datetime containing the time when the
        signed URL should expire.
      session: A requests.session.Session to use for issuing requests. If not
        supplied, a new session is created.
    """
    self.key = key
    self.client_id_email = client_id_email
    self.gcs_api_endpoint = gcs_api_endpoint

    # Default to a one-day lifetime; store as an integer POSIX timestamp,
    # which is the format the signature string requires.
    self.expiration = expiration or (datetime.datetime.now() +
                                     datetime.timedelta(days=1))
    self.expiration = int(time.mktime(self.expiration.timetuple()))

    self.session = session or requests.Session()

  def _Base64Sign(self, plaintext):
    """Signs and returns a base64-encoded SHA256 digest."""
    shahash = SHA256.new(plaintext)
    signer = PKCS1_v1_5.new(self.key)
    signature_bytes = signer.sign(shahash)
    return base64.b64encode(signature_bytes)

  def _MakeSignatureString(self, verb, path, content_md5, content_type):
    """Creates the signature string for signing according to GCS docs."""
    # Field order is mandated by the GCS signed-URL specification.
    signature_string = ('{verb}\n'
                        '{content_md5}\n'
                        '{content_type}\n'
                        '{expiration}\n'
                        '{resource}')
    return signature_string.format(verb=verb,
                                   content_md5=content_md5,
                                   content_type=content_type,
                                   expiration=self.expiration,
                                   resource=path)

  def _MakeUrl(self, verb, path, content_type='', content_md5=''):
    """Forms and returns the full signed URL to access GCS.

    Args:
      verb: The HTTP verb ('GET', 'PUT', 'DELETE').
      path: The relative API path, e.g. '/bucket/object'.
      content_type: Optional Content-Type bound into the signature.
      content_md5: Optional base64 MD5 digest bound into the signature.

    Returns:
      A (base_url, query_params) tuple for use with requests.
    """
    base_url = '%s%s' % (self.gcs_api_endpoint, path)
    signature_string = self._MakeSignatureString(verb, path, content_md5,
                                                 content_type)
    signature_signed = self._Base64Sign(signature_string)
    query_params = {'GoogleAccessId': self.client_id_email,
                    'Expires': str(self.expiration),
                    'Signature': signature_signed}
    return base_url, query_params

  def Get(self, path):
    """Performs a GET request.

    Args:
      path: The relative API path to access, e.g. '/bucket/object'.

    Returns:
      An instance of requests.Response containing the HTTP response.
    """
    base_url, query_params = self._MakeUrl('GET', path)
    return self.session.get(base_url, params=query_params)

  def Put(self, path, content_type, data):
    """Performs a PUT request.

    Args:
      path: The relative API path to access, e.g. '/bucket/object'.
      content_type: The content type to assign to the upload.
      data: The file data to upload to the new file.

    Returns:
      An instance of requests.Response containing the HTTP response.
    """
    # hashlib.md5 replaces the deprecated Python 2 `md5` module (removed in
    # Python 3); the digests are identical.
    md5_digest = base64.b64encode(hashlib.md5(data).digest())
    base_url, query_params = self._MakeUrl('PUT', path, content_type,
                                           md5_digest)
    headers = {}
    headers['Content-Type'] = content_type
    headers['Content-Length'] = str(len(data))
    # GCS verifies the upload against this digest, which is also part of the
    # signed string above.
    headers['Content-MD5'] = md5_digest
    return self.session.put(base_url, params=query_params, headers=headers,
                            data=data)

  def Delete(self, path):
    """Performs a DELETE request.

    Args:
      path: The relative API path to access, e.g. '/bucket/object'.

    Returns:
      An instance of requests.Response containing the HTTP response.
    """
    base_url, query_params = self._MakeUrl('DELETE', path)
    return self.session.delete(base_url, params=query_params)
def ProcessResponse(r, expected_status=200):
  """Prints request and response information and checks for desired return code.

  NOTE: Python 2 only (print statements, dict.iteritems()).

  Args:
    r: A requests.Response object.
    expected_status: The expected HTTP status code.

  Raises:
    SystemExit if the response code doesn't match expected_status.
  """
  print '--- Request ---'
  print r.request.url
  for header, value in r.request.headers.iteritems():
    print '%s: %s' % (header, value)
  print '---------------'

  print '--- Response (Status %s) ---' % r.status_code
  print r.content
  print '-----------------------------'
  print
  # Abort the whole demo on any unexpected status so later steps don't
  # operate on a bad state.
  if r.status_code != expected_status:
    sys.exit('Exiting due to receiving %d status code when expecting %d.'
             % (r.status_code, expected_status))
def main():
  """Demonstrates a signed-URL round trip: PUT, GET, then DELETE an object.

  Reads the RSA private key and bucket/object names from the `conf` module;
  exits with an error message if the key file cannot be read.
  """
  try:
    keytext = open(conf.PRIVATE_KEY_PATH, 'rb').read()
  except IOError as e:
    sys.exit('Error while reading private key: %s' % e)
  private_key = RSA.importKey(keytext)
  signer = CloudStorageURLSigner(private_key, conf.SERVICE_ACCOUNT_EMAIL,
                                 GCS_API_ENDPOINT)
  file_path = '/%s/%s' % (conf.BUCKET_NAME, conf.OBJECT_NAME)

  print 'Creating file...'
  print '================'
  r = signer.Put(file_path, 'text/plain', 'blah blah')
  ProcessResponse(r)

  print 'Retrieving file...'
  print '=================='
  r = signer.Get(file_path)
  ProcessResponse(r)

  print 'Deleting file...'
  print '================'
  r = signer.Delete(file_path)
  # DELETE returns 204 No Content on success.
  ProcessResponse(r, expected_status=204)

  print 'Done.'
# Script entry point: run the signed-URL demo only when executed directly.
if __name__ == '__main__':
  main()
| apache-2.0 |
RapidApplicationDevelopment/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/core.py | 8 | 38128 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core classes and core ops for LabeledTensor.
Core ops are ops which will eventually be called by LabeledTensor methods,
and ops which a core op depends upon.
For example, `add` is a core op because we'll eventually support the `+`
operator.
Non-core ops should go in `ops.py`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import numbers
import types
import numpy as np
from six import binary_type
from six import string_types
from six import text_type
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# pylint: disable=invalid-name

# Type-constraint aliases consumed by the `tc` (typecheck) decorators below.

# Types coercible to Axis.labels
# We use this instead of collections.Sequence to exclude strings.
LabelsLike = tc.Union(np.ndarray, range, list, tuple)

# Types coercible to a tf.Dimension
DimensionLike = tc.Optional(tc.Union(tensor_shape.Dimension, int))

# Types usable for axis values
AxisValue = tc.Union(LabelsLike, DimensionLike)

# Valid scalar values for TensorFlow
Scalar = tc.Union(numbers.Number, bool, binary_type, text_type)

# pylint: enable=invalid-name
class Axis(object):
  """Size and label information for an axis.

  Axis contains either a tf.Dimension indicating the size of an axis,
  or a tuple of tick labels for the axis.

  If tick labels are provided, they must be unique.
  """

  @tc.accepts(object, string_types, AxisValue)
  def __init__(self, name, value):
    """Construct an Axis.

    Args:
      name: Name of the axis.
      value: Either None, an int or tf.Dimension giving the size of the axis,
        or a sequence that is not a string additionally providing coordinate
        (tick) labels.

    Raises:
      ValueError: If the user provides labels with duplicate values.
    """
    if isinstance(value, tensor_shape.Dimension):
      dimension = value
      labels = None
    elif isinstance(value, int) or value is None:
      dimension = tensor_shape.Dimension(value)
      labels = None
    else:
      # Any other value is a sequence of tick labels; size is its length.
      dimension = tensor_shape.Dimension(len(value))
      labels = tuple(value)

    if dimension.value == 0:
      # Treat a zero-length axis as if it has labels.
      labels = ()

    if labels is not None:
      # Build a label -> position map; a size mismatch means duplicates.
      index = dict(zip(labels, range(len(labels))))
      if len(index) != len(labels):
        raise ValueError('Tick labels must be unique, but got {}'
                         .format(labels))
    else:
      index = None

    self._name = name  # type: string_types
    self._dimension = dimension  # type: tensor_shape.Dimension
    self._labels = labels  # type: Optional[tuple]
    self._index = index  # type: Optional[Dict[Any, int]]

  @property
  @tc.returns(string_types)
  def name(self):
    """Returns the name of the axis."""
    return self._name

  @tc.returns(string_types)
  def __repr__(self):
    # Axis('x', Dimension(2))
    # TODO(shoyer): make very long reprs more succint?
    return "%s('%s', %r)" % (type(self).__name__, self.name, self.value)

  @tc.returns(bool)
  def __eq__(self, other):
    return (isinstance(other, Axis) and
            self.name == other.name and
            self.size == other.size and
            self.labels == other.labels)

  def __hash__(self):
    # Hashes the same fields compared by __eq__, keeping the two consistent.
    return hash((self.name, self.size, self.labels))

  @tc.returns(bool)
  def __ne__(self, other):
    return not self == other

  @tc.returns(int)
  def __len__(self):
    size = self.size
    if size is None:
      raise ValueError('axis %r has unknown length' % self.name)
    return size

  @property
  @tc.returns(tc.Optional(tensor_shape.Dimension))
  def dimension(self):
    """Returns the tf.Dimension for this axis."""
    return self._dimension

  @property
  @tc.returns(tc.Optional(int))
  def size(self):
    """Returns the size of this axis as an int, or None if unknown."""
    return self._dimension.value

  @property
  @tc.returns(tc.Union(tuple, tensor_shape.Dimension))
  def value(self):
    """Returns the tf.Dimension or tuple specifying axis ticks."""
    if self.labels is None:
      return self.dimension
    else:
      return self.labels

  @property
  @tc.returns(tc.Optional(tuple))
  def labels(self):
    """Returns the tuple containing coordinate labels, else None."""
    return self._labels

  def index(self, value):
    """Returns the integer position of the given tick label."""
    if self._index is None:
      raise ValueError('Axis does not have tick labels')
    return self._index[value]
# tc class for anything that can be coerced into an Axis: either an Axis
# itself or a (name, value) tuple accepted by the Axis constructor.
# pylint: disable=invalid-name
AxisLike = tc.Union(Axis, tc.Tuple(string_types, AxisValue))
# pylint: enable=invalid-name
@tc.returns(Axis)
@tc.accepts(AxisLike)
def as_axis(axis_data):
  """Coerce an AxisLike object into an Axis.

  Args:
    axis_data: Axis object or tuple (axis_name, axis_value) describing an axis.

  Returns:
    An Axis object. When axis_data already is an Axis, it is returned
    unchanged (no copy is made).
  """
  return axis_data if isinstance(axis_data, Axis) else Axis(*axis_data)
class Axes(collections.Mapping):
  """Axis names and indices for a tensor.

  It is an ordered mapping, with keys given by axis name and values given
  by Axis objects. Duplicate axis names are not allowed.
  """

  @tc.accepts(object, tc.List(AxisLike))
  def __init__(self, axes):
    """Construct an Axes.

    Args:
      axes: A list of Axis objects or (axis_name, axis_value) tuples.

    Raises:
      ValueError: If the user provides empty or duplicate axis names.
    """
    self._axes = collections.OrderedDict()

    for axis_data in axes:
      axis = as_axis(axis_data)

      name = axis.name
      if name in self._axes:
        raise ValueError('Duplicate axis name: %s' % name)

      self._axes[name] = axis

  def __iter__(self):
    # Iterating an Axes yields axis names, in insertion order.
    return iter(self._axes)

  @tc.returns(string_types)
  def __repr__(self):
    # Axes([('x', Dimension(2)),
    #       ('y', ['a', 'b', 'c']),
    #       ('z', Dimension(4))])
    cls_name = type(self).__name__
    values = ["('%s', %r)" % (v.name, v.value) for v in self._axes.values()]
    values_repr = (',\n' + ' ' * len(cls_name + '([')).join(values)
    return '%s([%s])' % (cls_name, values_repr)

  @tc.returns(Axis)
  @tc.accepts(object, string_types)
  def __getitem__(self, name):
    return self._axes[name]

  @tc.returns(bool)
  def __contains__(self, name):
    return name in self._axes

  @tc.returns(int)
  def __len__(self):
    return len(self._axes)

  def __hash__(self):
    # Hashable so Axes can participate in LabeledTensor.__hash__; depends on
    # insertion order, matching Mapping equality for OrderedDict contents.
    return hash(tuple(self.items()))

  @tc.accepts(object, string_types)
  def remove(self, axis_name):
    """Creates a new Axes object without the given axis."""
    if axis_name not in self:
      raise KeyError(axis_name)
    remaining_axes = [axis for axis in self.values() if axis.name != axis_name]
    return Axes(remaining_axes)
class LabeledTensor(object):
  """A tensor with annotated axes.

  It has the following invariants:
    1) The dimensionality of the tensor is equal to the number of elements
    in axes.
    2) The number of coordinate values in the ith dimension is equal to the
    size of the tensor in the ith dimension.

  Attributes:
    tensor: tf.Tensor containing the data.
    axes: lt.Axes containing axis names and coordinate labels.
  """

  @tc.accepts(object, ops.Tensor,
              tc.Union(Axes, tc.Collection(tc.Union(string_types, AxisLike))))
  def __init__(self, tensor, axes):
    """Construct a LabeledTensor.

    Args:
      tensor: The underlying tensor containing the data.
      axes: An Axes object, or a collection of strings, Axis objects or tuples
        of (name, value) pairs indicating the axes.

    Raises:
      ValueError: If the provided axes do not satisfy the class invariants.
    """
    self._tensor = tensor
    shape = tensor.get_shape()

    if isinstance(axes, Axes):
      unvalidated_axes = axes
    else:
      mutable_axes = []

      for position, axis_like in enumerate(axes):
        if isinstance(axis_like, string_types):
          # The coordinates for this axes are unlabeled.
          # Infer the size of the axis.
          value = shape[position]
          axis_like = (axis_like, value)

        mutable_axes.append(axis_like)

      # Construct the Axis object, which will additionally validate the contents
      # of the object.
      unvalidated_axes = Axes(mutable_axes)

    # Check our invariants.

    # First, the rank of the tensor must be equal to the number of axes.
    if len(shape) != len(unvalidated_axes):
      raise ValueError('Tensor rank was not equal to the number of axes: %r, %r'
                       % (shape, unvalidated_axes))

    # Second, the size of each tensor dimension must match the size of the
    # corresponding indices.
    for (d, axis) in zip(shape, unvalidated_axes.values()):
      if d != axis.size:
        raise ValueError(
            'Provided axis size %d does not match tensor dimension size %d' %
            (axis.size, d))

    self._axes = unvalidated_axes

  def __repr__(self):
    # <LabeledTensor 'foo' shape=(2, 3, 4) dtype=float32
    #  axes=[('x', Dimension(2)),
    #        ('y', ('a', 'b', 'c'),
    #        ('z', Dimension(4))]>
    axes = ["('%s', %r)" % (v.name, v.value) for v in self.axes.values()]
    axes_repr = (',\n' + ' ' * len(' axes=[')).join(axes)
    return ("<%s '%s' shape=%s dtype=%s\n axes=[%s]>" %
            (type(self).__name__, self.tensor.name, self.tensor.get_shape(),
             self.tensor.dtype.name, axes_repr))

  @property
  def tensor(self):
    """Returns the underlying tf.Tensor."""
    return self._tensor

  def _as_graph_element(self):
    """Support tf.Graph.as_graph_element on LabeledTensor objects.

    This allows operations such as tf.name_scope to take labeled tensors.

    Returns:
      self.tensor
    """
    return self.tensor

  @property
  def axes(self):
    """Returns the Axes describing this tensor's dimensions."""
    return self._axes

  # properties/methods directly borrowed from tf.Tensor:

  @property
  def dtype(self):
    return self._tensor.dtype

  @property
  def name(self):
    return self._tensor.name

  def get_shape(self):
    """Returns the TensorShape that represents the shape of this tensor.

    See tf.Tensor.get_shape().

    Returns:
      A TensorShape representing the shape of this tensor.
    """
    return self._tensor.get_shape()

  # TODO(shoyer): consider how/if to implement .eval(). Maybe it should return
  # an xarray.DataArray?

  def __getitem__(self, key):
    # This should work exactly like tf.Tensor.__getitem__, except it preserves
    # labels.
    if not isinstance(key, tuple):
      key = (key,)
    if len(key) != len(self.axes):
      raise ValueError('indexer %r must have the same length as the Tensor '
                       'rank (%r)' % (key, len(self.axes)))
    selection = {a: k for a, k in zip(self.axes.keys(), key)}
    return slice_function(self, selection)

  # special methods for overloading arithmetic operations:

  def __abs__(self):
    return abs_function(self)

  def __neg__(self):
    return neg(self)

  def __pos__(self):
    return self

  def __add__(self, other):
    return add(self, other)

  def __radd__(self, other):
    return add(other, self)

  def __sub__(self, other):
    return sub(self, other)

  def __rsub__(self, other):
    return sub(other, self)

  def __mul__(self, other):
    return mul(self, other)

  def __rmul__(self, other):
    return mul(other, self)

  def __truediv__(self, other):
    return div(self, other)

  # Python 2 division falls back to the same implementation as truediv.
  __div__ = __truediv__

  def __rtruediv__(self, other):
    return div(other, self)

  __rdiv__ = __rtruediv__

  def __mod__(self, other):
    return mod(self, other)

  def __rmod__(self, other):
    return mod(other, self)

  def __pow__(self, other):
    return pow_function(self, other)

  def __rpow__(self, other):
    return pow_function(other, self)

  # logical operations:

  def __invert__(self):
    return logical_not(self)

  def __and__(self, other):
    return logical_and(self, other)

  def __or__(self, other):
    return logical_or(self, other)

  def __xor__(self, other):
    return logical_xor(self, other)

  # boolean operations:

  def __lt__(self, other):
    return less(self, other)

  def __le__(self, other):
    return less_equal(self, other)

  def __gt__(self, other):
    return greater(self, other)

  def __ge__(self, other):
    return greater_equal(self, other)

  def __eq__(self, other):
    # for consistency with tf.Tensor
    if not isinstance(other, LabeledTensor):
      return False

    return self.tensor == other.tensor and self.axes == other.axes

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    # Hashes the same fields compared by __eq__.
    return hash((self.tensor, self.axes))
# typecheck type abbreviations:
# abbreviations for third-party types with very long reprs
tc.register_type_abbreviation(tensor_shape.Dimension, 'tensorflow.Dimension')
tc.register_type_abbreviation(ops.Tensor, 'tensorflow.Tensor')
tc.register_type_abbreviation(dtypes.DType, 'tensorflow.DType')
# core LabeledTensor types
tc.register_type_abbreviation(Axis, 'labeled_tensor.Axis')
tc.register_type_abbreviation(Axes, 'labeled_tensor.Axes')
tc.register_type_abbreviation(LabeledTensor, 'labeled_tensor.LabeledTensor')


@tc.returns(ops.Tensor)
@tc.accepts(LabeledTensor)
def _convert_labeled_tensor_to_tensor(value, *args, **kwargs):
  """Unwraps a LabeledTensor for TensorFlow's tensor-conversion machinery."""
  # call ops.convert_to_tensor to handle optional arguments appropriately
  return ops.internal_convert_to_tensor(value.tensor, *args, **kwargs)


# Lets plain TensorFlow ops accept LabeledTensor arguments transparently.
ops.register_tensor_conversion_function(
    LabeledTensor, _convert_labeled_tensor_to_tensor)

# tc class for anything that can be coerced into a LabeledTensor
# pylint: disable=invalid-name
LabeledTensorLike = tc.Union(LabeledTensor, ops.Tensor, np.ndarray, Scalar)
# pylint: enable=invalid-name
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, object, tc.Optional(string_types))
def convert_to_labeled_tensor(value, dtype=None, name=None):
  """Converts the given `value` to a `LabeledTensor`.

  This function accepts `LabeledTensor` objects, 0-dimensional `Tensor` objects
  and numpy arrays, and Python scalars. Higher dimensional unlabeled tensors
  must use the `LabeledTensor` constructor explicitly.

  Args:
    value: Object to convert.
    dtype: Optional element type for the returned tensor. If missing, the type
      is inferred from the type of value.
    name: Optional name to use if a new Tensor is created.

  Returns:
    `value` converted into a `LabeledTensor` object.

  Raises:
    ValueError: If the output would have rank>0 but the input was not already a
      `LabeledTensor`.
  """
  # TODO(shoyer): consider extending to accept xarray.DataArray as input.
  if isinstance(value, LabeledTensor):
    existing_axes = value.axes.values()
    raw_value = value.tensor
  else:
    existing_axes = []
    raw_value = value
  # We call convert_to_tensor even for LabeledTensor input because it also
  # checks to make sure the dtype argument is compatible.
  tensor = ops.convert_to_tensor(raw_value, dtype=dtype, name=name)
  if len(existing_axes) != len(tensor.get_shape()):
    raise ValueError('cannot automatically convert unlabeled arrays or tensors '
                     'with rank>0 into LabeledTensors: %r' % raw_value)
  return LabeledTensor(tensor, existing_axes)
@tc.returns(Axis)
@tc.accepts(tc.Collection(Axis))
def concat_axes(axes):
  """Concatenate a list of Axes.

  Args:
    axes: A collection of Axis objects, all sharing the same name.

  Returns:
    The concatenation of the axes.
    If all axes have labels, the result has the concatenation of the labels.
    Else, the result has no labels, and its size is the sum of the sizes
    of the axes (or unknown, when any axis has unknown size).

  Raises:
    ValueError: If `axes` is empty, contains non-Axis values, or mixes
      differently named axes.
  """
  if not axes:
    raise ValueError('axes must not be empty')
  for candidate in axes:
    if not isinstance(candidate, Axis):
      raise ValueError('Expected an Axis, but got %r of type %r' %
                       (candidate, type(candidate)))

  distinct_names = set(axis.name for axis in axes)
  if len(distinct_names) > 1:
    raise ValueError('axes do not all have the same name: %r' % distinct_names)
  shared_name, = distinct_names

  if all(axis.labels is not None for axis in axes):
    # Every input is labeled: splice the label tuples together.
    combined_value = tuple(tick for axis in axes for tick in axis.labels)
  elif any(axis.size is None for axis in axes):
    # An unknown size anywhere makes the total unknown.
    combined_value = None
  else:
    combined_value = sum(len(axis) for axis in axes)

  return Axis(shared_name, combined_value)
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, tc.Optional(string_types))
def identity(labeled_tensor, name=None):
  """The identity op.

  See tf.identity.

  Args:
    labeled_tensor: The input tensor.
    name: Optional op name.

  Returns:
    The tensor.
  """
  with ops.name_scope(name, 'lt_identity', [labeled_tensor]) as scope:
    lt_in = convert_to_labeled_tensor(labeled_tensor)
    identical_tensor = array_ops.identity(lt_in.tensor, name=scope)
    return LabeledTensor(identical_tensor, lt_in.axes)
# We don't call this slice because that shadows a built-in. Instead, we alias
# this to lt.slice in __init__.py.
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, tc.Mapping(string_types, tc.Union(int, slice)),
            tc.Optional(string_types))
def slice_function(labeled_tensor, selection, name=None):
  """Slice out a subset of the tensor.

  This is an analogue of tf.slice.
  For example:

  >>> tensor = tf.reshape(tf.range(0, 6), [3, 2])
  >>> labeled_tensor = lt.LabeledTensor(tensor, ['a', ('b', ['foo', 'bar'])])
  >>> lt.slice(labeled_tensor, {'a': slice(0, 2), 'b': 1})
  <LabeledTensor 'lt_slice:...' shape=(2,) dtype=int32
   axes=[('a', Dimension(2))]>

  Args:
    labeled_tensor: The input tensor.
    selection: A dictionary of type str -> Union(int, slice of int) mapping
      axis names to sub-selections.
    name: Optional op name.

  Returns:
    The slice as a `LabeledTensor`.
  """
  with ops.name_scope(name, 'lt_slice', [labeled_tensor]) as scope:
    labeled_tensor = convert_to_labeled_tensor(labeled_tensor)

    slices = []

    for axis_name in labeled_tensor.axes:
      if axis_name not in selection:
        # We're not sub-selecting this axis, so use the full slice.
        slices.append(slice(None))
      else:
        slices.append(selection[axis_name])

    sliced_tensor = labeled_tensor.tensor[tuple(slices)]

    sliced_axes = []
    for axis, s in zip(labeled_tensor.axes.values(), slices):
      # We sub-select this axis's index with the slice s.

      # `s` is either an int or a proper slice.
      if isinstance(s, slice):
        if axis.labels is None:
          # We're not tracking coordinate names for this axis.
          sliced_axes.append(axis.name)
        else:
          sliced_axes.append((axis.name, axis.labels[s]))
      else:
        # If the slice is an int this dimension now has size 1, so we remove it.
        assert isinstance(s, int)

    return LabeledTensor(array_ops.identity(sliced_tensor, name=scope),
                         sliced_axes)
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, tc.Optional(tc.Collection(string_types)),
            tc.Optional(string_types))
def transpose(labeled_tensor, axis_order=None, name=None):
  """Permute a tensor's axes.

  See tf.transpose.

  Args:
    labeled_tensor: The input tensor.
    axis_order: Optional desired axis order, as a list of names. By default, the
      order of axes is reversed.
    name: Optional op name.

  Returns:
    The permuted tensor.

  Raises:
    ValueError: If axis_order isn't a permutation of the existing axes.
  """
  with ops.name_scope(name, 'lt_transpose', [labeled_tensor]) as scope:
    labeled_tensor = convert_to_labeled_tensor(labeled_tensor)

    original_order = list(labeled_tensor.axes.keys())
    if axis_order is None:
      axis_order = list(reversed(original_order))
    elif sorted(axis_order) != sorted(original_order):
      raise ValueError(
          'The new axis order must have the same names as the original axes, '
          'but the new order is %r while the original order is %r' %
          (axis_order, original_order))

    axis_names = list(labeled_tensor.axes.keys())
    permutation = [axis_names.index(n) for n in axis_order]

    # Note: TensorFlow doesn't copy data for the identity transpose.
    transpose_tensor = array_ops.transpose(labeled_tensor.tensor,
                                           permutation,
                                           name=scope)

    permuted_axes = [labeled_tensor.axes[n] for n in axis_order]

    return LabeledTensor(transpose_tensor, permuted_axes)
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, tc.Collection(tc.Union(string_types, tc.Tuple(
    string_types, collections.Hashable))), tc.Optional(string_types))
def expand_dims(labeled_tensor, axes, name=None):
  """Insert dimensions of size 1.

  See tf.expand_dims.

  Args:
    labeled_tensor: The input tensor.
    axes: The desired axis names as strings or tuples of (name, label),
      where `label` is the coordinate name for the new dimension `name`.
      These must include the existing axis names, and the existing names must
      appear in the same order in this list as they do in the input tensor.
    name: Optional op name.

  Returns:
    A tensor with an axis for each axis in axes.
    New axes are created with size 1 and do not have labeled coordinates.

  Raises:
    AxisOrderError: If axis names don't appear in the same order in axes
      and the labeled tensor.
  """
  with ops.name_scope(name, 'lt_expand_dims', [labeled_tensor]) as scope:
    labeled_tensor = convert_to_labeled_tensor(labeled_tensor)

    axis_names = [a if isinstance(a, string_types) else a[0] for a in axes]
    check_axis_order(labeled_tensor, axis_names)

    reshaped_axes = []
    shape = []
    for axis_spec in axes:
      if axis_spec in labeled_tensor.axes:
        # Existing axis: keep it, using -1 for unknown sizes in the reshape.
        axis = labeled_tensor.axes[axis_spec]
        reshaped_axes.append(axis)
        shape.append(-1 if axis.size is None else axis.size)
      else:
        if isinstance(axis_spec, string_types):
          reshaped_axes.append((axis_spec, 1))
        else:
          # NOTE(review): this rebinding shadows the `name` parameter; it looks
          # harmless since name_scope has already consumed it — confirm.
          (name, label) = axis_spec
          reshaped_axes.append((name, (label,)))

        shape.append(1)

    reshaped_tensor = array_ops.reshape(labeled_tensor.tensor, shape,
                                        name=scope)

    return LabeledTensor(reshaped_tensor, reshaped_axes)
# Graph-collection key under which the active axis order is stored.
# This should only be added to a graph collection once.
# NOTE(review): the tuple (rather than string) key presumably avoids clashing
# with standard graph collection names — confirm.
_AXIS_ORDER_KEY = ('__axis_order',)
@tc.returns(tc.Optional(tc.List(string_types)))
def get_axis_order():
  """Get the axis_order set by any containing axis_order_scope.

  Returns:
    List of strings giving an order to use for axis names, or None, if no axis
    order is set.
  """
  # By storing axis_order in the graph, we can ensure that axis_order_scope is
  # thread-safe.
  stored = ops.get_collection(_AXIS_ORDER_KEY)
  if not stored:
    return None
  current_order, = stored
  return current_order
@tc.accepts(tc.Optional(tc.List(string_types)))
def _set_axis_order(axis_order):
  """Store axis_order in the graph collection, replacing any existing entry."""
  collection = ops.get_collection_ref(_AXIS_ORDER_KEY)
  if not collection:
    collection.append(axis_order)
  else:
    collection[0] = axis_order
@contextlib.contextmanager
@tc.accepts(tc.Optional(tc.List(string_types)))
def axis_order_scope(axis_order=None):
  """Set axis order for the result of broadcasting operations within a scope.

  This allows you to ensure that tensors resulting from arithmetic have a
  predictable axis order.

  Example usage:

    with lt.axis_order_scope(['x', 'y', 'z']):
      # result is guaranteed to have the correct axis order
      result = w + b

  You can nest scopes, in which case only the inner-most scope applies, e.g.,

    with lt.axis_order(['x', 'y', 'z']):
      with lt.axis_order():
        result = w + b  # uses the default (left-most) axis ordering

  Args:
    axis_order: optional list of strings providing axis names. By default,
      creates a scope without axis order.

  Yields:
    The provided axis_order or `None`.
  """
  original_axis_order = get_axis_order()
  _set_axis_order(axis_order)
  try:
    yield axis_order
  finally:
    # Always restore the previous order, even if the body raised.
    _set_axis_order(original_axis_order)
@tc.returns(tc.List(string_types))
def _get_valid_axis_order():
  """Return the active axis order, or raise if none is set."""
  current = get_axis_order()
  if current is not None:
    return current
  raise AxisOrderError('an explicit axis order must be provided with the '
                       'axis_order argument or by using an axis_order_scope')
class AxisOrderError(ValueError):
  """Error class for cases where there is no valid axis order.

  Raised when an axis order is required but unavailable, or when a tensor's
  axes are inconsistent with the required order.
  """
# TODO(shoyer): should this function accept a list of labeled tensors instead?
@tc.returns(type(None))
@tc.accepts(LabeledTensorLike, tc.Optional(tc.Collection(string_types)))
def check_axis_order(labeled_tensor, axis_order=None):
  """Verify that the given tensor has a consistent axis order.

  Args:
    labeled_tensor: The input tensor. All axes on this tensor must appear in
      axis_order.
    axis_order: Optional desired axis order, as a list of names. If not
      provided, defaults to the current axis_order_scope (if set).

  Raises:
    AxisOrderError: If the axis_order is unavailable, inconsistent or does not
      include all existing axes.
  """
  labeled_tensor = convert_to_labeled_tensor(labeled_tensor)

  if axis_order is None:
    axis_order = _get_valid_axis_order()

  # Only the names from axis_order that actually occur on this tensor matter.
  relevant_axis_order = [a for a in axis_order if a in labeled_tensor.axes]

  if len(relevant_axis_order) < len(labeled_tensor.axes):
    raise AxisOrderError(
        'not all axis names appear in the required axis order %r: %r' %
        (axis_order, labeled_tensor))

  if relevant_axis_order != list(labeled_tensor.axes):
    raise AxisOrderError(
        'axes on a labeled tensor do not appear in the same order as the '
        'required axis order %r: %r' % (axis_order, labeled_tensor))
@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, tc.Optional(tc.Collection(string_types)),
            tc.Optional(string_types))
def impose_axis_order(labeled_tensor, axis_order=None, name=None):
  """Impose desired axis order on a labeled tensor.

  Args:
    labeled_tensor: The input tensor.
    axis_order: Optional desired axis order, as a list of names. If not
      provided, defaults to the current axis_order_scope (if set).
    name: Optional op name.

  Returns:
    Labeled tensor with possibly transposed axes.

  Raises:
    AxisOrderError: If no axis_order is provided or axis_order does not contain
      all axes on the input tensor.
  """
  with ops.name_scope(name, 'lt_impose_axis_order', [labeled_tensor]) as scope:
    labeled_tensor = convert_to_labeled_tensor(labeled_tensor)

    if axis_order is None:
      axis_order = _get_valid_axis_order()

    # Restrict to the names present on this tensor; transpose() validates that
    # this covers every axis.
    relevant_axis_order = [a for a in axis_order if a in labeled_tensor.axes]

    return transpose(labeled_tensor, relevant_axis_order, name=scope)
@tc.returns(tc.Optional(list))
@tc.accepts(list, list)
def _find_consistent_ordering(a, b):
  """Find the left-most consistent ordering between two lists of unique items.

  A consistent ordering combines all elements in both a and b while keeping all
  elements in their original order in both inputs. The left-most consistent
  ordering orders elements from `a` not found in `b` before elements in `b` not
  found in `a`.

  For example, given ['x', 'z'] and ['y', 'z'], both ['x', 'y', 'z'] and ['y',
  'x', 'z'] are consistent orderings because each of the inputs appears in
  each consistent ordering in the same order, and ['x', 'y', 'z'] is the
  left-most, because 'x' appears only in `a` and 'y' appears only in `b`. In
  contrast, there is no consistent ordering between ['x', 'y'] and ['y', 'x'].

  Args:
    a: list with unique elements.
    b: list with unique elements.

  Returns:
    List containing all elements in either a or b, or None, if no consistent
    ordering exists.
  """
  members_a = set(a)
  members_b = set(b)

  merged = []
  pos_a = 0
  pos_b = 0
  while pos_a < len(a) and pos_b < len(b):
    if a[pos_a] not in members_b:
      # Element unique to `a`: emit it first (left-most preference).
      merged.append(a[pos_a])
      pos_a += 1
    elif b[pos_b] not in members_a:
      # Element unique to `b`.
      merged.append(b[pos_b])
      pos_b += 1
    elif a[pos_a] == b[pos_b]:
      # Shared element in matching position: advance both cursors.
      merged.append(a[pos_a])
      pos_a += 1
      pos_b += 1
    else:
      # Shared elements encountered in conflicting order: no consistent
      # ordering exists.
      return None
  merged.extend(a[pos_a:])
  merged.extend(b[pos_b:])

  return merged
@tc.returns(LabeledTensor, LabeledTensor, Axes)
@tc.accepts(LabeledTensorLike, LabeledTensorLike, tc.Optional(string_types))
def align(labeled_tensor_0, labeled_tensor_1, name=None):
  """Align the axes of two tensors so they may be broadcast to each other.

  Axes are ordered by the current axis order scope, if present, or by the
  left-most consistent ordering. An exception is raised if it is impossible to
  align the tensors without a transpose (align never copies the input data).

  Args:
    labeled_tensor_0: An input tensor.
    labeled_tensor_1: An input tensor.
    name: Optional op name.

  Returns:
    The aligned tensors and the axes the resulting tensor would have if the
    two aligned tensors were broadcast to each other. The aligned tensors have
    the same rank but not necessarily the same shape, with axes in the same
    order.

  Raises:
    ValueError: If axes with the same name on the inputs are not equal.
    AxisOrderError: If there is no way to reshape the input tensors into the
      output without a transpose.
  """
  with ops.name_scope(name, 'lt_align',
                      [labeled_tensor_0, labeled_tensor_1]) as scope:

    labeled_tensor_0 = convert_to_labeled_tensor(labeled_tensor_0)
    labeled_tensor_1 = convert_to_labeled_tensor(labeled_tensor_1)

    axes_0 = labeled_tensor_0.axes
    axes_1 = labeled_tensor_1.axes
    # Any axis name shared by both inputs must refer to identical axes.
    for axis_name in axes_0:
      if axis_name in axes_1:
        if axes_0[axis_name] != axes_1[axis_name]:
          raise ValueError('Mismatched %r axis on input tensors: %r and %r' %
                           (axis_name, axes_0[axis_name], axes_1[axis_name]))

    axis_scope_order = get_axis_order()
    if axis_scope_order is not None:
      # we are in an axis_order_scope: use its order, restricted to the axes
      # that actually appear on the inputs, and verify both inputs already
      # conform to it (align never transposes).
      axis_names_set = set(axes_0) | set(axes_1)
      new_axis_names = [a for a in axis_scope_order if a in axis_names_set]

      check_axis_order(labeled_tensor_0, axis_scope_order)
      check_axis_order(labeled_tensor_1, axis_scope_order)

    else:
      # attempt to find a consistent ordering of the two inputs' axes.
      new_axis_names = _find_consistent_ordering(list(axes_0), list(axes_1))
      if new_axis_names is None:
        # Fixed message typo: "one of more" -> "one or more".
        raise AxisOrderError(
            'No consistent axis order allows for aligning tensors with axis '
            'orders %r and %r without copying data. Use transpose or '
            'impose_axis_order to reorder axes on one or more of the inputs.' %
            (axes_0.keys(), axes_1.keys()))

    # Insert size-1 axes so both tensors carry all axes in the same order;
    # this is a reshape, never a copy.
    labeled_tensor_0 = expand_dims(labeled_tensor_0,
                                   new_axis_names,
                                   name=scope + '0')
    labeled_tensor_1 = expand_dims(labeled_tensor_1,
                                   new_axis_names,
                                   name=scope + '1')

    # The broadcast axes take the concrete (non size-1) axis from whichever
    # input defines it.
    broadcast_axes = []
    for axis_name in new_axis_names:
      if axis_name in axes_0:
        broadcast_axes.append(axes_0[axis_name])
      else:
        broadcast_axes.append(axes_1[axis_name])

    return labeled_tensor_0, labeled_tensor_1, Axes(broadcast_axes)
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_unary_op(op_name, elementwise_function):
  """Build a LabeledTensor wrapper around a unary elementwise TensorFlow op.

  Args:
    op_name: string name of the TensorFlow op.
    elementwise_function: function to call to evaluate the op on a single
      tf.Tensor object. This function must accept two arguments: a tf.Tensor
      object, and an optional `name`.

  Returns:
    Function defining the given op that acts on LabeledTensors.
  """
  default_name = 'lt_%s' % op_name

  @tc.returns(LabeledTensor)
  @tc.accepts(LabeledTensorLike, tc.Optional(string_types))
  def op(labeled_tensor, name=None):
    """Elementwise `tf.{op_name}` applied to a LabeledTensor.

    See `tf.{op_name}` for full details.

    Args:
      labeled_tensor: Input tensor.
      name: Optional op name.

    Returns:
      A LabeledTensor with result of applying `tf.{op_name}` elementwise.
    """
    with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
      lt = convert_to_labeled_tensor(labeled_tensor)
      # Elementwise ops preserve shape, so the input axes carry over.
      return LabeledTensor(elementwise_function(lt.tensor, name=scope),
                           lt.axes)

  op.__doc__ = op.__doc__.format(op_name=op_name)
  op.__name__ = op_name

  return op
# Standard elementwise unary ops wrapped for LabeledTensors. `abs` and
# `round` get a `_function` suffix to avoid shadowing the Python builtins.
abs_function = define_unary_op('abs', math_ops.abs)
neg = define_unary_op('neg', math_ops.neg)
sign = define_unary_op('sign', math_ops.sign)
reciprocal = define_unary_op('reciprocal', math_ops.reciprocal)
square = define_unary_op('square', math_ops.square)
round_function = define_unary_op('round', math_ops.round)
sqrt = define_unary_op('sqrt', math_ops.sqrt)
rsqrt = define_unary_op('rsqrt', math_ops.rsqrt)
exp = define_unary_op('exp', math_ops.exp)
log = define_unary_op('log', math_ops.log)
ceil = define_unary_op('ceil', math_ops.ceil)
floor = define_unary_op('floor', math_ops.floor)
cos = define_unary_op('cos', math_ops.cos)
sin = define_unary_op('sin', math_ops.sin)
tan = define_unary_op('tan', math_ops.tan)
acos = define_unary_op('acos', math_ops.acos)
asin = define_unary_op('asin', math_ops.asin)
atan = define_unary_op('atan', math_ops.atan)
lgamma = define_unary_op('lgamma', math_ops.lgamma)
digamma = define_unary_op('digamma', math_ops.digamma)
erf = define_unary_op('erf', math_ops.erf)
erfc = define_unary_op('erfc', math_ops.erfc)
logical_not = define_unary_op('logical_not', math_ops.logical_not)
tanh = define_unary_op('tanh', math_ops.tanh)
sigmoid = define_unary_op('sigmoid', math_ops.sigmoid)
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_binary_op(op_name, elementwise_function):
  """Build a broadcasting LabeledTensor wrapper around a binary TF op.

  Args:
    op_name: string name of the TensorFlow op.
    elementwise_function: function to call to evaluate the op on tf.Tensor
      objects. This function must accept three arguments: two tf.Tensor
      objects, and an optional `name`.

  Returns:
    Function defining the given op that acts on LabeledTensors.
  """
  default_name = 'lt_%s' % op_name

  @tc.returns(LabeledTensor)
  @tc.accepts(LabeledTensorLike, LabeledTensorLike, tc.Optional(string_types))
  def op(labeled_tensor_0, labeled_tensor_1, name=None):
    """Elementwise `tf.{op_name}` with label based alignment.

    See `tf.{op_name}` for full details.

    Args:
      labeled_tensor_0: Input tensor.
      labeled_tensor_1: Input tensor.
      name: Optional op name.

    Returns:
      A LabeledTensor with result of applying `tf.{op_name}` elementwise.
    """
    with ops.name_scope(name, default_name,
                        [labeled_tensor_0, labeled_tensor_1]) as scope:
      # Align by axis name first so broadcasting is label-aware.
      aligned_0, aligned_1, output_axes = align(labeled_tensor_0,
                                                labeled_tensor_1)
      result = elementwise_function(aligned_0.tensor, aligned_1.tensor,
                                    name=scope)
      return LabeledTensor(result, output_axes)

  op.__doc__ = op.__doc__.format(op_name=op_name)
  op.__name__ = op_name

  return op
# Standard elementwise binary ops with label-based broadcasting. `pow` gets
# a `_function` suffix to avoid shadowing the Python builtin.
add = define_binary_op('add', math_ops.add)
sub = define_binary_op('sub', math_ops.sub)
mul = define_binary_op('mul', math_ops.mul)
div = define_binary_op('div', math_ops.div)
mod = define_binary_op('mod', math_ops.mod)
pow_function = define_binary_op('pow', math_ops.pow)

equal = define_binary_op('equal', math_ops.equal)
greater = define_binary_op('greater', math_ops.greater)
greater_equal = define_binary_op('greater_equal', math_ops.greater_equal)
not_equal = define_binary_op('not_equal', math_ops.not_equal)
less = define_binary_op('less', math_ops.less)
less_equal = define_binary_op('less_equal', math_ops.less_equal)
logical_and = define_binary_op('logical_and', math_ops.logical_and)
logical_or = define_binary_op('logical_or', math_ops.logical_or)
logical_xor = define_binary_op('logical_xor', math_ops.logical_xor)

maximum = define_binary_op('maximum', math_ops.maximum)
minimum = define_binary_op('minimum', math_ops.minimum)
squared_difference = define_binary_op(
    'squared_difference', math_ops.squared_difference)
igamma = define_binary_op('igamma', math_ops.igamma)
igammac = define_binary_op('igammac', math_ops.igammac)
zeta = define_binary_op('zeta', math_ops.zeta)
polygamma = define_binary_op('polygamma', math_ops.polygamma)
| apache-2.0 |
40223134/w16b_test | static/Brython3.1.3-20150514-095342/Lib/xml/dom/pulldom.py | 850 | 11761 | import xml.sax
import xml.sax.handler
# Event type constants: DOMEventStream yields (event_type, node) pairs, where
# event_type is one of the strings below.
START_ELEMENT = "START_ELEMENT"
END_ELEMENT = "END_ELEMENT"
COMMENT = "COMMENT"
START_DOCUMENT = "START_DOCUMENT"
END_DOCUMENT = "END_DOCUMENT"
PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
CHARACTERS = "CHARACTERS"
class PullDOM(xml.sax.ContentHandler):
    """SAX ContentHandler that turns SAX callbacks into (event_type, node)
    pairs for pull-style consumption by DOMEventStream.

    Events are stored in a singly linked list of two-slot cells
    ``[event, next_cell]``: ``firstEvent`` is a dummy head cell and
    ``lastEvent`` always points at the tail cell whose second slot will
    receive the next event appended.
    """

    _locator = None  # SAX Locator set via setDocumentLocator, if any.
    document = None  # DOM Document; created lazily by buildDocument().

    def __init__(self, documentFactory=None):
        from xml.dom import XML_NAMESPACE
        self.documentFactory = documentFactory
        # Dummy head cell of the event linked list; lastEvent is the tail.
        self.firstEvent = [None, None]
        self.lastEvent = self.firstEvent
        self.elementStack = []
        # Bind push/pop as bound list methods for speed.
        self.push = self.elementStack.append
        try:
            self.pop = self.elementStack.pop
        except AttributeError:
            # use class' pop instead
            pass
        self._ns_contexts = [{XML_NAMESPACE: 'xml'}]  # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        # Comments/PIs seen before the document node exists are parked here
        # and replayed by buildDocument().
        self.pending_events = []

    def pop(self):
        # Class-level fallback for the bound-method pop set up in __init__.
        result = self.elementStack[-1]
        del self.elementStack[-1]
        return result

    def setDocumentLocator(self, locator):
        self._locator = locator

    def startPrefixMapping(self, prefix, uri):
        # Remember the declaration so it can be re-emitted as an xmlns
        # attribute on the next startElementNS call.
        if not hasattr(self, '_xmlns_attrs'):
            self._xmlns_attrs = []
        self._xmlns_attrs.append((prefix or 'xmlns', uri))
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix or None

    def endPrefixMapping(self, prefix):
        self._current_context = self._ns_contexts.pop()

    def startElementNS(self, name, tagName, attrs):
        # Retrieve xml namespace declaration attributes.
        xmlns_uri = 'http://www.w3.org/2000/xmlns/'
        xmlns_attrs = getattr(self, '_xmlns_attrs', None)
        if xmlns_attrs is not None:
            for aname, value in xmlns_attrs:
                attrs._attrs[(xmlns_uri, aname)] = value
            self._xmlns_attrs = []
        uri, localname = name
        if uri:
            # When using namespaces, the reader may or may not
            # provide us with the original name. If not, create
            # *a* valid tagName from the current context.
            if tagName is None:
                prefix = self._current_context[uri]
                if prefix:
                    tagName = prefix + ":" + localname
                else:
                    tagName = localname
            if self.document:
                node = self.document.createElementNS(uri, tagName)
            else:
                node = self.buildDocument(uri, tagName)
        else:
            # When the tagname is not prefixed, it just appears as
            # localname
            if self.document:
                node = self.document.createElement(localname)
            else:
                node = self.buildDocument(None, localname)

        # Copy every SAX attribute onto the DOM node, re-qualifying names
        # against the current namespace context.
        for aname, value in attrs.items():
            a_uri, a_localname = aname
            if a_uri == xmlns_uri:
                if a_localname == 'xmlns':
                    qname = a_localname
                else:
                    qname = 'xmlns:' + a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            elif a_uri:
                prefix = self._current_context[a_uri]
                if prefix:
                    qname = prefix + ":" + a_localname
                else:
                    qname = a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            else:
                attr = self.document.createAttribute(a_localname)
                node.setAttributeNode(attr)
            attr.value = value

        # Append the event and advance the tail of the linked list.
        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)

    def endElementNS(self, name, tagName):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]

    def startElement(self, name, attrs):
        # Non-namespace variant of startElementNS.
        if self.document:
            node = self.document.createElement(name)
        else:
            node = self.buildDocument(None, name)

        for aname, value in attrs.items():
            attr = self.document.createAttribute(aname)
            attr.value = value
            node.setAttributeNode(attr)

        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)

    def endElement(self, name):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]

    def comment(self, s):
        if self.document:
            node = self.document.createComment(s)
            self.lastEvent[1] = [(COMMENT, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            # No document yet: park the raw text until buildDocument runs.
            event = [(COMMENT, s), None]
            self.pending_events.append(event)

    def processingInstruction(self, target, data):
        if self.document:
            node = self.document.createProcessingInstruction(target, data)
            self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            # No document yet: park the raw target/data pair.
            event = [(PROCESSING_INSTRUCTION, target, data), None]
            self.pending_events.append(event)

    def ignorableWhitespace(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
        self.lastEvent = self.lastEvent[1]

    def characters(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(CHARACTERS, node), None]
        self.lastEvent = self.lastEvent[1]

    def startDocument(self):
        if self.documentFactory is None:
            import xml.dom.minidom
            self.documentFactory = xml.dom.minidom.Document.implementation

    def buildDocument(self, uri, tagname):
        # Can't do that in startDocument, since we need the tagname
        # XXX: obtain DocumentType
        node = self.documentFactory.createDocument(uri, tagname, None)
        self.document = node
        self.lastEvent[1] = [(START_DOCUMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
        # Put everything we have seen so far into the document
        for e in self.pending_events:
            if e[0][0] == PROCESSING_INSTRUCTION:
                _, target, data = e[0]
                n = self.document.createProcessingInstruction(target, data)
                e[0] = (PROCESSING_INSTRUCTION, n)
            elif e[0][0] == COMMENT:
                n = self.document.createComment(e[0][1])
                e[0] = (COMMENT, n)
            else:
                raise AssertionError("Unknown pending event ", e[0][0])
            self.lastEvent[1] = e
            self.lastEvent = e
        self.pending_events = None
        return node.firstChild

    def endDocument(self):
        self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
        self.pop()

    def clear(self):
        "clear(): Explicitly release parsing structures"
        self.document = None
class ErrorHandler:
    """Minimal SAX error handler: warnings are printed, errors propagate."""

    def warning(self, exception):
        # Recoverable problem: report it and let parsing continue.
        print(exception)

    def error(self, exception):
        # Recoverable per SAX, but treated as fatal here: re-raise.
        raise exception

    def fatalError(self, exception):
        # Well-formedness violation: always re-raise.
        raise exception
class DOMEventStream:
    """Iterator of (event_type, node) pairs pulled from a SAX parser.

    Prefers the IncrementalParser interface (feed()) so events are produced
    on demand; falls back to slurping the whole document when the parser
    cannot feed incrementally.
    """

    def __init__(self, stream, parser, bufsize):
        self.stream = stream
        self.parser = parser
        self.bufsize = bufsize
        if not hasattr(self.parser, 'feed'):
            # Parser lacks the IncrementalParser interface: parse everything
            # up front instead of pulling chunk by chunk.
            self.getEvent = self._slurp
        self.reset()

    def reset(self):
        self.pulldom = PullDOM()
        # This content handler relies on namespace support
        self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
        self.parser.setContentHandler(self.pulldom)

    def __getitem__(self, pos):
        # Legacy sequence-protocol iteration; `pos` is ignored.
        rc = self.getEvent()
        if rc:
            return rc
        raise IndexError

    def __next__(self):
        rc = self.getEvent()
        if rc:
            return rc
        raise StopIteration

    def __iter__(self):
        return self

    def expandNode(self, node):
        """Consume events until `node`'s END_ELEMENT, building its subtree."""
        event = self.getEvent()
        parents = [node]
        while event:
            token, cur_node = event
            if cur_node is node:
                return
            if token != END_ELEMENT:
                parents[-1].appendChild(cur_node)
            if token == START_ELEMENT:
                parents.append(cur_node)
            elif token == END_ELEMENT:
                del parents[-1]
            event = self.getEvent()

    def getEvent(self):
        # use IncrementalParser interface, so we get the desired
        # pull effect
        if not self.pulldom.firstEvent[1]:
            self.pulldom.lastEvent = self.pulldom.firstEvent
        # Feed the parser until at least one event is queued or EOF.
        while not self.pulldom.firstEvent[1]:
            buf = self.stream.read(self.bufsize)
            if not buf:
                self.parser.close()
                return None
            self.parser.feed(buf)
        # Pop the head event off the linked list.
        rc = self.pulldom.firstEvent[1][0]
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc

    def _slurp(self):
        """ Fallback replacement for getEvent() using the
        standard SAX2 interface, which means we slurp the
        SAX events into memory (no performance gain, but
        we are compatible to all SAX parsers).
        """
        self.parser.parse(self.stream)
        # All events are queued now; subsequent calls just replay them.
        self.getEvent = self._emit
        return self._emit()

    def _emit(self):
        """ Fallback replacement for getEvent() that emits
        the events that _slurp() read previously.
        """
        rc = self.pulldom.firstEvent[1][0]
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc

    def clear(self):
        """clear(): Explicitly release parsing objects"""
        self.pulldom.clear()
        del self.pulldom
        self.parser = None
        self.stream = None
class SAX2DOM(PullDOM):
    """PullDOM variant that also links each new node into its parent, so a
    complete DOM tree is assembled as events are generated."""

    def startElementNS(self, name, tagName, attrs):
        PullDOM.startElementNS(self, name, tagName, attrs)
        # PullDOM pushed the new element; attach it to its parent.
        curNode = self.elementStack[-1]
        parentNode = self.elementStack[-2]
        parentNode.appendChild(curNode)

    def startElement(self, name, attrs):
        PullDOM.startElement(self, name, attrs)
        curNode = self.elementStack[-1]
        parentNode = self.elementStack[-2]
        parentNode.appendChild(curNode)

    def processingInstruction(self, target, data):
        PullDOM.processingInstruction(self, target, data)
        # The node just created is in the last queued event.
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)

    def ignorableWhitespace(self, chars):
        PullDOM.ignorableWhitespace(self, chars)
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)

    def characters(self, chars):
        PullDOM.characters(self, chars)
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)
# Default read chunk size: just under 16 KiB (the small slack presumably
# accounts for allocator overhead — historical choice).
default_bufsize = (2 ** 14) - 20
def parse(stream_or_string, parser=None, bufsize=None):
    """Return a DOMEventStream over a file named by, or object given as,
    `stream_or_string`."""
    # A string names a file to open; anything else is used as a stream.
    if isinstance(stream_or_string, str):
        stream = open(stream_or_string, 'rb')
    else:
        stream = stream_or_string
    effective_bufsize = default_bufsize if bufsize is None else bufsize
    effective_parser = parser or xml.sax.make_parser()
    return DOMEventStream(stream, effective_parser, effective_bufsize)
def parseString(string, parser=None):
    """Return a DOMEventStream over an XML document held in `string`."""
    from io import StringIO
    buf = StringIO(string)
    if not parser:
        parser = xml.sax.make_parser()
    # One buffer-sized read covers the entire document.
    return DOMEventStream(buf, parser, len(string))
| agpl-3.0 |
vitan/hue | desktop/core/ext-py/python-ldap-2.3.13/Demo/rename.py | 40 | 1049 | import ldap
from getpass import getpass
# NOTE(review): this demo is Python 2 code (print statements) — it will not
# run under Python 3 without conversion.

# Create LDAPObject instance (trace_level=1 logs each libldap call).
l = ldap.initialize('ldap://localhost:1389', trace_level=1)

# Prompt for the bind password without echoing it.
print 'Password:'
cred = getpass()

try:
  # Set LDAP protocol version used
  l.set_option(ldap.OPT_PROTOCOL_VERSION, 3)

  # Try a bind to provoke failure if protocol version is not supported
  l.bind_s('cn=root,dc=stroeder,dc=com', cred, ldap.AUTH_SIMPLE)

  # Synchronous renames: move the entry out of the testing subtree with a
  # new RDN, then move it back with its original RDN.
  print 'Using rename_s():'
  l.rename_s(
    'uid=fred,ou=Unstructured testing tree,dc=stroeder,dc=com',
    'cn=Fred Feuerstein',
    'dc=stroeder,dc=com',
    0
  )
  l.rename_s(
    'cn=Fred Feuerstein,dc=stroeder,dc=com',
    'uid=fred',
    'ou=Unstructured testing tree,dc=stroeder,dc=com',
    0
  )

  # Same round trip using the asynchronous API: rename() returns a message
  # id which result() then waits on.
  m = l.rename(
    'uid=fred,ou=Unstructured testing tree,dc=stroeder,dc=com',
    'cn=Fred Feuerstein',
    'dc=stroeder,dc=com',
    0
  )
  r = l.result(m, 1)
  m = l.rename(
    'cn=Fred Feuerstein,dc=stroeder,dc=com',
    'uid=fred',
    'ou=Unstructured testing tree,dc=stroeder,dc=com',
    0
  )
  r = l.result(m, 1)

finally:
  # Always drop the connection, even if a rename failed.
  l.unbind_s()
defaultnamehere/grr | lib/flows/general/collectors_test.py | 1 | 21712 | #!/usr/bin/env python
"""Test the collector flows."""
import os
from grr.client import vfs
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import artifact
from grr.lib import artifact_lib
from grr.lib import artifact_test
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.flows.general import collectors
from grr.lib.flows.general import transfer
from grr.test_data import client_fixture
# pylint: mode=test
class CollectorTest(artifact_test.ArtifactTest):
  """Shared base class for the collector flow test cases below."""
  pass
class TestArtifactCollectors(CollectorTest):
  """Test the artifact collection mechanism with fake artifacts."""

  def setUp(self):
    """Make sure things are initialized."""
    super(TestArtifactCollectors, self).setUp()
    # Swap in a registry holding only the test artifacts; the original
    # registry is restored in tearDown.
    self.original_artifact_reg = artifact_lib.ArtifactRegistry.artifacts
    artifact_lib.ArtifactRegistry.ClearRegistry()
    self.LoadTestArtifacts()
    artifact_reg = artifact_lib.ArtifactRegistry.artifacts
    self.fakeartifact = artifact_reg["FakeArtifact"]
    self.fakeartifact2 = artifact_reg["FakeArtifact2"]

    self.output_count = 0

    # Seed the test client with an OS and a knowledge base.
    with aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw") as fd:
      fd.Set(fd.Schema.SYSTEM("Linux"))
      kb = fd.Schema.KNOWLEDGE_BASE()
      artifact.SetCoreGRRKnowledgeBaseValues(kb, fd)
      fd.Set(kb)

  def tearDown(self):
    super(TestArtifactCollectors, self).tearDown()
    artifact_lib.ArtifactRegistry.artifacts = self.original_artifact_reg
    self.fakeartifact.collectors = []  # Reset any Collectors
    self.fakeartifact.conditions = []  # Reset any Conditions
    self.fakeartifact2.collectors = []  # Reset any Collectors
    self.fakeartifact2.conditions = []  # Reset any Conditions

  def testInterpolateArgs(self):
    """%%users.username%% expands to every known username."""
    collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)

    collect_flow.state.Register("knowledge_base", rdfvalue.KnowledgeBase())
    collect_flow.current_artifact_name = "blah"
    collect_flow.state.knowledge_base.MergeOrAddUser(
        rdfvalue.KnowledgeBaseUser(username="test1"))
    collect_flow.state.knowledge_base.MergeOrAddUser(
        rdfvalue.KnowledgeBaseUser(username="test2"))

    # Non-string values must pass through interpolation untouched.
    test_rdf = rdfvalue.KnowledgeBase()
    action_args = {"usernames": ["%%users.username%%", "%%users.username%%"],
                   "nointerp": "asdfsdf", "notastring": test_rdf}
    kwargs = collect_flow.InterpolateDict(action_args)
    self.assertItemsEqual(kwargs["usernames"],
                          ["test1", "test2", "test1", "test2"])
    self.assertEqual(kwargs["nointerp"], "asdfsdf")
    self.assertEqual(kwargs["notastring"], test_rdf)

    # We should be using an array since users.username will expand to multiple
    # values.
    self.assertRaises(ValueError, collect_flow.InterpolateDict,
                      {"bad": "%%users.username%%"})

    list_args = collect_flow.InterpolateList(["%%users.username%%",
                                              "%%users.username%%aa"])
    self.assertItemsEqual(list_args, ["test1", "test2", "test1aa", "test2aa"])

    list_args = collect_flow.InterpolateList(["one"])
    self.assertEqual(list_args, ["one"])

  def testGrepRegexCombination(self):
    """_CombineRegex ORs multiple regexes into a single grouped pattern."""
    collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)
    self.assertEqual(collect_flow._CombineRegex([r"simple"]),
                     "simple")
    self.assertEqual(collect_flow._CombineRegex(["a", "b"]),
                     "(a)|(b)")
    self.assertEqual(collect_flow._CombineRegex(["a", "b", "c"]),
                     "(a)|(b)|(c)")
    self.assertEqual(collect_flow._CombineRegex(["a|b", "[^_]b", "c|d"]),
                     "(a|b)|([^_]b)|(c|d)")

  def testGrep(self):
    """Grep interpolates usernames and emits one combined regex condition."""

    class MockCallFlow(object):
      # Captures the CallFlow invocation arguments for later inspection.

      def CallFlow(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs

    mock_call_flow = MockCallFlow()
    with utils.Stubber(collectors.ArtifactCollectorFlow, "CallFlow",
                       mock_call_flow.CallFlow):

      collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)
      collect_flow.state.Register("knowledge_base", rdfvalue.KnowledgeBase())
      collect_flow.current_artifact_name = "blah"
      collect_flow.state.knowledge_base.MergeOrAddUser(
          rdfvalue.KnowledgeBaseUser(username="test1"))
      collect_flow.state.knowledge_base.MergeOrAddUser(
          rdfvalue.KnowledgeBaseUser(username="test2"))

      collector = rdfvalue.Collector(
          collector_type=rdfvalue.Collector.CollectorType.GREP,
          args={"path_list": ["/etc/passwd"],
                "content_regex_list": [r"^a%%users.username%%b$"]})
      collect_flow.Grep(collector, rdfvalue.PathSpec.PathType.TSK)

    conditions = mock_call_flow.kwargs["conditions"]
    self.assertEqual(len(conditions), 1)
    regexes = conditions[0].contents_regex_match.regex.SerializeToString()
    self.assertItemsEqual(regexes.split("|"), ["(^atest1b$)", "(^atest2b$)"])
    self.assertEqual(mock_call_flow.kwargs["paths"], ["/etc/passwd"])

  def testGetArtifact1(self):
    """Test we can get a basic artifact."""

    client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
                                          "FingerprintFile", "HashBuffer")
    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Linux"))
    client.Flush()

    # Dynamically add a Collector specifying the base path.
    file_path = os.path.join(self.base_path, "test_img.dd")
    coll1 = rdfvalue.Collector(
        collector_type=rdfvalue.Collector.CollectorType.FILE,
        args={"path_list": [file_path]})
    self.fakeartifact.collectors.append(coll1)

    artifact_list = ["FakeArtifact"]
    for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
                                     artifact_list=artifact_list, use_tsk=False,
                                     token=self.token, client_id=self.client_id
                                    ):
      pass

    # Test the AFF4 file that was created.
    fd1 = aff4.FACTORY.Open("%s/fs/os/%s" % (self.client_id, file_path),
                            token=self.token)
    fd2 = open(file_path)
    fd2.seek(0, 2)

    # The collected AFF4 object must report the same size as the source file.
    self.assertEqual(fd2.tell(), int(fd1.Get(fd1.Schema.SIZE)))

  def testRunGrrClientActionArtifact(self):
    """Test we can get a GRR client artifact."""
    client_mock = action_mocks.ActionMock("ListProcesses")
    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Linux"))
    client.Flush()

    coll1 = rdfvalue.Collector(
        collector_type=rdfvalue.Collector.CollectorType.GRR_CLIENT_ACTION,
        args={"client_action": r"ListProcesses"})
    self.fakeartifact.collectors.append(coll1)
    artifact_list = ["FakeArtifact"]
    for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
                                     artifact_list=artifact_list,
                                     token=self.token, client_id=self.client_id,
                                     output="test_artifact"
                                    ):
      pass

    # Test the AFF4 file that was created.
    fd = aff4.FACTORY.Open(rdfvalue.RDFURN(self.client_id).Add("test_artifact"),
                           token=self.token)
    self.assertTrue(isinstance(list(fd)[0], rdfvalue.Process))
    self.assertTrue(len(fd) > 5)

  def testRunGrrClientActionArtifactSplit(self):
    """Test that artifacts get split into separate collections."""
    client_mock = action_mocks.ActionMock("ListProcesses", "StatFile")
    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Linux"))
    client.Flush()

    # Attach the same collector to two artifacts and request both.
    coll1 = rdfvalue.Collector(
        collector_type=rdfvalue.Collector.CollectorType.GRR_CLIENT_ACTION,
        args={"client_action": r"ListProcesses"})
    self.fakeartifact.collectors.append(coll1)
    self.fakeartifact2.collectors.append(coll1)
    artifact_list = ["FakeArtifact", "FakeArtifact2"]
    for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
                                     artifact_list=artifact_list,
                                     token=self.token, client_id=self.client_id,
                                     output="test_artifact",
                                     split_output_by_artifact=True):
      pass

    # Check that we got two separate collections based on artifact name
    fd = aff4.FACTORY.Open(rdfvalue.RDFURN(
        self.client_id).Add("test_artifact_FakeArtifact"),
                           token=self.token)
    self.assertTrue(isinstance(list(fd)[0], rdfvalue.Process))
    self.assertTrue(len(fd) > 5)

    fd = aff4.FACTORY.Open(rdfvalue.RDFURN(
        self.client_id).Add("test_artifact_FakeArtifact2"),
                           token=self.token)
    self.assertTrue(len(fd) > 5)
    self.assertTrue(isinstance(list(fd)[0], rdfvalue.Process))

  def testConditions(self):
    """Test we can get a GRR client artifact with conditions."""
    # Run with false condition.
    client_mock = action_mocks.ActionMock("ListProcesses")
    coll1 = rdfvalue.Collector(
        collector_type=rdfvalue.Collector.CollectorType.GRR_CLIENT_ACTION,
        args={"client_action": "ListProcesses"},
        conditions=["os == 'Windows'"])
    self.fakeartifact.collectors.append(coll1)
    fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
    # AFF4Volume means no collection was written (condition blocked the run).
    self.assertEqual(fd.__class__.__name__, "AFF4Volume")

    # Now run with matching or condition.
    coll1.conditions = ["os == 'Linux' or os == 'Windows'"]
    self.fakeartifact.collectors = []
    self.fakeartifact.collectors.append(coll1)
    fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
    self.assertEqual(fd.__class__.__name__, "RDFValueCollection")

    # Now run with impossible or condition.
    coll1.conditions.append("os == 'NotTrue'")
    self.fakeartifact.collectors = []
    self.fakeartifact.collectors.append(coll1)
    fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
    self.assertEqual(fd.__class__.__name__, "AFF4Volume")

  def testSupportedOS(self):
    """Test supported_os inside the collector object."""
    # Run with false condition.
    client_mock = action_mocks.ActionMock("ListProcesses")
    coll1 = rdfvalue.Collector(
        collector_type=rdfvalue.Collector.CollectorType.GRR_CLIENT_ACTION,
        args={"client_action": "ListProcesses"}, supported_os=["Windows"])
    self.fakeartifact.collectors.append(coll1)
    fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
    # AFF4Volume means no collection was written (OS filter blocked the run).
    self.assertEqual(fd.__class__.__name__, "AFF4Volume")

    # Now run with matching or condition.
    coll1.conditions = []
    coll1.supported_os = ["Linux", "Windows"]
    self.fakeartifact.collectors = []
    self.fakeartifact.collectors.append(coll1)
    fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
    self.assertEqual(fd.__class__.__name__, "RDFValueCollection")

    # Now run with impossible or condition.
    coll1.conditions = ["os == 'Linux' or os == 'Windows'"]
    coll1.supported_os = ["NotTrue"]
    self.fakeartifact.collectors = []
    self.fakeartifact.collectors.append(coll1)
    fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
    self.assertEqual(fd.__class__.__name__, "AFF4Volume")

  def _RunClientActionArtifact(self, client_mock, artifact_list):
    """Run the collector flow and return the AFF4 object at its output path."""
    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Linux"))
    client.Flush()
    # Unique output path per invocation so runs don't collide.
    self.output_count += 1
    output = "test_artifact_%d" % self.output_count
    for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
                                     artifact_list=artifact_list,
                                     token=self.token, client_id=self.client_id,
                                     output=output
                                    ):
      pass

    # Test the AFF4 file was not created, as flow should not have run due to
    # conditions.
    fd = aff4.FACTORY.Open(rdfvalue.RDFURN(self.client_id).Add(output),
                           token=self.token)
    return fd
class TestArtifactCollectorsInteractions(CollectorTest):
  """Test the collection of artifacts.

  This class loads both real and test artifacts to test the interaction of
  badly defined artifacts with real artifacts.
  """

  def setUp(self):
    """Add test artifacts to existing registry."""
    super(TestArtifactCollectorsInteractions, self).setUp()
    # Unlike TestArtifactCollectors, the real registry is kept and the test
    # artifacts are layered on top; restored in tearDown.
    self.original_artifact_reg = artifact_lib.ArtifactRegistry.artifacts
    self.LoadTestArtifacts()

  def tearDown(self):
    super(TestArtifactCollectorsInteractions, self).tearDown()
    artifact_lib.ArtifactRegistry.artifacts = self.original_artifact_reg

  def testProcessCollectedArtifacts(self):
    """Test downloading files from artifacts."""
    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Windows"))
    client.Set(client.Schema.OS_VERSION("6.2"))
    client.Flush()

    # Serve a fake registry and filesystem through the VFS layer.
    vfs.VFS_HANDLERS[
        rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
    vfs.VFS_HANDLERS[
        rdfvalue.PathSpec.PathType.OS] = test_lib.FakeFullVFSHandler

    client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
                                          "HashBuffer", "FingerprintFile",
                                          "ListDirectory")

    # Get KB initialized
    for _ in test_lib.TestFlowHelper(
        "KnowledgeBaseInitializationFlow", client_mock,
        client_id=self.client_id, token=self.token):
      pass

    artifact_list = ["WindowsPersistenceMechanismFiles"]
    with test_lib.Instrument(
        transfer.MultiGetFile, "Start") as getfile_instrument:
      for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
                                       artifact_list=artifact_list,
                                       token=self.token,
                                       client_id=self.client_id,
                                       output="analysis/{p}/{u}-{t}",
                                       split_output_by_artifact=True):
        pass

      # Check MultiGetFile got called for our runkey files
      # TODO(user): RunKeys for S-1-5-20 are not found because users.sid only
      # expands to users with profiles.
      pathspecs = getfile_instrument.args[0][0].args.pathspecs
      self.assertItemsEqual([x.path for x in pathspecs],
                            [u"C:\\Windows\\TEMP\\A.exe"])

    artifact_list = ["BadPathspecArtifact"]
    with test_lib.Instrument(
        transfer.MultiGetFile, "Start") as getfile_instrument:
      for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
                                       artifact_list=artifact_list,
                                       token=self.token,
                                       client_id=self.client_id,
                                       output="analysis/{p}/{u}-{t}",
                                       split_output_by_artifact=True):
        pass

      # The malformed artifact must not trigger any file fetches.
      self.assertFalse(getfile_instrument.args)
class TestArtifactCollectorsRealArtifacts(CollectorTest):
  """Test the collection of real artifacts."""

  def _CheckDriveAndRoot(self):
    # Shared helper: collects the SystemDrive and SystemRoot artifacts and
    # verifies both resolve to the expected values from the test fixture.
    client_mock = action_mocks.ActionMock("StatFile", "ListDirectory")

    for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
                                     artifact_list=[
                                         "SystemDriveEnvironmentVariable"],
                                     token=self.token, client_id=self.client_id,
                                     output="testsystemdrive"):
      pass

    fd = aff4.FACTORY.Open(rdfvalue.RDFURN(
        self.client_id).Add("testsystemdrive"), token=self.token)
    self.assertEqual(len(fd), 1)
    self.assertEqual(str(fd[0]), "C:")

    for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
                                     artifact_list=["SystemRoot"],
                                     token=self.token, client_id=self.client_id,
                                     output="testsystemroot"):
      pass

    fd = aff4.FACTORY.Open(
        rdfvalue.RDFURN(self.client_id).Add("testsystemroot"), token=self.token)
    self.assertEqual(len(fd), 1)
    # Filesystem gives WINDOWS, registry gives Windows
    self.assertTrue(str(fd[0]) in [r"C:\Windows", r"C:\WINDOWS"])

  def testSystemDriveArtifact(self):
    """SystemDrive collection: broken client raises, fallback & registry work."""
    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Windows"))
    client.Set(client.Schema.OS_VERSION("6.2"))
    client.Flush()

    class BrokenClientMock(action_mocks.ActionMock):
      # Client mock whose filesystem actions always fail.

      def StatFile(self, _):
        raise IOError

      def ListDirectory(self, _):
        raise IOError

    # No registry, broken filesystem, this should just raise.
    with self.assertRaises(RuntimeError):
      for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow",
                                       BrokenClientMock(), artifact_list=[
                                           "SystemDriveEnvironmentVariable"],
                                       token=self.token,
                                       client_id=self.client_id,
                                       output="testsystemdrive"):
        pass

    # No registry, so this should use the fallback flow
    vfs.VFS_HANDLERS[
        rdfvalue.PathSpec.PathType.OS] = test_lib.ClientVFSHandlerFixture
    self._CheckDriveAndRoot()

    # Registry is present, so this should use the regular artifact collection
    vfs.VFS_HANDLERS[
        rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
    self._CheckDriveAndRoot()

  def testRunWMIComputerSystemProductArtifact(self):
    """WMI ComputerSystemProduct results are stored as HARDWARE_INFO."""

    class WMIActionMock(action_mocks.ActionMock):
      # Returns canned WMI results instead of querying a real client.

      def WmiQuery(self, _):
        return client_fixture.WMI_CMP_SYS_PRD

    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Windows"))
    client.Set(client.Schema.OS_VERSION("6.2"))
    client.Flush()

    client_mock = WMIActionMock()
    for _ in test_lib.TestFlowHelper(
        "ArtifactCollectorFlow", client_mock,
        artifact_list=["WMIComputerSystemProduct"], token=self.token,
        client_id=self.client_id,
        dependencies=rdfvalue.ArtifactCollectorFlowArgs.Dependency.IGNORE_DEPS,
        store_results_in_aff4=True):
      pass

    client = aff4.FACTORY.Open(self.client_id, token=self.token,)
    hardware = client.Get(client.Schema.HARDWARE_INFO)
    self.assertTrue(isinstance(hardware, rdfvalue.HardwareInfo))
    self.assertEqual(str(hardware.serial_number), "2RXYYZ1")

  def testRunWMIArtifact(self):
    """WMI LogicalDisks results populate the client VOLUMES attribute."""

    class WMIActionMock(action_mocks.ActionMock):
      # Returns canned WMI results instead of querying a real client.

      def WmiQuery(self, _):
        return client_fixture.WMI_SAMPLE

    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Windows"))
    client.Set(client.Schema.OS_VERSION("6.2"))
    client.Flush()

    client_mock = WMIActionMock()
    for _ in test_lib.TestFlowHelper(
        "ArtifactCollectorFlow", client_mock, artifact_list=["WMILogicalDisks"],
        token=self.token, client_id=self.client_id,
        dependencies=rdfvalue.ArtifactCollectorFlowArgs.Dependency.IGNORE_DEPS,
        store_results_in_aff4=True):
      pass

    # Test that we set the client VOLUMES attribute
    client = aff4.FACTORY.Open(self.client_id, token=self.token)
    volumes = client.Get(client.Schema.VOLUMES)
    self.assertEqual(len(volumes), 2)
    for result in volumes:
      self.assertTrue(isinstance(result, rdfvalue.Volume))
      self.assertTrue(result.windows.drive_letter in ["Z:", "C:"])
      if result.windows.drive_letter == "C:":
        self.assertAlmostEqual(result.FreeSpacePercent(), 76.142, delta=0.001)
        self.assertEqual(result.Name(), "C:")
      elif result.windows.drive_letter == "Z:":
        self.assertEqual(result.Name(), "homefileshare$")
        self.assertAlmostEqual(result.FreeSpacePercent(), 58.823, delta=0.001)

  def testRetrieveDependencies(self):
    """Test getting an artifact without a KB using retrieve_dependencies."""
    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Windows"))
    client.Set(client.Schema.OS_VERSION("6.2"))
    client.Flush()

    # Provide fake registry and filesystem handlers so the dependency
    # (environment variable) can actually be fetched.
    vfs.VFS_HANDLERS[
        rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
    vfs.VFS_HANDLERS[
        rdfvalue.PathSpec.PathType.OS] = test_lib.FakeFullVFSHandler

    client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
                                          "HashBuffer", "FingerprintFile",
                                          "ListDirectory")

    artifact_list = ["WinDirEnvironmentVariable"]
    for _ in test_lib.TestFlowHelper(
        "ArtifactCollectorFlow", client_mock, artifact_list=artifact_list,
        token=self.token, client_id=self.client_id,
        dependencies=rdfvalue.ArtifactCollectorFlowArgs.Dependency.FETCH_NOW,
        output="testRetrieveDependencies"):
      pass

    output = aff4.FACTORY.Open(self.client_id.Add("testRetrieveDependencies"),
                               token=self.token)
    self.assertEqual(len(output), 1)
    self.assertEqual(output[0], r"C:\Windows")
def main(argv):
  """Entry point for running this module's tests standalone."""
  # Run the full test suite
  test_lib.GrrTestProgram(argv=argv)


if __name__ == "__main__":
  flags.StartMain(main)
| apache-2.0 |
flavour/tldrmp | modules/s3/s3validators.py | 2 | 119990 | # -*- coding: utf-8 -*-
""" Custom Validators
@requires: U{B{I{gluon}} <http://web2py.com>}
@copyright: (c) 2010-2013 Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# Public API of this module: custom validators plus phone-number helpers
__all__ = ["single_phone_number_pattern",
           "multi_phone_number_pattern",
           "s3_single_phone_requires",
           "s3_phone_requires",
           "IS_ACL",
           "IS_ADD_PERSON_WIDGET",
           "IS_ADD_PERSON_WIDGET2",
           "IS_COMBO_BOX",
           "IS_FLOAT_AMOUNT",
           "IS_INT_AMOUNT",
           "IS_IN_SET_LAZY",
           "IS_HTML_COLOUR",
           "IS_LAT",
           "IS_LON",
           "IS_LOCATION",
           "IS_LOCATION_SELECTOR",
           "IS_LOCATION_SELECTOR2",
           "IS_ONE_OF",
           "IS_ONE_OF_EMPTY",
           "IS_ONE_OF_EMPTY_SELECT",
           "IS_NOT_ONE_OF",
           "IS_PROCESSED_IMAGE",
           "IS_SITE_SELECTOR",
           "IS_UTC_DATETIME",
           "IS_UTC_OFFSET",
           "QUANTITY_INV_ITEM",
           ]
import re
import time
from datetime import datetime, timedelta
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
#from gluon import current
#from gluon.dal import Field
#from gluon.validators import IS_DATE_IN_RANGE, IS_MATCH, IS_NOT_IN_DB, IS_IN_SET, IS_INT_IN_RANGE, IS_FLOAT_IN_RANGE, IS_EMAIL
from gluon.languages import lazyT
from gluon.storage import Storage
from gluon.validators import Validator
from s3utils import S3DateTime, s3_orderby_fields, s3_unicode
def translate(text):
    """
        Translate a string through current.T when a translator is
        available; None passes through unchanged, anything else is
        stringified.
    """
    if text is None:
        return None
    if isinstance(text, (str, unicode)):
        from globals import current
        if hasattr(current, "T"):
            # Translate then coerce the lazyT back to a plain string
            return str(current.T(text))
    return str(text)
def options_sorter(x, y):
    """
        cmp-style comparator for (value, label) option tuples:
        orders by label, case-insensitively.
    """
    label_x = s3_unicode(x[1]).upper()
    label_y = s3_unicode(y[1]).upper()
    # Never returns 0: equal labels compare as "less than"
    return 1 if label_x > label_y else -1
# -----------------------------------------------------------------------------
# Phone number requires
# Multiple phone numbers can be separated by comma, slash, semi-colon.
# (Semi-colon appears in Brazil OSM data.)
# @ToDo: Need to beware of separators used inside phone numbers
# (e.g. 555-1212, ext 9), so may need fancier validation if we see that.
# @ToDo: Add tooltip giving list syntax, and warning against above.
# (Current use is in importing OSM files, so isn't interactive.)
# @ToDo: Code that should only have a single # should use
# s3_single_phone_requires. Check what messaging assumes.
# Optional leading "+", then digits with common separators (space, -, ., ()),
# then an optional " x"/" ext" extension of up to 5 digits:
phone_number_pattern = "\+?\s*[\s\-\.\(\)\d]+(?:(?: x| ext)\s?\d{1,5})?"
single_phone_number_pattern = "%s$" % phone_number_pattern
# One or more phone numbers separated by comma, slash or semi-colon:
multi_phone_number_pattern = "%s(\s*(,|/|;)\s*%s)*$" % (phone_number_pattern,
                                                        phone_number_pattern)

s3_single_phone_requires = IS_MATCH(single_phone_number_pattern)
s3_phone_requires = IS_MATCH(multi_phone_number_pattern,
                             error_message=current.T("Invalid phone number!"))
# =============================================================================
class IS_LAT(object):
    """
        example:
            INPUT(_type="text", _name="name", requires=IS_LAT())

        Latitude has to be in decimal degrees between -90 & 90
        - DMS format (e.g. "40:26:46") is converted into decimal degrees
    """
    def __init__(self,
                 error_message = "Latitude/Northing should be between -90 & 90!"
                 ):
        self.minimum = -90
        self.maximum = 90
        self.error_message = error_message
        # Tell s3_mark_required that this validator doesn't accept NULL values
        self.mark_required = True

    # -------------------------------------------------------------------------
    def __call__(self, value):
        """
            Validate a latitude value.

            @param value: latitude in decimal degrees (number or string),
                          or a degrees/minutes/seconds string such as
                          "40:26:46" (any single non-digit separators)
            @return: tuple (value, error) - error is None on success, and
                     value is then the decimal-degrees float
        """
        try:
            value = float(value)
        except (TypeError, ValueError):
            # Not a plain number: try to interpret as degrees/minutes/seconds
            # (str() guard: non-string input must not crash the regex match)
            match = re.match(r"^(\d{0,3})\D(\d{0,3})\D(\d+)$", str(value))
            if not match:
                return (value, self.error_message)
            deg, mins, sec = [int(g) if g else 0 for g in match.groups()]
            # Convert DMS to decimal degrees using float division:
            # the previous implementation truncated the minutes value
            # before dividing, which silently dropped the seconds
            # contribution from the result
            value = deg + (mins + sec / 60.0) / 60.0
        # Range-check the result (previously skipped for DMS input)
        if self.minimum <= value <= self.maximum:
            return (value, None)
        return (value, self.error_message)
# =============================================================================
class IS_LON(object):
    """
        example:
            INPUT(_type="text", _name="name", requires=IS_LON())

        Longitude has to be in decimal degrees between -180 & 180
        - DMS format (e.g. "120:30:0") is converted into decimal degrees
    """
    def __init__(self,
                 error_message = "Longitude/Easting should be between -180 & 180!"
                 ):
        self.minimum = -180
        self.maximum = 180
        self.error_message = error_message
        # Tell s3_mark_required that this validator doesn't accept NULL values
        self.mark_required = True

    # -------------------------------------------------------------------------
    def __call__(self, value):
        """
            Validate a longitude value.

            @param value: longitude in decimal degrees (number or string),
                          or a degrees/minutes/seconds string such as
                          "120:30:15" (any single non-digit separators)
            @return: tuple (value, error) - error is None on success, and
                     value is then the decimal-degrees float
        """
        try:
            value = float(value)
        except (TypeError, ValueError):
            # Not a plain number: try to interpret as degrees/minutes/seconds
            # (str() guard: non-string input must not crash the regex match)
            match = re.match(r"^(\d{0,3})\D(\d{0,3})\D(\d+)$", str(value))
            if not match:
                return (value, self.error_message)
            deg, mins, sec = [int(g) if g else 0 for g in match.groups()]
            # Convert DMS to decimal degrees using float division:
            # the previous implementation truncated the minutes value
            # before dividing, which silently dropped the seconds
            # contribution from the result
            value = deg + (mins + sec / 60.0) / 60.0
        # Range-check the result (previously skipped for DMS input)
        if self.minimum <= value <= self.maximum:
            return (value, None)
        return (value, self.error_message)
# =============================================================================
class IS_NUMBER(object):
    """
        Convenience wrapper used by s3data.py: dispatches to
        IS_INT_AMOUNT / IS_FLOAT_AMOUNT depending on the value type
    """

    # -------------------------------------------------------------------------
    @staticmethod
    def represent(number, precision=2):
        """
            Format a number with thousands-separators.

            @param number: the value (int, float, None or anything else)
            @param precision: decimal places to show (floats only)
            @return: formatted string for ints/floats, "" for None,
                     otherwise the value unchanged
        """
        if number is None:
            return ""
        if isinstance(number, float):
            return IS_FLOAT_AMOUNT.represent(number, precision)
        if isinstance(number, int):
            return IS_INT_AMOUNT.represent(number)
        return number
# =============================================================================
class IS_INT_AMOUNT(IS_INT_IN_RANGE):
    """
        Validation, widget and representation of
        integer-values with thousands-separators
    """

    def __init__(self,
                 minimum=None,
                 maximum=None,
                 error_message=None):
        IS_INT_IN_RANGE.__init__(self,
                                 minimum=minimum,
                                 maximum=maximum,
                                 error_message=error_message)

    # -------------------------------------------------------------------------
    def __call__(self, value):
        # Strip thousands-separators before the standard range validation
        thousands_sep = ","
        value = str(value).replace(thousands_sep, "")
        return IS_INT_IN_RANGE.__call__(self, value)

    # -------------------------------------------------------------------------
    @staticmethod
    def represent(number):
        """
            Change the format of the number depending on the language
            Based on https://code.djangoproject.com/browser/django/trunk/django/utils/numberformat.py

            @param number: the integer to format (None => "")
            @return: string with locale-specific thousands-separators
        """
        if number is None:
            return ""
        try:
            intnumber = int(number)
        except:
            intnumber = number
        settings = current.deployment_settings
        THOUSAND_SEPARATOR = settings.get_L10n_thousands_separator()
        NUMBER_GROUPING = settings.get_L10n_thousands_grouping()
        # The negative/positive sign for the number
        if float(number) < 0:
            sign = "-"
        else:
            sign = ""
        str_number = unicode(intnumber)
        if str_number[0] == "-":
            # Sign is re-added at the end, so drop it here
            str_number = str_number[1:]
        # Walk backwards over the integer part, inserting the separator as we go
        int_part_gd = ""
        for cnt, digit in enumerate(str_number[::-1]):
            if cnt and not cnt % NUMBER_GROUPING:
                int_part_gd += THOUSAND_SEPARATOR
            int_part_gd += digit
        int_part = int_part_gd[::-1]
        return sign + int_part

    # -------------------------------------------------------------------------
    @staticmethod
    def widget(f, v, **attributes):
        # Swap the default "integer" CSS class for "int_amount" so the
        # client-side thousands-separator formatting is applied
        from gluon.sqlhtml import StringWidget
        attr = Storage(attributes)
        classes = attr.get("_class", "").split(" ")
        classes = " ".join([c for c in classes if c != "integer"])
        _class = "%s int_amount" % classes
        attr.update(_class=_class)
        return StringWidget.widget(f, v, **attr)
# =============================================================================
class IS_FLOAT_AMOUNT(IS_FLOAT_IN_RANGE):
    """
        Validation, widget and representation of
        float-values with thousands-separators
    """

    def __init__(self,
                 minimum=None,
                 maximum=None,
                 error_message=None,
                 dot="."):
        IS_FLOAT_IN_RANGE.__init__(self,
                                   minimum=minimum,
                                   maximum=maximum,
                                   error_message=error_message,
                                   dot=dot)

    # -------------------------------------------------------------------------
    def __call__(self, value):
        # Strip thousands-separators before the standard range validation
        thousands_sep = ","
        value = str(value).replace(thousands_sep, "")
        return IS_FLOAT_IN_RANGE.__call__(self, value)

    # -------------------------------------------------------------------------
    @staticmethod
    def represent(number, precision=None):
        """
            Change the format of the number depending on the language
            Based on https://code.djangoproject.com/browser/django/trunk/django/utils/numberformat.py

            @param number: the number to format (None => "")
            @param precision: maximum number of decimal places to show
            @return: string with locale-specific separators
        """
        if number is None:
            return ""
        DECIMAL_SEPARATOR = current.deployment_settings.get_L10n_decimal_separator()
        str_number = unicode(number)
        if "." in str_number:
            int_part, dec_part = str_number.split(".")
            if precision is not None:
                dec_part = dec_part[:precision]
        else:
            int_part, dec_part = str_number, ""
        # Fix: guard the int() conversion - dec_part is "" for integer
        # input, and int("") would raise a ValueError
        if dec_part:
            if int(dec_part) == 0:
                # All-zero decimals are dropped entirely
                dec_part = ""
            elif precision is not None:
                # Pad with zeros to the requested precision
                dec_part = dec_part + ("0" * (precision - len(dec_part)))
        if dec_part:
            dec_part = DECIMAL_SEPARATOR + dec_part
        # Integer part gets thousands-separators
        int_part = IS_INT_AMOUNT.represent(int(int_part))
        return int_part + dec_part

    # -------------------------------------------------------------------------
    @staticmethod
    def widget(f, v, **attributes):
        # Swap the default "double" CSS class for "float_amount" so the
        # client-side thousands-separator formatting is applied
        from gluon.sqlhtml import StringWidget
        attr = Storage(attributes)
        classes = attr.get("_class", "").split(" ")
        classes = " ".join([c for c in classes if c != "double"])
        _class = "%s float_amount" % classes
        attr.update(_class=_class)
        return StringWidget.widget(f, v, **attr)
# =============================================================================
class IS_HTML_COLOUR(IS_MATCH):
    """
        example::

            INPUT(_type="text", _name="name", requires=IS_HTML_COLOUR())
    """
    def __init__(self,
                 error_message="must be a 6 digit hex code! (format: rrggbb)"
                 ):
        # Exactly 6 hex digits, no leading "#"
        IS_MATCH.__init__(self, "^[0-9a-fA-F]{6}$", error_message)
# =============================================================================
# Matches a "table.field" reference (used to derive the label field name)
regex1 = re.compile("[\w_]+\.[\w_]+")
# Matches %(name)s template placeholders (used to find the fields a
# string-template label needs)
regex2 = re.compile("%\((?P<name>[^\)]+)\)s")
class IS_ONE_OF_EMPTY(Validator):
    """
        Filtered version of IS_IN_DB():

        validates a given value as key of another table, filtered by the
        'filterby' field for one of the 'filter_opts' options
        (=a selective IS_IN_DB())

        NB Filtering isn't active in GQL.

        For the dropdown representation:

            'label' can be a string template for the record, or a set of field
            names of the fields to be used as option labels, or a function or
            lambda to create an option label from the respective record (which
            has to return a string, of course). The function will take the
            record as an argument.

        No 'options' method as designed to be called next to an
        Autocomplete field so don't download a large dropdown
        unnecessarily.
    """

    def __init__(self,
                 dbset,
                 field,
                 label=None,
                 filterby=None,
                 filter_opts=None,
                 not_filterby=None,
                 not_filter_opts=None,
                 realms=None,
                 updateable=False,
                 instance_types=None,
                 error_message="invalid value!",
                 orderby=None,
                 groupby=None,
                 left=None,
                 multiple=False,
                 zero="",
                 sort=True,
                 _and=None,
                 ):
        """
            Validator for foreign keys.

            @param dbset: a Set of records like db(query), or db itself
            @param field: the field in the referenced table
            @param label: lookup method for the label corresponding a value,
                          alternatively a string template to be filled with
                          values from the record
            @param filterby: a field in the referenced table to filter by
            @param filter_opts: values for the filterby field which indicate
                                records to include
            @param not_filterby: a field in the referenced table to filter by
            @param not_filter_opts: values for not_filterby field which indicate
                                    records to exclude
            @param realms: only include records belonging to the listed realms
                           (if None, all readable records will be included)
            @param updateable: only include records in the referenced table which
                               can be updated by the user (if False, all readable
                               records will be included)
            @param instance_types: if the referenced table is a super-entity, then
                                   only include these instance types (this parameter
                                   is required for super entity lookups!)
            @param error_message: the error message to return for failed validation
            @param orderby: orderby for the options
            @param groupby: groupby for the options
            @param left: additional left joins required for the options lookup
                         (super-entity instance left joins will be included
                         automatically)
            @param multiple: allow multiple values (for list:reference types)
            @param zero: add this as label for the None-option (allow selection of "None")
            @param sort: sort options alphabetically by their label
            @param _and: internal use
        """
        # A db instance has define_table; calling it yields the current set
        if hasattr(dbset, "define_table"):
            self.dbset = dbset()
        else:
            self.dbset = dbset
        (ktable, kfield) = str(field).split(".")
        if not label:
            label = "%%(%s)s" % kfield
        if isinstance(label, str):
            if regex1.match(str(label)):
                # "table.field" given => turn into a "%(field)s" template
                label = "%%(%s)s" % str(label).split(".")[-1]
            # All fields referenced by the template, plus the key field
            ks = regex2.findall(label)
            if not kfield in ks:
                ks += [kfield]
            fields = ["%s.%s" % (ktable, k) for k in ks]
        elif hasattr(label, "bulk"):
            # S3Represent
            ks = [kfield]
            if label.custom_lookup:
                # Represent uses a custom lookup, so we only
                # retrieve the keys here
                fields = [kfield]
                orderby = field
            else:
                # Represent uses a standard field lookup, so
                # we can do that right here
                label._setup()
                fields = list(label.fields)
                if kfield not in fields:
                    fields.insert(0, kfield)
                # Unlikely, but possible: represent and validator
                # using different keys - commented for now for
                # performance reasons (re-enable if ever necessary)
                #key = label.key
                #if key and key not in fields:
                #fields.insert(0, key)
        else:
            # Callable label (or None fallback): fetch all fields
            ks = [kfield]
            try:
                table = current.s3db[ktable]
                fields =[str(f) for f in table if f.name not in ("wkt", "the_geom")]
            except RuntimeError:
                # Table not defined (yet) => defer field resolution
                fields = "all"
        self.fields = fields
        self.label = label
        self.ktable = ktable
        if not kfield or not len(kfield):
            self.kfield = "id"
        else:
            self.kfield = kfield
        self.ks = ks
        self.error_message = error_message
        self.theset = None
        self.orderby = orderby
        self.groupby = groupby
        self.left = left
        self.multiple = multiple
        self.zero = zero
        self.sort = sort
        self._and = _and
        self.filterby = filterby
        self.filter_opts = filter_opts
        self.not_filterby = not_filterby
        self.not_filter_opts = not_filter_opts
        self.realms = realms
        self.updateable = updateable
        self.instance_types = instance_types

    # -------------------------------------------------------------------------
    def set_self_id(self, id):
        # Propagate the record id to the chained validator (if any)
        if self._and:
            self._and.record_id = id

    # -------------------------------------------------------------------------
    def set_filter(self,
                   filterby = None,
                   filter_opts = None,
                   not_filterby = None,
                   not_filter_opts = None):
        """
            This can be called from prep to apply a filter base on
            data in the record or the primary resource id.
        """
        if filterby:
            self.filterby = filterby
        if filter_opts:
            self.filter_opts = filter_opts
        if not_filterby:
            self.not_filterby = not_filterby
        if not_filter_opts:
            self.not_filter_opts = not_filter_opts

    # -------------------------------------------------------------------------
    def build_set(self):
        """
            Look up all accessible/filtered records in the referenced table
            and populate self.theset (keys as str) and self.labels
            (corresponding labels), or None/None if the table is unavailable.
        """
        dbset = self.dbset
        db = dbset._db
        ktablename = self.ktable
        if ktablename not in db:
            table = current.s3db.table(ktablename, db_only=True)
        else:
            table = db[ktablename]
        if table:
            if self.fields == "all":
                # Deferred field resolution (table wasn't defined in __init__)
                fields = [table[f] for f in table.fields if f not in ("wkt", "the_geom")]
            else:
                fieldnames = [f.split(".")[1] if "." in f else f for f in self.fields]
                fields = [table[k] for k in fieldnames if k in table.fields]
            if db._dbname not in ("gql", "gae"):
                orderby = self.orderby or reduce(lambda a, b: a|b, fields)
                groupby = self.groupby
                # Caching breaks Colorbox dropdown refreshes
                #dd = dict(orderby=orderby, groupby=groupby, cache=(current.cache.ram, 60))
                dd = dict(orderby=orderby, groupby=groupby)
                method = "update" if self.updateable else "read"
                query, left = self.accessible_query(method, table,
                                                    instance_types=self.instance_types)
                if "deleted" in table:
                    query &= (table["deleted"] != True)
                # Realms filter?
                if self.realms:
                    auth = current.auth
                    if auth.is_logged_in() and \
                       auth.get_system_roles().ADMIN in auth.user.realms:
                        # Admin doesn't filter
                        pass
                    else:
                        query &= auth.permission.realm_query(table, self.realms)
                all_fields = [str(f) for f in fields]
                filterby = self.filterby
                if filterby and filterby in table:
                    filter_opts = self.filter_opts
                    if filter_opts:
                        if None in filter_opts:
                            # Needs special handling (doesn't show up in 'belongs')
                            _query = (table[filterby] == None)
                            filter_opts = [f for f in filter_opts if f is not None]
                            if filter_opts:
                                _query = _query | (table[filterby].belongs(filter_opts))
                            query &= _query
                        else:
                            query &= (table[filterby].belongs(filter_opts))
                    if not self.orderby:
                        # Default ordering by the filter field
                        filterby_field = table[filterby]
                        dd.update(orderby=filterby_field)
                        if str(filterby_field) not in all_fields:
                            fields.append(filterby_field)
                            all_fields.append(str(filterby_field))
                not_filterby = self.not_filterby
                if not_filterby and not_filterby in table:
                    not_filter_opts = self.not_filter_opts
                    if not_filter_opts:
                        if None in not_filter_opts:
                            # Needs special handling (doesn't show up in 'belongs')
                            _query = (table[not_filterby] == None)
                            not_filter_opts = [f for f in not_filter_opts if f is not None]
                            if not_filter_opts:
                                _query = _query | (table[not_filterby].belongs(not_filter_opts))
                            query &= (~_query)
                        else:
                            query &= (~(table[not_filterby].belongs(not_filter_opts)))
                    if not self.orderby:
                        # Default ordering by the exclusion-filter field
                        filterby_field = table[not_filterby]
                        dd.update(orderby=filterby_field)
                        if str(filterby_field) not in all_fields:
                            fields.append(filterby_field)
                            all_fields.append(str(filterby_field))
                if left is not None:
                    # Merge the accessible-query left joins with any
                    # caller-provided ones, avoiding duplicates
                    if self.left is not None:
                        if not isinstance(left, list):
                            left = [left]
                        ljoins = [str(join) for join in self.left]
                        for join in left:
                            ljoin = str(join)
                            if ljoin not in ljoins:
                                self.left.append(join)
                                ljoins.append(ljoin)
                    else:
                        self.left = left
                if self.left is not None:
                    dd.update(left=self.left)
                # Make sure we have all ORDERBY fields in the query
                # (otherwise postgresql will complain)
                fieldnames = [str(f) for f in fields]
                for f in s3_orderby_fields(table, dd.get("orderby")):
                    if str(f) not in fieldnames:
                        fields.append(f)
                        fieldnames.append(str(f))
                records = dbset(query).select(distinct=True, *fields, **dd)
            else:
                # Note this does not support filtering.
                orderby = self.orderby or \
                          reduce(lambda a, b: a|b, (f for f in fields
                                                    if f.type != "id"))
                # Caching breaks Colorbox dropdown refreshes
                #dd = dict(orderby=orderby, cache=(current.cache.ram, 60))
                dd = dict(orderby=orderby)
                records = dbset.select(db[self.ktable].ALL, **dd)
            self.theset = [str(r[self.kfield]) for r in records]
            label = self.label
            try:
                # Is callable
                if hasattr(label, "bulk"):
                    # S3Represent => use bulk option
                    d = label.bulk(None,
                                   rows=records,
                                   list_type=False,
                                   show_link=False)
                    labels = [d.get(r[self.kfield], d[None]) for r in records]
                else:
                    # Standard representation function
                    labels = map(label, records)
            except TypeError:
                # Not callable => template, field list or default fallbacks
                if isinstance(label, str):
                    labels = map(lambda r: label % dict(r), records)
                elif isinstance(label, (list, tuple)):
                    labels = map(lambda r: \
                                 " ".join([r[l] for l in label if l in r]),
                                 records)
                elif "name" in table:
                    labels = map(lambda r: r.name, records)
                else:
                    labels = map(lambda r: r[self.kfield], records)
            self.labels = labels
            if labels and self.sort:
                items = zip(self.theset, self.labels)
                # Alternative variant that handles generator objects,
                # doesn't seem necessary, retained here just in case:
                #orig_labels = self.labels
                #orig_theset = self.theset
                #items = []
                #for i in xrange(len(orig_theset)):
                #label = orig_labels[i]
                ##if hasattr(label, "flatten"):
                ##try:
                ##label = label.flatten()
                ##except:
                ##pass
                #items.append((orig_theset[i], label))
                items.sort(key=lambda item: s3_unicode(item[1]).lower())
                self.theset, self.labels = zip(*items)
        else:
            self.theset = None
            self.labels = None

    # -------------------------------------------------------------------------
    @classmethod
    def accessible_query(cls, method, table, instance_types=None):
        """
            Returns an accessible query (and left joins, if necessary) for
            records in table the user is permitted to access with method

            @param method: the method (e.g. "read" or "update")
            @param table: the table
            @param instance_types: list of instance tablenames, if table is
                                   a super-entity (required in this case!)

            @return: tuple (query, left) where query is the query and left joins
                     is the list of left joins required for the query

            @note: for higher security policies and super-entities with many
                   instance types this can give a very complex query. Try to
                   always limit the instance types to what is really needed
        """
        DEFAULT = (table._id > 0)
        left = None
        if "instance_type" in table:
            # Super-entity
            if not instance_types:
                return DEFAULT, left
            query = None
            auth = current.auth
            s3db = current.s3db
            for instance_type in instance_types:
                itable = s3db.table(instance_type)
                if itable is None:
                    continue
                join = itable.on(itable[table._id.name] == table._id)
                if left is None:
                    left = [join]
                else:
                    left.append(join)
                # Accessible, non-deleted instance records only
                q = (itable._id != None) & \
                    auth.s3_accessible_query(method, itable)
                if "deleted" in itable:
                    q &= itable.deleted != True
                if query is None:
                    query = q
                else:
                    query |= q
            if query is None:
                query = DEFAULT
        else:
            query = current.auth.s3_accessible_query(method, table)
        return query, left

    # -------------------------------------------------------------------------
    # Removed as we don't want any options downloaded unnecessarily
    #def options(self):

    # -------------------------------------------------------------------------
    def __call__(self, value):
        """
            Validate that value (or each of its components for multiple)
            refers to an existing, non-deleted, filter-matching record.

            @return: tuple (value, error) per web2py validator convention
        """
        try:
            dbset = self.dbset
            table = dbset._db[self.ktable]
            deleted_q = ("deleted" in table) and (table["deleted"] == False) or False
            filter_opts_q = False
            filterby = self.filterby
            if filterby and filterby in table:
                filter_opts = self.filter_opts
                if filter_opts:
                    if None in filter_opts:
                        # Needs special handling (doesn't show up in 'belongs')
                        filter_opts_q = (table[filterby] == None)
                        filter_opts = [f for f in filter_opts if f is not None]
                        if filter_opts:
                            filter_opts_q |= (table[filterby].belongs(filter_opts))
                    else:
                        filter_opts_q = (table[filterby].belongs(filter_opts))
            if self.multiple:
                # list:reference - accept list, "|"-delimited string or scalar
                if isinstance(value, list):
                    values = [str(v) for v in value]
                elif isinstance(value, basestring) and \
                     value[0] == "|" and value[-1] == "|":
                    values = value[1:-1].split("|")
                elif value:
                    values = [value]
                else:
                    values = []
                if self.theset:
                    # Validate against the pre-built set
                    if not [x for x in values if not x in self.theset]:
                        return (values, None)
                    else:
                        return (value, self.error_message)
                else:
                    # Validate against the database
                    field = table[self.kfield]
                    query = None
                    for v in values:
                        q = (field == v)
                        query = query is not None and query | q or q
                    if filter_opts_q != False:
                        query = query is not None and \
                                (filter_opts_q & (query)) or filter_opts_q
                    if deleted_q != False:
                        query = query is not None and \
                                (deleted_q & (query)) or deleted_q
                    if dbset(query).count() < 1:
                        return (value, self.error_message)
                    return (values, None)
            elif self.theset:
                # Validate against the pre-built set
                if str(value) in self.theset:
                    if self._and:
                        return self._and(value)
                    else:
                        return (value, None)
            else:
                # Validate against the database
                values = [value]
                query = None
                for v in values:
                    q = (table[self.kfield] == v)
                    query = query is not None and query | q or q
                if filter_opts_q != False:
                    query = query is not None and \
                            (filter_opts_q & (query)) or filter_opts_q
                if deleted_q != False:
                    query = query is not None and \
                            (deleted_q & (query)) or deleted_q
                if dbset(query).count():
                    if self._and:
                        return self._and(value)
                    else:
                        return (value, None)
        except:
            # NOTE(review): broad except deliberately treats any lookup
            # failure as a validation error
            pass
        return (value, self.error_message)
# =============================================================================
class IS_ONE_OF(IS_ONE_OF_EMPTY):
    """
        Standard foreign-key validator: like IS_ONE_OF_EMPTY, but with
        the 'options' method restored for use in dropdown widgets.
    """

    def options(self, zero=True):
        """
            Build the list of (value, label) tuples for a dropdown.

            @param zero: prepend the empty-option (labelled self.zero)
                         for single-select fields
        """
        self.build_set()
        if self.theset is None or self.labels is None:
            items = []
        else:
            items = zip(self.theset, self.labels)
        # Prepend the None-option unless suppressed or multi-select
        if zero and not self.multiple and self.zero is not None:
            items.insert(0, ("", self.zero))
        return items
# =============================================================================
class IS_ONE_OF_EMPTY_SELECT(IS_ONE_OF_EMPTY):
    """
        Variant of IS_ONE_OF_EMPTY which renders an empty SELECT
        (instead of an INPUT) - the real options are filled in
        client-side.
    """

    def options(self, zero=True):
        # Single blank option only
        return [("", "")]
# =============================================================================
class IS_NOT_ONE_OF(IS_NOT_IN_DB):
    """
        Filtered version of IS_NOT_IN_DB()
            - understands the 'deleted' field.
            - makes the field unique (amongst non-deleted field)

        Example:
            - INPUT(_type="text", _name="name", requires=IS_NOT_ONE_OF(db, db.table))
    """

    def __call__(self, value):
        """
            Validate that value does not yet exist in the (non-deleted)
            records of the table, other than in the current record itself.
        """
        value = str(value)
        if not value.strip():
            # Empty values are rejected
            return (value, translate(self.error_message))
        if value in self.allowed_override:
            return (value, None)
        (tablename, fieldname) = str(self.field).split(".")
        dbset = self.dbset
        table = dbset.db[tablename]
        field = table[fieldname]
        query = (field == value)
        if "deleted" in table:
            # Only non-deleted records count for uniqueness
            query = (table["deleted"] == False) & query
        rows = dbset(query).select(limitby=(0, 1))
        if len(rows) > 0:
            if isinstance(self.record_id, dict):
                # Composite record identification
                for f in self.record_id:
                    if str(getattr(rows[0], f)) != str(self.record_id[f]):
                        return (value, translate(self.error_message))
            elif str(rows[0][table._id.name]) != str(self.record_id):
                # Value belongs to a different record => not unique
                return (value, translate(self.error_message))
        return (value, None)
# =============================================================================
class IS_LOCATION(Validator):
    """
        Allow all locations, or locations by level.
    """

    def __init__(self,
                 level = None,
                 error_message = None
                 ):
        self.level = level # can be a List or a single element
        self.error_message = error_message
        # Make it like IS_ONE_OF to support AddResourceLink
        self.ktable = "gis_location"
        self.kfield = "id"
        # Tell s3_mark_required that this validator doesn't accept NULL values
        self.mark_required = True

    # -------------------------------------------------------------------------
    def __call__(self, value):
        """
            Validate that value is the id of an existing (non-deleted)
            gis_location record, optionally restricted to the configured
            level(s).
        """
        level = self.level
        if level == "L0":
            # Use cached countries. This returns name if id is for a country.
            try:
                location_id = int(value)
            except ValueError:
                ok = False
            else:
                ok = current.gis.get_country(location_id)
        else:
            db = current.db
            table = db.gis_location
            query = (table.id == value) & (table.deleted == False)
            if level:
                if not hasattr(level, "strip") and \
                   (hasattr(level, "__getitem__") or \
                    hasattr(level, "__iter__")):
                    # List or Tuple
                    if None in level:
                        # None needs special handling
                        level = [l for l in level if l is not None]
                        query &= ((table.level.belongs(level)) | \
                                  (table.level == None))
                    else:
                        query &= (table.level.belongs(level))
                else:
                    # Single level string
                    query &= (table.level == level)
            ok = db(query).select(table.id, limitby=(0, 1))
        if ok:
            return (value, None)
        else:
            return (value, self.error_message or current.T("Invalid Location!"))
# =============================================================================
class IS_LOCATION_SELECTOR(Validator):
    """
        Designed for use within the S3LocationSelectorWidget.
        For Create forms, this will create a new location from the additional fields
        For Update forms, this will check that we have a valid location_id FK and update any changes

        @ToDo: Audit
    """

    def __init__(self,
                 error_message = None,
                 ):
        self.error_message = error_message
        # Accumulates per-field validation errors
        self.errors = Storage()
        # Set to the location ID on Update forms (used by _process_values)
        self.id = None
        # Tell s3_mark_required that this validator doesn't accept NULL values
        self.mark_required = True

    # -------------------------------------------------------------------------
    def __call__(self, value):
        """
            Validate/process the widget's submitted values.

            @param value: the location_id from the form ("dummy" on Create
                          forms, an existing gis_location ID on Update forms)
            @return: (location_id, None) on success, else (value, error)
        """
        if current.response.s3.bulk:
            # Pointless in imports
            return (value, None)

        db = current.db
        table = db.gis_location

        if value == "dummy":
            # Create form
            if not current.auth.s3_has_permission("create", table):
                return (None, current.auth.messages.access_denied)
            location = self._process_values()
            if self.errors:
                return (None, self._format_errors())
            if location.name or location.lat or location.lon or location.wkt or \
               location.street or location.postcode or location.parent:
                vars = dict(name = location.name,
                            lat = location.lat,
                            lon = location.lon,
                            wkt = location.wkt,
                            gis_feature_type = location.gis_feature_type,
                            addr_street = location.street,
                            addr_postcode = location.postcode,
                            parent = location.parent,
                            lon_min = location.lon_min,
                            lon_max = location.lon_max,
                            lat_min = location.lat_min,
                            lat_max = location.lat_max
                            )
                if vars["wkt"] and current.deployment_settings.get_gis_spatialdb():
                    # Also populate the spatial field
                    vars["the_geom"] = vars["wkt"]
                value = table.insert(**vars)
                # onaccept
                vars["id"] = value
                current.gis.update_location_tree(vars)
                return (value, None)
            else:
                # Nothing submitted: no location to create
                return (None, None)
        else:
            # This must be an Update form
            if not current.auth.s3_has_permission("update", table, record_id=value):
                return (value, current.auth.messages.access_denied)
            # Check that this is a valid location_id
            query = (table.id == value) & \
                    (table.deleted == False) & \
                    (table.level == None) # NB Specific Locations only
            location = db(query).select(table.id,
                                        limitby=(0, 1)).first()
            if not location:
                return (value, self.error_message or current.T("Invalid Location!"))
            # Update the record, in case changes have been made
            self.id = value
            location = self._process_values()
            if self.errors:
                return (value, self._format_errors())
            vars = dict(name = location.name,
                        lat = location.lat,
                        lon = location.lon,
                        inherited = location.inherited,
                        addr_street = location.street,
                        addr_postcode = location.postcode,
                        parent = location.parent,
                        wkt = location.wkt,
                        lon_min = location.lon_min,
                        lon_max = location.lon_max,
                        lat_min = location.lat_min,
                        lat_max = location.lat_max
                        )
            if vars["wkt"] and current.deployment_settings.get_gis_spatialdb():
                # Also populate the spatial field
                vars["the_geom"] = vars["wkt"]
            db(table.id == value).update(**vars)
            # onaccept
            vars["id"] = value
            current.gis.update_location_tree(vars)
            return (value, None)

    # -------------------------------------------------------------------------
    def _format_errors(self):
        """ Join all accumulated validation errors into a single message """
        errors = self.errors
        error = ""
        for e in errors:
            error = "%s\n%s" % (error, errors[e]) if error else errors[e]
        return error

    # -------------------------------------------------------------------------
    def _process_values(self):
        """
            Read the request.vars & prepare for a record insert/update

            Note: This is also used by IS_SITE_SELECTOR()

            @return: a Storage with the location data, or None on error
                     (errors are accumulated in self.errors)
        """

        # Rough check for valid Lat/Lon (detailed later by onvalidation)
        vars = current.request.vars
        lat = vars.get("gis_location_lat", None)
        lon = vars.get("gis_location_lon", None)
        if lat:
            try:
                lat = float(lat)
            except ValueError:
                self.errors["lat"] = current.T("Latitude is Invalid!")
        if lon:
            try:
                lon = float(lon)
            except ValueError:
                self.errors["lon"] = current.T("Longitude is Invalid!")
        if self.errors:
            return None

        L0 = vars.get("gis_location_L0", None)

        db = current.db
        table = db.gis_location

        # Are we allowed to create Locations?
        auth = current.auth
        def permitted_to_create():
            if not auth.s3_has_permission("create", table):
                self.errors["location_id"] = auth.messages.access_denied
                return False
            return True

        # What levels of the hierarchy are we allowed to edit?
        s3db = current.s3db
        if auth.s3_has_role(current.session.s3.system_roles.MAP_ADMIN):
            # 'MapAdmin' always has permission to edit hierarchy locations
            allowed = dict.fromkeys(("L1", "L2", "L3", "L4", "L5"), True)
        else:
            config = None
            if L0:
                # Look up the per-country hierarchy configuration
                htable = s3db.gis_hierarchy
                config = db(htable.location_id == L0).select(htable.edit_L1,
                                                             htable.edit_L2,
                                                             htable.edit_L3,
                                                             htable.edit_L4,
                                                             htable.edit_L5,
                                                             limitby=(0, 1)
                                                             ).first()
            if config:
                allowed = {"L1": config.edit_L1,
                           "L2": config.edit_L2,
                           "L3": config.edit_L3,
                           "L4": config.edit_L4,
                           "L5": config.edit_L5,
                           }
            else:
                # Default is editable
                allowed = dict.fromkeys(("L1", "L2", "L3", "L4", "L5"), True)

        # We don't need to do onvalidation of the Location Hierarchy records
        # separately as we don't have anything extra to validate than we have
        # done already
        onaccept = current.gis.update_location_tree

        class PermissionDenied(Exception):
            """ Raised to abort processing when creation is not permitted """
            pass

        def resolve_level(value, level, ancestors):
            """
                Resolve one hierarchy-level selection to a location ID.

                @param value: the submitted value - either an existing
                              gis_location ID or a name to find-or-create
                @param level: the hierarchy level code ("L1".."L5")
                @param ancestors: resolved IDs of candidate parents, nearest
                                  level first (entries may be None)
                @return: the location ID, or None when nothing usable
                @raise PermissionDenied: when a create is needed but denied
            """
            if not value:
                return None
            immediate = ancestors[0] if ancestors else None
            # Nearest non-null ancestor is used as parent for new records
            parent = None
            for ancestor in ancestors:
                if ancestor:
                    parent = ancestor
                    break
            try:
                value = int(value)
            except (ValueError, TypeError):
                # A name: test for duplicates first
                # @ToDo: Also check for parenting direct to a higher level
                query = (table.name == value) & (table.level == level)
                if immediate:
                    query &= (table.parent == immediate)
                existing = db(query).select(table.id,
                                            limitby=(0, 1)).first()
                if existing:
                    # Use the existing record
                    return existing.id
                if not allowed[level]:
                    # Not permitted to create hierarchy records at this level
                    # (previously the raw name string leaked through here and
                    # could end up used as a parent FK)
                    return None
                if not permitted_to_create():
                    raise PermissionDenied
                record = dict(name=value, level=level)
                if parent:
                    record["parent"] = parent
                new_id = table.insert(**record)
                record["id"] = new_id
                onaccept(record)
                return new_id
            # An existing ID: do we need to update its parent?
            if immediate:
                row = db(table.id == value).select(table.name,
                                                   table.parent,
                                                   limitby=(0, 1)).first()
                if row is not None:
                    try:
                        target = int(immediate)
                    except (ValueError, TypeError):
                        target = immediate
                    if row.parent != target:
                        # Re-parent this record
                        # (previous code updated via a stale/undefined query)
                        db(table.id == value).update(parent=immediate)
                        row["level"] = level
                        row["id"] = value
                        onaccept(row)
            return value

        # Resolve each level, passing the chain of already-resolved ancestors
        # (also fixes the old copy-paste bug where an L5 parented directly to
        # an L0 was inserted with parent=L1)
        try:
            L1 = resolve_level(vars.get("gis_location_L1", None), "L1",
                               [L0])
            L2 = resolve_level(vars.get("gis_location_L2", None), "L2",
                               [L1, L0])
            L3 = resolve_level(vars.get("gis_location_L3", None), "L3",
                               [L2, L1, L0])
            L4 = resolve_level(vars.get("gis_location_L4", None), "L4",
                               [L3, L2, L1, L0])
            L5 = resolve_level(vars.get("gis_location_L5", None), "L5",
                               [L4, L3, L2, L1, L0])
        except PermissionDenied:
            return None

        # Check if we have a specific location to create
        name = vars.get("gis_location_name", None)
        wkt = vars.get("gis_location_wkt", None)
        street = vars.get("gis_location_street", None)
        postcode = vars.get("gis_location_postcode", None)
        parent = L5 or L4 or L3 or L2 or L1 or L0 or None

        # Move vars into a pseudo-form for onvalidation
        form = Storage()
        form.errors = dict()
        form.vars = Storage()
        form_vars = form.vars
        form_vars.lat = lat
        form_vars.lon = lon
        form_vars.wkt = wkt
        if wkt:
            # Polygon (will be corrected as-required by wkt_centroid)
            form_vars.gis_feature_type = "3"
        else:
            # Point
            form_vars.gis_feature_type = "1"
        form_vars.parent = parent
        if self.id:
            # Provide the old record to check inherited
            form.record = db(table.id == self.id).select(table.inherited,
                                                         table.lat,
                                                         table.lon,
                                                         limitby=(0, 1)).first()
        # onvalidation
        s3db.gis_location_onvalidation(form)
        if form.errors:
            self.errors = form.errors
            return None
        location = Storage(name=name,
                           lat=form_vars.lat,
                           lon=form_vars.lon,
                           inherited=form_vars.inherited,
                           street=street,
                           postcode=postcode,
                           parent=parent,
                           wkt = form_vars.wkt,
                           gis_feature_type = form_vars.gis_feature_type,
                           lon_min = form_vars.lon_min,
                           lon_max = form_vars.lon_max,
                           lat_min = form_vars.lat_min,
                           lat_max = form_vars.lat_max
                           )
        return location
# =============================================================================
class IS_LOCATION_SELECTOR2(Validator):
    """
        Designed for use within the S3LocationSelectorWidget2.
        For Create forms, this will create a new location if there is a Lat/Lon submitted
        For Update forms, this will check that we have a valid location_id FK and update any changes

        @ToDo: Audit
    """

    def __init__(self,
                 levels=("L1", "L2", "L3"),
                 error_message = None,
                 ):
        # Hierarchy levels accepted when the value is an Lx location
        self.levels = levels
        self.error_message = error_message
        # Tell s3_mark_required that this validator doesn't accept NULL values
        self.mark_required = True

    # -------------------------------------------------------------------------
    def __call__(self, value):
        """
            @param value: the location_id ("dummy" when creating a new point)
            @return: (location_id, None) on success, else (value, error)
        """
        if current.response.s3.bulk:
            # Pointless in imports
            return (value, None)

        vars = current.request.post_vars
        address = vars.get("address", None)
        postcode = vars.get("postcode", None)
        # Normalize empty strings to None for the optional geometry fields
        lat = vars.get("lat", None)
        if lat == "":
            lat = None
        lon = vars.get("lon", None)
        if lon == "":
            lon = None
        wkt = vars.get("wkt", None)
        if wkt == "":
            wkt = None
        parent = vars.get("parent", None)

        # Rough check for valid Lat/Lon
        errors = Storage()
        if lat:
            try:
                lat = float(lat)
            except ValueError:
                errors["lat"] = current.T("Latitude is Invalid!")
        if lon:
            try:
                lon = float(lon)
            except ValueError:
                errors["lon"] = current.T("Longitude is Invalid!")
        if wkt:
            # Parse-check only: the result is not needed here
            try:
                from shapely.wkt import loads as wkt_loads
                wkt_loads(wkt)
            except:
                errors["wkt"] = current.T("WKT is Invalid!")
        if errors:
            return (value, errors)

        if parent or address or postcode or wkt is not None or \
           (lat is not None and lon is not None):
            # Specific Location
            db = current.db
            table = db.gis_location
            if value == "dummy":
                # Create a new point
                if not current.auth.s3_has_permission("create", table):
                    return (None, current.auth.messages.access_denied)
                vars = Storage(lat=lat,
                               lon=lon,
                               wkt=wkt,
                               inherited=False,
                               addr_street=address,
                               addr_postcode=postcode,
                               parent=parent,
                               )
                # onvalidation
                # - includes detailed bounds check if deployment_setting doesn't disable it
                form = Storage()
                form.errors = errors
                form.vars = vars
                current.s3db.gis_location_onvalidation(form)
                if form.errors:
                    errors = form.errors
                    error = ""
                    for e in errors:
                        error = "%s\n%s" % (error, errors[e]) if error else errors[e]
                    return (parent, error)
                id = table.insert(**vars)
                vars.id = id
                # onaccept
                current.gis.update_location_tree(vars)
                return (id, None)
            else:
                # Update existing Point
                # Check that this is a valid location_id
                query = (table.id == value) & \
                        (table.deleted == False) & \
                        (table.level == None) # NB Specific Locations only
                location = db(query).select(table.lat,
                                            table.lon,
                                            table.wkt,
                                            table.addr_street,
                                            table.addr_postcode,
                                            table.parent,
                                            limitby=(0, 1)).first()
                if not location:
                    return (value,
                            self.error_message or current.T("Invalid Location!"))
                # Has anything relevant changed?
                changed = False
                lparent = location.parent
                if parent and lparent:
                    if int(parent) != int(lparent):
                        changed = True
                elif parent or lparent:
                    changed = True
                if not changed:
                    addr_street = location.addr_street
                    if address and addr_street:
                        if address != addr_street:
                            changed = True
                    elif address or addr_street:
                        changed = True
                if not changed:
                    addr_postcode = location.addr_postcode
                    if postcode and addr_postcode:
                        if postcode != addr_postcode:
                            changed = True
                    elif postcode or addr_postcode:
                        changed = True
                if not changed:
                    if wkt and wkt != location.wkt:
                        changed = True
                    else:
                        # Float comparisons need care - just check the 1st 5
                        # decimal points, as that's all we care about
                        llat = location.lat
                        if lat is not None and llat is not None:
                            if round(lat, 5) != round(llat, 5):
                                changed = True
                        elif lat is not None or llat is not None:
                            changed = True
                        if not changed:
                            llon = location.lon
                            if lon is not None and llon is not None:
                                if round(lon, 5) != round(llon, 5):
                                    changed = True
                            elif lon is not None or llon is not None:
                                changed = True
                if changed:
                    # Update the record
                    if not current.auth.s3_has_permission("update", table, record_id=value):
                        return (value, current.auth.messages.access_denied)
                    vars = Storage(addr_street=address,
                                   addr_postcode=postcode,
                                   parent=parent,
                                   )
                    if lat is not None and lon is not None:
                        vars.lat = lat
                        vars.lon = lon
                        vars.inherited = False
                    elif wkt is not None:
                        vars.wkt = wkt
                        vars.inherited = False
                    # onvalidation
                    # - includes detailed bounds check if deployment_setting doesn't disable it
                    form = Storage()
                    form.errors = errors
                    form.vars = vars
                    current.s3db.gis_location_onvalidation(form)
                    if form.errors:
                        errors = form.errors
                        error = ""
                        for e in errors:
                            error = "%s\n%s" % (error, errors[e]) if error else errors[e]
                        return (value, error)
                    # Update the record
                    db(table.id == value).update(**vars)
                    # Update location tree in case parent has changed
                    vars.id = value
                    # onaccept
                    current.gis.update_location_tree(vars)
                return (value, None)
        else:
            # Lx or a specific location with blank Parent/Address/Lat/Lon
            if value:
                db = current.db
                table = db.gis_location
                query = (table.id == value) & \
                        (table.deleted == False)
                location = db(query).select(table.level,
                                            table.lat,
                                            table.lon,
                                            table.addr_street,
                                            table.addr_postcode,
                                            table.parent,
                                            limitby=(0, 1)).first()
                if not location:
                    return (value,
                            self.error_message or current.T("Invalid Location!"))
                if location.level:
                    # Do a simple Location check
                    return IS_LOCATION(level=self.levels)(value)
                else:
                    # Clear the Parent/Lat/Lon/Address
                    vars = Storage(lat = None,
                                   lon = None,
                                   addr_street = None,
                                   addr_postcode = None,
                                   parent = None)
                    db(table.id == value).update(**vars)
                    # Update location tree in case parent has changed
                    vars.id = value
                    # onaccept
                    current.gis.update_location_tree(vars)
                    # Fix: previously this branch fell through without
                    # returning the (value, error) tuple expected of a
                    # validator, yielding a bare None
                    return (value, None)
            else:
                # Do a simple Location check
                return IS_LOCATION(level=self.levels)(value)
# =============================================================================
class IS_SITE_SELECTOR(IS_LOCATION_SELECTOR):
    """
        Extends the IS_LOCATION_SELECTOR() validator to transparently support
        Sites of the specified type.
        Note that these cannot include any other mandatory fields other than Name & location_id

        Designed for use within the ???S3LocationSelectorWidget.
        For Create forms, this will create a new site & location from the additional fields
        For Update forms, this will normally just check that we have a valid site_id FK
        - although there is the option to create a new location there too, in which case it acts as-above.

        @ToDo: Audit
    """

    def __init__(self,
                 site_type = "project_site",
                 error_message = None,
                 ):
        self.error_message = error_message
        # Accumulates per-field validation errors (used by _process_values)
        self.errors = Storage()
        self.id = None
        # Tablename of the site resource to create/update
        self.site_type = site_type
        # Tell s3_mark_required that this validator doesn't accept NULL values
        self.mark_required = True

    # -------------------------------------------------------------------------
    def __call__(self, value):
        """
            @param value: the site_id ("dummy" on Create forms)
            @return: (site_id, None) on success, else (value, error)
        """
        if current.response.s3.bulk:
            # Pointless in imports
            return (value, None)

        db = current.db
        auth = current.auth
        gis = current.gis
        table = db.gis_location
        stable = db[self.site_type]

        if value == "dummy":
            # Create form
            if not auth.s3_has_permission("create", stable):
                return (None, auth.messages.access_denied)
            location = self._process_values()
            if self.errors:
                errors = self.errors
                error = ""
                for e in errors:
                    error = "%s\n%s" % (error, errors[e]) if error else errors[e]
                return (None, error)
            if location.name or location.lat or location.lon or \
               location.street or location.postcode or location.parent:
                # Location creation
                # Fix: these values previously read from an undefined
                # name 'form' (NameError) - they come from the Storage
                # returned by _process_values()
                vars = dict(name = location.name,
                            lat = location.lat,
                            lon = location.lon,
                            addr_street = location.street,
                            addr_postcode = location.postcode,
                            parent = location.parent,
                            wkt = location.wkt,
                            lon_min = location.lon_min,
                            lon_max = location.lon_max,
                            lat_min = location.lat_min,
                            lat_max = location.lat_max
                            )
                location_id = table.insert(**vars)
                # Location onaccept
                vars["id"] = location_id
                gis.update_location_tree(vars)
                # Site creation
                value = stable.insert(name = location.name,
                                      location_id = location_id)
                return (value, None)
            else:
                # Nothing submitted: no site to create
                return (None, None)
        else:
            # This must be an Update form
            if not auth.s3_has_permission("update", stable, record_id=value):
                return (value, auth.messages.access_denied)
            # Check that this is a valid site_id
            query = (stable.id == value) & \
                    (stable.deleted == False)
            site = db(query).select(stable.id,
                                    stable.name,
                                    stable.location_id,
                                    limitby=(0, 1)).first()
            location_id = site.location_id if site else None
            if location_id:
                # Update the location, in case changes have been made
                # NOTE(review): self.id receives the *site* ID here, but
                # _process_values() uses it as a gis_location ID for the
                # inherited-check record - confirm intended
                self.id = value
                location = self._process_values()
                if self.errors:
                    errors = self.errors
                    error = ""
                    for e in errors:
                        error = "%s\n%s" % (error, errors[e]) if error else errors[e]
                    return (value, error)
                # Location update
                name = location.name
                vars = dict(name = name,
                            lat = location.lat,
                            lon = location.lon,
                            addr_street = location.street,
                            addr_postcode = location.postcode,
                            parent = location.parent
                            )
                lquery = (table.id == location_id)
                db(lquery).update(**vars)
                # Location onaccept
                vars["id"] = location_id
                gis.update_location_tree(vars)
                # Fix: previously compared the Field object stable.name
                # (always truthy-unequal) instead of the record's name
                if site.name != name:
                    # Site Name has changed
                    db(query).update(name = name)
                return (value, None)

        return (value, self.error_message or current.T("Invalid Site!"))
# =============================================================================
class IS_ADD_PERSON_WIDGET(Validator):
    """
        Validator for S3AddPersonWidget

        On POST, creates or updates the person record from the widget's
        sub-form fields (name, date of birth, email, mobile phone,
        occupation) and returns the person_id.
    """

    def __init__(self,
                 error_message=None):
        self.error_message = error_message
        # Tell s3_mark_required that this validator doesn't accept NULL values
        self.mark_required = True

    # -------------------------------------------------------------------------
    def __call__(self, value):
        """
            @param value: the person_id (may be empty when creating)
            @return: (person_id, None) on success, else (value, error)
        """
        if current.response.s3.bulk:
            # Pointless in imports
            return (value, None)

        person_id = None
        if value:
            try:
                person_id = int(value)
            except:
                pass

        request = current.request
        if request.env.request_method == "POST":
            if "import" in request.args:
                # Widget Validator not appropriate for this context
                return (person_id, None)

            T = current.T
            db = current.db
            s3db = current.s3db

            ptable = db.pr_person
            ctable = db.pr_contact

            def email_validate(value, person_id):
                """ Validate the email address (required/format/uniqueness) """
                error_message = T("Please enter a valid email address")
                if value is not None:
                    value = value.strip()
                # No email?
                if not value:
                    email_required = \
                        current.deployment_settings.get_hrm_email_required()
                    if email_required:
                        return (value, error_message)
                    return (value, None)
                # Valid email?
                value, error = IS_EMAIL()(value)
                if error:
                    return value, error_message
                # Unique email?
                query = (ctable.deleted != True) & \
                        (ctable.contact_method == "EMAIL") & \
                        (ctable.value == value)
                if person_id:
                    # Exclude this person's own address
                    query &= (ctable.pe_id == ptable.pe_id) & \
                             (ptable.id != person_id)
                email = db(query).select(ctable.id, limitby=(0, 1)).first()
                if email:
                    error_message = T("This email-address is already registered.")
                    return value, error_message
                # Ok!
                return value, None

            _vars = request.post_vars
            mobile = _vars["mobile_phone"]
            if mobile:
                # Validate the phone number
                regex = re.compile(single_phone_number_pattern)
                if not regex.match(mobile):
                    error = T("Invalid phone number")
                    return (person_id, error)

            validate = current.manager.validate
            if person_id:
                # Filter out location_id (location selector form values
                # being processed only after this widget has been validated)
                _vars = Storage([(k, _vars[k])
                                 for k in _vars if k != "location_id"])

                # Validate and update the person record
                query = (ptable.id == person_id)
                data = Storage()
                for f in ptable._filter_fields(_vars):
                    value, error = validate(ptable, None, f, _vars[f])
                    if error:
                        return (person_id, error)
                    if value:
                        if f == "date_of_birth":
                            # Store the date in ISO format
                            data[f] = value.isoformat()
                        else:
                            data[f] = value
                if data:
                    db(query).update(**data)

                # Update the contact information & details
                record = db(query).select(ptable.pe_id,
                                          limitby=(0, 1)).first()
                if record:
                    pe_id = record.pe_id

                    # (a redundant unconditional pre-select of the EMAIL
                    #  contact was removed here - each branch below reads
                    #  the contact it actually needs)
                    email = _vars["email"]
                    if email:
                        query = (ctable.pe_id == pe_id) & \
                                (ctable.contact_method == "EMAIL") &\
                                (ctable.deleted != True)
                        r = db(query).select(ctable.value,
                                             limitby=(0, 1)).first()
                        if r: # update
                            if email != r.value:
                                db(query).update(value=email)
                        else: # insert
                            ctable.insert(pe_id=pe_id,
                                          contact_method="EMAIL",
                                          value=email)
                    if mobile:
                        query = (ctable.pe_id == pe_id) & \
                                (ctable.contact_method == "SMS") &\
                                (ctable.deleted != True)
                        r = db(query).select(ctable.value,
                                             limitby=(0, 1)).first()
                        if r: # update
                            if mobile != r.value:
                                db(query).update(value=mobile)
                        else: # insert
                            ctable.insert(pe_id=pe_id,
                                          contact_method="SMS",
                                          value=mobile)
                    occupation = _vars["occupation"]
                    if occupation:
                        pdtable = s3db.pr_person_details
                        query = (pdtable.person_id == person_id) & \
                                (pdtable.deleted != True)
                        r = db(query).select(pdtable.occupation,
                                             limitby=(0, 1)).first()
                        if r: # update
                            if occupation != r.occupation:
                                db(query).update(occupation=occupation)
                        else: # insert
                            pdtable.insert(person_id=person_id,
                                           occupation=occupation)
            else:
                # Create a new person record

                # Filter out location_id (location selector form values
                # being processed only after this widget has been validated)
                _vars = Storage([(k, _vars[k])
                                 for k in _vars if k != "location_id"])

                # Validate the email
                email, error = email_validate(_vars.email, None)
                if error:
                    return (None, error)

                # Validate and add the person record
                for f in ptable._filter_fields(_vars):
                    value, error = validate(ptable, None, f, _vars[f])
                    if error:
                        return (None, error)
                    elif f == "date_of_birth" and \
                         value:
                        _vars[f] = value.isoformat()
                person_id = ptable.insert(**ptable._filter_fields(_vars))

                # Need to update post_vars here,
                # for some reason this doesn't happen through validation alone
                request.post_vars.update(person_id=str(person_id))

                if person_id:
                    # Update the super-entities
                    s3db.update_super(ptable, dict(id=person_id))
                    # Read the created pe_id
                    query = (ptable.id == person_id)
                    person = db(query).select(ptable.pe_id,
                                              limitby=(0, 1)).first()
                    # Add contact information as provided
                    if _vars.email:
                        ctable.insert(pe_id=person.pe_id,
                                      contact_method="EMAIL",
                                      value=_vars.email)
                    if mobile:
                        ctable.insert(pe_id=person.pe_id,
                                      contact_method="SMS",
                                      value=_vars.mobile_phone)
                    if _vars.occupation:
                        s3db.pr_person_details.insert(person_id = person_id,
                                                      occupation = _vars.occupation)
                else:
                    # Something went wrong
                    return (None, self.error_message or \
                                  T("Could not add person record"))

        return (person_id, None)
# =============================================================================
class IS_ADD_PERSON_WIDGET2(Validator):
    """
        Validator for S3AddPersonWidget2

        On POST, creates or updates the person record from the widget's
        sub-form fields (full name, date of birth, email, mobile phone,
        home phone, occupation) and returns the person_id.

        @ToDo: get working human_resource_id
    """

    def __init__(self,
                 error_message=None):
        self.error_message = error_message
        # Tell s3_mark_required that this validator doesn't accept NULL values
        self.mark_required = True

    # -------------------------------------------------------------------------
    def __call__(self, value):
        """
            @param value: the person_id (may be empty when creating)
            @return: (person_id, None) on success, else (value, error)
        """
        if current.response.s3.bulk:
            # Pointless in imports
            return (value, None)

        person_id = None
        if value:
            try:
                person_id = int(value)
            except:
                pass

        request = current.request
        if request.env.request_method == "POST":
            if "import" in request.args:
                # Widget Validator not appropriate for this context
                return (person_id, None)

            T = current.T
            db = current.db
            s3db = current.s3db

            ptable = db.pr_person
            ctable = s3db.pr_contact

            def name_split(name):
                """
                    Split a full name into First Middle Last

                    NB This *will* cause issues as people often have
                    multi-word firstnames and surnames:
                    http://stackoverflow.com/questions/259634/splitting-a-persons-name-into-forename-and-surname
                    http://stackoverflow.com/questions/159567/how-can-i-parse-the-first-middle-and-last-name-from-a-full-name-field-in-sql
                """
                # https://code.google.com/p/python-nameparser/
                from nameparser import HumanName
                name = HumanName(name)
                return name.first, name.middle, name.last

            def email_validate(value, person_id):
                """ Validate the email address (required/format/uniqueness) """
                error_message = T("Please enter a valid email address")
                if value is not None:
                    value = value.strip()
                # No email?
                if not value:
                    email_required = \
                        current.deployment_settings.get_hrm_email_required()
                    if email_required:
                        return (value, error_message)
                    return (value, None)
                # Valid email?
                value, error = IS_EMAIL()(value)
                if error:
                    return value, error_message
                # Unique email?
                query = (ctable.deleted != True) & \
                        (ctable.contact_method == "EMAIL") & \
                        (ctable.value == value)
                if person_id:
                    # Exclude this person's own address
                    query &= (ctable.pe_id == ptable.pe_id) & \
                             (ptable.id != person_id)
                email = db(query).select(ctable.id, limitby=(0, 1)).first()
                if email:
                    error_message = T("This email-address is already registered.")
                    return value, error_message
                # Ok!
                return value, None

            _vars = request.post_vars
            mobile = _vars["mobile_phone"]
            if mobile:
                # Validate the phone number
                regex = re.compile(single_phone_number_pattern)
                if not regex.match(mobile):
                    error = T("Invalid phone number")
                    return (person_id, error)

            home_phone = _vars.get("home_phone", None)
            if home_phone:
                # Validate the phone number
                regex = re.compile(single_phone_number_pattern)
                if not regex.match(home_phone):
                    error = T("Invalid phone number")
                    return (person_id, error)

            validate = current.manager.validate
            if person_id:
                # Filter out location_id (location selector form values
                # being processed only after this widget has been validated)
                _vars = Storage([(k, _vars[k])
                                 for k in _vars if k != "location_id"])

                # Separate the Name into components
                first_name, middle_name, last_name = name_split(_vars["full_name"])
                _vars["first_name"] = first_name
                _vars["middle_name"] = middle_name
                _vars["last_name"] = last_name

                # Validate and update the person record
                query = (ptable.id == person_id)
                data = Storage()
                for f in ptable._filter_fields(_vars):
                    value, error = validate(ptable, None, f, _vars[f])
                    if error:
                        return (person_id, error)
                    if value:
                        if f == "date_of_birth":
                            # Store the date in ISO format
                            data[f] = value.isoformat()
                        else:
                            data[f] = value
                if data:
                    db(query).update(**data)

                # Update the contact information & details
                record = db(query).select(ptable.pe_id,
                                          limitby=(0, 1)).first()
                if record:
                    pe_id = record.pe_id

                    # (a redundant unconditional pre-select of the EMAIL
                    #  contact was removed here - each branch below reads
                    #  the contact it actually needs)
                    email = _vars["email"]
                    if email:
                        query = (ctable.pe_id == pe_id) & \
                                (ctable.contact_method == "EMAIL") &\
                                (ctable.deleted != True)
                        r = db(query).select(ctable.value,
                                             limitby=(0, 1)).first()
                        if r: # update
                            if email != r.value:
                                db(query).update(value=email)
                        else: # insert
                            ctable.insert(pe_id=pe_id,
                                          contact_method="EMAIL",
                                          value=email)
                    if mobile:
                        query = (ctable.pe_id == pe_id) & \
                                (ctable.contact_method == "SMS") &\
                                (ctable.deleted != True)
                        r = db(query).select(ctable.value,
                                             limitby=(0, 1)).first()
                        if r: # update
                            if mobile != r.value:
                                db(query).update(value=mobile)
                        else: # insert
                            ctable.insert(pe_id=pe_id,
                                          contact_method="SMS",
                                          value=mobile)
                    if home_phone:
                        query = (ctable.pe_id == pe_id) & \
                                (ctable.contact_method == "HOME_PHONE") &\
                                (ctable.deleted != True)
                        r = db(query).select(ctable.value,
                                             limitby=(0, 1)).first()
                        if r: # update
                            if home_phone != r.value:
                                db(query).update(value=home_phone)
                        else: # insert
                            ctable.insert(pe_id=pe_id,
                                          contact_method="HOME_PHONE",
                                          value=home_phone)
                    occupation = _vars.get("occupation", None)
                    if occupation:
                        pdtable = s3db.pr_person_details
                        query = (pdtable.person_id == person_id) & \
                                (pdtable.deleted != True)
                        r = db(query).select(pdtable.occupation,
                                             limitby=(0, 1)).first()
                        if r: # update
                            if occupation != r.occupation:
                                db(query).update(occupation=occupation)
                        else: # insert
                            pdtable.insert(person_id=person_id,
                                           occupation=occupation)
            else:
                # Create a new person record

                # Filter out location_id (location selector form values
                # being processed only after this widget has been validated)
                _vars = Storage([(k, _vars[k])
                                 for k in _vars if k != "location_id"])

                # Validate the email
                email, error = email_validate(_vars.email, None)
                if error:
                    return (None, error)

                # Separate the Name into components
                first_name, middle_name, last_name = name_split(_vars["full_name"])
                _vars["first_name"] = first_name
                _vars["middle_name"] = middle_name
                _vars["last_name"] = last_name

                # Validate and add the person record
                for f in ptable._filter_fields(_vars):
                    value, error = validate(ptable, None, f, _vars[f])
                    if error:
                        # Fix: previously returned (None, None), silently
                        # discarding the validation error
                        return (None, error)
                    elif f == "date_of_birth" and \
                         value:
                        _vars[f] = value.isoformat()
                person_id = ptable.insert(**ptable._filter_fields(_vars))

                # Need to update post_vars here,
                # for some reason this doesn't happen through validation alone
                request.post_vars.update(person_id=str(person_id))

                if person_id:
                    # Update the super-entities
                    s3db.update_super(ptable, dict(id=person_id))
                    # Read the created pe_id
                    query = (ptable.id == person_id)
                    person = db(query).select(ptable.pe_id,
                                              limitby=(0, 1)).first()
                    # Add contact information as provided
                    if _vars.email:
                        ctable.insert(pe_id=person.pe_id,
                                      contact_method="EMAIL",
                                      value=_vars.email)
                    if mobile:
                        ctable.insert(pe_id=person.pe_id,
                                      contact_method="SMS",
                                      value=_vars.mobile_phone)
                    if home_phone:
                        ctable.insert(pe_id=person.pe_id,
                                      contact_method="HOME_PHONE",
                                      value=_vars.home_phone)
                    if _vars.occupation:
                        s3db.pr_person_details.insert(person_id = person_id,
                                                      occupation = _vars.occupation)
                else:
                    # Something went wrong
                    return (person_id, self.error_message or \
                                       T("Could not add person record"))

        return (person_id, None)
# =============================================================================
class IS_PROCESSED_IMAGE(Validator):
    """
    Uses an S3ImageCropWidget to allow the user to crop/scale images and
    processes the results sent by the browser.
    @param file_cb: callback that returns the file for this field
    @param error_message: the error message to be returned
    @param image_bounds: the boundaries for the processed image
    @param upload_path: upload path for the image
    """
    def __init__(self,
                 field_name,
                 file_cb,
                 error_message="No image was specified!",
                 image_bounds=(300, 300),
                 upload_path=None,
                 ):
        # field_name: name of the upload field in the request vars
        self.field_name = field_name
        self.file_cb = file_cb
        self.error_message = error_message
        self.image_bounds = image_bounds
        self.upload_path = upload_path
    def __call__(self, value):
        """
        Validate/process the image value.
        Returns a (value, error) tuple per the web2py validator contract:
        either the uploaded/decoded file, or (None, None) after scheduling
        an async crop, or (value, error_message) on failure.
        """
        if current.response.s3.bulk:
            # Pointless in imports
            return (value, None)
        r = current.request
        vars = r.post_vars
        if r.env.request_method == "GET":
            # Nothing to process on GET
            return (value, None)
        # If there's a newly uploaded file, accept it. It'll be processed in
        # the update form.
        # NOTE: A FieldStorage with data evaluates as False (odd!)
        file = vars.get(self.field_name)
        if file not in ("", None):
            return (file, None)
        # No direct upload: look for client-side cropped data instead
        encoded_file = vars.get("imagecrop-data")
        file = self.file_cb()
        if not (encoded_file or file):
            # Neither a crop result nor an existing file -> error
            return value, current.T(self.error_message)
        # Decode the base64-encoded image from the client side image crop
        # process, if that worked.
        if encoded_file:
            import base64
            import uuid
            try:
                from cStringIO import StringIO
            except ImportError:
                from StringIO import StringIO
            # Data URI format: "<filename>;<datatype>;<enctype>,<base64 payload>"
            metadata, encoded_file = encoded_file.split(",")
            filename, datatype, enctype = metadata.split(";")
            f = Storage()
            # Prefix with a UUID to avoid filename collisions on upload
            f.filename = uuid.uuid4().hex + filename
            f.file = StringIO(base64.decodestring(encoded_file))
            return (f, None)
        # Crop the image, if we've got the crop points.
        points = vars.get("imagecrop-points")
        if points and file:
            import os
            # points come in as a comma-separated string of floats
            points = map(float, points.split(","))
            if not self.upload_path:
                path = os.path.join(r.folder, "uploads", "images", file)
            else:
                path = os.path.join(self.upload_path, file)
            # Crop asynchronously via the task scheduler
            # NOTE(review): Python 2 only - "async" is a reserved word in Py3
            current.s3task.async("crop_image",
                args=[path] + points + [self.image_bounds[0]])
        return (None, None)
# =============================================================================
class IS_UTC_OFFSET(Validator):
    """
    Validates a given string value as UTC offset in the format +/-HHMM

    @param error_message: the error message to be returned

    @note: anything before the trailing offset specification is ignored
           and replaced by 'UTC ' in the return value if the string
           validates
    """
    def __init__(self,
                 error_message="invalid UTC offset!"
                 ):
        self.error_message = error_message
    # -------------------------------------------------------------------------
    def __call__(self, value):
        """Validate value; returns ("UTC +/-HHMM", None) or (value, error)."""
        if not value or not isinstance(value, str):
            return (value, self.error_message)
        stripped = value.strip()
        seconds = S3DateTime.get_offset_value(stripped)
        # Offset must lie strictly within +/-23:59 (86340 seconds)
        if seconds is None or not (-86340 < seconds < 86340):
            return (value, self.error_message)
        # Add a leading 'UTC ', otherwise leading '+' and '0' would be
        # stripped away by web2py
        return ("UTC " + stripped[-5:], None)
# =============================================================================
class IS_UTC_DATETIME(Validator):
    """
    Validates a given value as datetime string and returns the
    corresponding UTC datetime.
    Example:
        - INPUT(_type="text", _name="name", requires=IS_UTC_DATETIME())
    @param format: strptime/strftime format template string, for
                   directives refer to your strptime implementation
    @param error_message: error message to be returned
    @param utc_offset: offset to UTC in seconds, if not specified, the
                       value is considered to be UTC
    @param minimum: the minimum acceptable datetime
    @param maximum: the maximum acceptable datetime
    @note:
        datetime has to be in the ISO8960 format YYYY-MM-DD hh:mm:ss,
        with an optional trailing UTC offset specified as +/-HHMM
        (+ for eastern, - for western timezones)
    """
    def __init__(self,
                 format=None,
                 error_message=None,
                 utc_offset=None,
                 minimum=None,
                 maximum=None):
        # Fall back to the deployment's configured L10n datetime format
        if format is None:
            self.format = format = str(current.deployment_settings.get_L10n_datetime_format())
        else:
            self.format = format = str(format)
        self.utc_offset = utc_offset
        self.minimum = minimum
        self.maximum = maximum
        # Shift min/max into local time for display in the error message
        delta = timedelta(seconds=self.delta())
        min_local = minimum and minimum + delta or None
        max_local = maximum and maximum + delta or None
        # Pick an error message matching the configured bounds
        if error_message is None:
            if minimum is None and maximum is None:
                error_message = current.T("enter date and time")
            elif minimum is None:
                error_message = current.T("enter date and time on or before %(max)s")
            elif maximum is None:
                error_message = current.T("enter date and time on or after %(min)s")
            else:
                error_message = current.T("enter date and time in range %(min)s %(max)s")
        if min_local:
            min = min_local.strftime(format)
        else:
            min = ""
        if max_local:
            max = max_local.strftime(format)
        else:
            max = ""
        # Interpolate the formatted bounds into the error message template
        self.error_message = error_message % dict(min = min,
                                                  max = max)
    # -------------------------------------------------------------------------
    def delta(self, utc_offset=None):
        """
        Return the offset to UTC in seconds, resolving it (in order) from
        the given argument, the previously stored offset, or the session.
        Falls back to UTC when the offset string does not validate.
        """
        if utc_offset is not None:
            # override currently set offset
            self.utc_offset = utc_offset
        if self.utc_offset is None:
            # use the session default
            self.utc_offset = current.session.s3.utc_offset
        # Validate and normalize the offset string ("UTC +/-HHMM")
        validate = IS_UTC_OFFSET()
        offset, error = validate(self.utc_offset)
        if error:
            self.utc_offset = "UTC +0000" # fallback to UTC
        else:
            self.utc_offset = offset
        delta = S3DateTime.get_offset_value(self.utc_offset)
        return delta
    # -------------------------------------------------------------------------
    def __call__(self, value):
        """
        Parse the value and convert it to UTC.
        Returns (utc_datetime, None) on success, else (value, error).
        """
        val = value.strip()
        # Get UTC offset
        if len(val) > 5 and val[-5] in ("+", "-") and val[-4:].isdigit():
            # UTC offset specified in dtstr
            dtstr = val[0:-5].strip()
            utc_offset = "UTC %s" % val[-5:]
        else:
            # use default UTC offset
            dtstr = val
            utc_offset = self.utc_offset
        # Offset must be in range -2359 to +2359
        offset = self.delta(utc_offset=utc_offset)
        if offset < -86340 or offset > 86340:
            return (val, self.error_message)
        # Convert into datetime object
        try:
            (y, m, d, hh, mm, ss, t0, t1, t2) = \
                time.strptime(dtstr, self.format)
            dt = datetime(y, m, d, hh, mm, ss)
        except:
            # Retry with appended seconds (":00") for formats that
            # expect them but where the user omitted them
            try:
                (y, m, d, hh, mm, ss, t0, t1, t2) = \
                    time.strptime(dtstr + ":00", self.format)
                dt = datetime(y, m, d, hh, mm, ss)
            except:
                return(value, self.error_message)
        # Validate against minimum/maximum after converting to UTC
        dt_utc = dt - timedelta(seconds=offset)
        if self.minimum and dt_utc < self.minimum or \
           self.maximum and dt_utc > self.maximum:
            return (dt_utc, self.error_message)
        else:
            return (dt_utc, None)
    # -------------------------------------------------------------------------
    def formatter(self, value):
        """
        Format a stored UTC datetime as a local-time string for display,
        appending "+0000" when no local offset applies.
        """
        format = self.format
        offset = self.delta()
        if not value:
            return "-"
        elif offset:
            # Shift into local time before formatting
            dt = value + timedelta(seconds=offset)
            return dt.strftime(format)
        else:
            dt = value
            return dt.strftime(format) + "+0000"
# =============================================================================
class IS_ACL(IS_IN_SET):
    """
    Validator for ACLs

    @attention: Incomplete! Does not validate yet, but just convert.
    """
    def __call__(self, value):
        """
        Convert the selected permission flags into a single ACL bitmask.

        @param value: the value to validate (single flag or list of flags)
        """
        values = value if isinstance(value, (list, tuple)) else [value]
        acl = 0x0000
        for item in values:
            try:
                acl |= int(item)
            except (ValueError, TypeError):
                # Non-numeric flags contribute nothing to the mask
                continue
        return (acl, None)
# =============================================================================
class IS_COMBO_BOX(Validator):
    """
    Designed for use with an Autocomplete.
    - catches any new entries & creates the appropriate record
    @ToDo: Audit
    """
    def __init__(self,
                 tablename,
                 requires, # The normal validator
                 error_message = None,
                 ):
        # tablename: the table in which to look up / create records
        self.tablename = tablename
        self.requires = requires
        self.error_message = error_message
    # -------------------------------------------------------------------------
    def __call__(self, value):
        """
        Validate the value: integers are treated as existing record IDs
        and delegated to the wrapped validator; strings are looked up by
        name and a new record is created if no match exists.
        """
        if not value:
            # Do the normal validation
            return self.requires(value)
        elif isinstance(value, int):
            # If this is an ID then this is an update form
            # @ToDo: Can we assume that?
            # Do the normal validation
            return self.requires(value)
        else:
            # Name => create form
            tablename = self.tablename
            db = current.db
            table = db[tablename]
            # Test for duplicates
            query = (table.name == value)
            r = db(query).select(table.id,
                                 limitby=(0, 1)).first()
            if r:
                # Use Existing record
                value = r.id
                return (value, None)
            # New name: requires create permission on the table
            if not current.auth.s3_has_permission("create", table):
                return (None, current.auth.messages.access_denied)
            value = table.insert(name=value)
            # onaccept
            onaccept = current.s3db.get_config(tablename, "onaccept")
            if onaccept:
                # Run the table's onaccept callback with a minimal form
                onaccept(form=Storage(vars=Storage(id=value)))
            return (value, None)
# =============================================================================
class QUANTITY_INV_ITEM(object):
    """
    For Inventory module:
    validate that a requested quantity does not exceed the quantity
    available in warehouse stock.

    @param db: the database
    @param inv_item_id: the inventory item record ID
    @param item_pack_id: the item pack record ID
    """
    def __init__(self,
                 db,
                 inv_item_id,
                 item_pack_id
                 ):
        self.inv_item_id = inv_item_id
        self.item_pack_id = item_pack_id
        # NOTE(review): this overwrites the thread-global current.db with
        # the passed-in db instead of storing it on self - looks deliberate
        # (legacy pattern), but verify against callers before changing
        current.db = db
    # -------------------------------------------------------------------------
    def __call__(self, value):
        """
        Validate the quantity.
        Returns (value, None) if the quantity is available,
        else (value, error_message).
        """
        db = current.db
        args = current.request.args
        track_quantity = 0
        # Fix: check len(args) BEFORE indexing args[1] - the original order
        # raised an IndexError when fewer than two request args were present
        if len(args) > 2 and args[1] == "track_item":
            # look to see if we already have a quantity stored in the track item
            id = args[2]
            track_record = current.s3db.inv_track_item[id]
            track_quantity = track_record.quantity
            if track_quantity >= float(value):
                # value reduced or unchanged
                return (value, None)
        error = "Invalid Quantity" # @todo: better error catching
        query = (db.inv_inv_item.id == self.inv_item_id) & \
                (db.inv_inv_item.item_pack_id == db.supply_item_pack.id)
        inv_item_record = db(query).select(db.inv_inv_item.quantity,
                                           db.supply_item_pack.quantity,
                                           db.supply_item_pack.name,
                                           limitby = (0, 1)).first() # @todo: this should be a virtual field
        if inv_item_record and value:
            query = (db.supply_item_pack.id == self.item_pack_id)
            send_record = db(query).select(db.supply_item_pack.quantity,
                                           limitby=(0, 1)).first()
            # Compare both sides in base units (quantity * pack size)
            send_quantity = (float(value) - track_quantity) * send_record.quantity
            inv_quantity = inv_item_record.inv_inv_item.quantity * \
                           inv_item_record.supply_item_pack.quantity
            if send_quantity > inv_quantity:
                return (value,
                        "Only %s %s (%s) in the Warehouse Stock." %
                        (inv_quantity,
                         inv_item_record.supply_item_pack.name,
                         inv_item_record.supply_item_pack.quantity)
                        )
            else:
                return (value, None)
        else:
            return (value, error)
    # -------------------------------------------------------------------------
    def formatter(self, value):
        """Pass-through formatter (value is displayed as-is)."""
        return value
# =============================================================================
class IS_IN_SET_LAZY(Validator):
    """
    Like IS_IN_SET but with options obtained from a supplied function.

    Options are instantiated when the validator or its options() method is
    called, so don't need to be generated until it's used. Useful if the
    field is not needed on every request, and does significant processing
    to construct its options, or generates a large collection. If the
    options are just from a database query, one can use IS_ONE_OF instead.

    Raises an exception if an options collection is passed rather than a
    callable, as this is a programming error.

    @param theset_fn: Function of no arguments that returns a collection
    of options and (optionally) labels: a dict/OrderedDict (keys are
    options, values are labels), a list/tuple of 2-element pairs
    (option, label), or a flat collection (labels come from represent,
    or the items themselves).
    @param represent: Function of one argument that returns the label for
    a given option.

    Keyword parameters are the same as for IS_IN_SET, except for labels,
    which is not replaced by a function paralleling theset_fn, since
    ordering is problematic if theset_fn returns a dict.
    """
    def __init__(
        self,
        theset_fn,
        represent=None,
        error_message="value not allowed",
        multiple=False,
        zero="",
        sort=False,
        ):
        self.multiple = multiple
        if not callable(theset_fn):
            raise TypeError("Argument must be a callable.")
        self.theset_fn = theset_fn
        self.theset = None
        self.labels = None
        # Fix: store the represent callback - it was previously dropped,
        # causing a NameError in _make_theset when labels were generated
        # from a flat options collection
        self.represent = represent
        self.error_message = error_message
        self.zero = zero
        self.sort = sort
    # -------------------------------------------------------------------------
    def _make_theset(self):
        """Instantiate the options (and labels) by calling theset_fn."""
        theset = self.theset_fn()
        if theset:
            if isinstance(theset, dict):
                self.theset = [str(item) for item in theset]
                self.labels = theset.values()
            elif isinstance(theset, (tuple, list)): # @ToDo: Can this be a Rows?
                if isinstance(theset[0], (tuple, list)) and len(theset[0]) == 2:
                    # (option, label) pairs
                    self.theset = [str(item) for item, label in theset]
                    self.labels = [str(label) for item, label in theset]
                else:
                    # Flat collection of options
                    self.theset = [str(item) for item in theset]
                    if self.represent:
                        self.labels = [self.represent(item) for item in theset]
            else:
                self.theset = theset
        else:
            self.theset = []
    # -------------------------------------------------------------------------
    def options(self, zero=True):
        """Return the (value, label) pairs for the widget."""
        if not self.theset:
            self._make_theset()
        if not self.labels:
            items = [(k, k) for (i, k) in enumerate(self.theset)]
        else:
            items = [(k, self.labels[i]) for (i, k) in enumerate(self.theset)]
        if self.sort:
            items.sort(options_sorter)
        # Prepend the empty-choice unless multiple-select or zero suppressed
        if zero and not self.zero is None and not self.multiple:
            items.insert(0, ("", self.zero))
        return items
    # -------------------------------------------------------------------------
    def __call__(self, value):
        """
        Validate that value (or each of its elements, if multiple)
        is a member of the lazily-instantiated options set.
        """
        if not self.theset:
            self._make_theset()
        if self.multiple:
            ### if below was values = re.compile("[\w\-:]+").findall(str(value))
            if isinstance(value, (str, unicode)):
                values = [value]
            elif isinstance(value, (tuple, list)):
                values = value
            elif not value:
                values = []
        else:
            values = [value]
        failures = [x for x in values if not x in self.theset]
        if failures and self.theset:
            if self.multiple and (value is None or value == ""):
                return ([], None)
            return (value, self.error_message)
        if self.multiple:
            # multiple may be a (min, max) range restricting the selection count
            if isinstance(self.multiple, (tuple, list)) and \
               not self.multiple[0] <= len(values) < self.multiple[1]:
                return (values, self.error_message)
            return (values, None)
        return (value, None)
# =============================================================================
class IS_TIME_INTERVAL_WIDGET(Validator):
    """
    Simple validator for the S3TimeIntervalWidget, returns
    the selected time interval in seconds
    """
    def __init__(self, field):
        # field: the Field this widget belongs to (used to derive the
        # name of the multiplier selector in post_vars)
        self.field = field
    # -------------------------------------------------------------------------
    def __call__(self, value):
        """
        Convert the widget input into seconds (value * multiplier).
        Returns (0, None) when either component is missing or non-numeric.
        """
        try:
            val = int(value)
        except (ValueError, TypeError):
            # Fix: int(None) raises TypeError (not ValueError) when no
            # value was submitted - treat it like an empty input
            return (0, None)
        request = current.request
        _vars = request.post_vars
        try:
            mul = int(_vars[("%s_multiplier" % self.field).replace(".", "_")])
        except (ValueError, TypeError, KeyError):
            # Missing or non-numeric multiplier => no interval
            return (0, None)
        seconds = val * mul
        return (seconds, None)
# END =========================================================================
| mit |
frankiecjunle/yunblog | venv/lib/python2.7/site-packages/pygments/formatter.py | 72 | 2790 | # -*- coding: utf-8 -*-
"""
pygments.formatter
~~~~~~~~~~~~~~~~~~
Base formatter class.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import codecs
from pygments.util import get_bool_opt
from pygments.styles import get_style_by_name
__all__ = ['Formatter']
def _lookup_style(style):
    """Resolve a style name to its Style class; pass classes through."""
    if not isinstance(style, basestring):
        return style
    return get_style_by_name(style)
class Formatter(object):
    """
    Base class for all Pygments formatters: converts a token stream to text.

    Recognized options:

    ``style``
        The style to use, either a style name or a Style subclass
        (default: "default"). Ignored by some formatters, e.g. the
        TerminalFormatter.

    ``full``
        If true, output a "full" (complete, self-contained) document.
        Has no effect for some formatters (default: false).

    ``title``
        Caption for the document when ``full`` is true (default: '').

    ``encoding``
        Encoding name used to convert the Unicode token strings to byte
        strings in the output. If "" or None, Unicode strings are written
        to the output file, which most file-like objects do not support
        (default: None).

    ``outencoding``
        Overrides ``encoding`` if given.
    """

    #: Name of the formatter
    name = None

    #: Shortcuts for the formatter
    aliases = []

    #: fn match rules
    filenames = []

    #: If True, this formatter outputs Unicode strings when no encoding
    #: option is given.
    unicodeoutput = True

    def __init__(self, **options):
        self.style = _lookup_style(options.get('style', 'default'))
        self.full = get_bool_opt(options, 'full', False)
        self.title = options.get('title', '')
        # 'outencoding' takes precedence over 'encoding'; empty strings
        # are normalized to None
        encoding = options.get('encoding', None) or None
        self.encoding = options.get('outencoding', None) or encoding
        self.options = options

    def get_style_defs(self, arg=''):
        """
        Return the style definitions for the current style as a string.

        ``arg`` is an additional argument whose meaning depends on the
        formatter used. Note that ``arg`` can also be a list or tuple
        for some formatters like the html formatter.
        """
        return ''

    def format(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples, and write it into ``outfile``.
        """
        if self.encoding:
            # Wrap the outfile in a StreamWriter so byte output is encoded
            # on the fly
            make_writer = codecs.lookup(self.encoding)[3]
            outfile = make_writer(outfile)
        return self.format_unencoded(tokensource, outfile)
| mit |
tstieven/amiunique | website/venv/lib/python2.7/site-packages/pymongo/cursor_manager.py | 24 | 2085 | # Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DEPRECATED - A manager to handle when cursors are killed after they are
closed.
New cursor managers should be defined as subclasses of CursorManager and can be
installed on a client by calling
:meth:`~pymongo.mongo_client.MongoClient.set_cursor_manager`.
.. versionchanged:: 3.3
Deprecated, for real this time.
.. versionchanged:: 3.0
Undeprecated. :meth:`~pymongo.cursor_manager.CursorManager.close` now
requires an `address` argument. The ``BatchCursorManager`` class is removed.
"""
import warnings
import weakref
from bson.py3compat import integer_types
class CursorManager(object):
    """DEPRECATED - The cursor manager base class."""

    def __init__(self, client):
        """Instantiate the manager.

        :Parameters:
          - `client`: a MongoClient
        """
        warnings.warn("Cursor managers are deprecated.",
                      DeprecationWarning,
                      stacklevel=2)
        # Hold the client weakly so the manager does not keep it alive
        self.__client = weakref.ref(client)

    def close(self, cursor_id, address):
        """Kill a cursor.

        Raises TypeError if cursor_id is not an instance of (int, long).

        :Parameters:
          - `cursor_id`: cursor id to close
          - `address`: the cursor's server's (host, port) pair

        .. versionchanged:: 3.0
           Now requires an `address` argument.
        """
        if isinstance(cursor_id, integer_types):
            self.__client().kill_cursors([cursor_id], address)
        else:
            raise TypeError("cursor_id must be an integer")
| mit |
petewarden/tensorflow | tensorflow/python/kernel_tests/ctc_decoder_ops_test.py | 8 | 8906 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ctc_ops.ctc_loss_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import zip_longest
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import ctc_ops
from tensorflow.python.platform import test
def grouper(iterable, n, fillvalue=None):
  """Yield the data in fixed-length tuples of size n, padding the last
  chunk with fillvalue, e.g. grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx."""
  iterators = [iter(iterable)] * n
  return zip_longest(*iterators, fillvalue=fillvalue)
def flatten(list_of_lists):
  """Lazily chain the elements of each sub-iterable into a single
  iterator (flattens exactly one level of nesting)."""
  nested = iter(list_of_lists)
  return itertools.chain.from_iterable(nested)
class CTCGreedyDecoderTest(test.TestCase):
  """Tests for ctc_greedy_decoder and ctc_beam_search_decoder."""

  def _testCTCDecoder(self,
                      decoder,
                      inputs,
                      seq_lens,
                      log_prob_truth,
                      decode_truth,
                      expected_err_re=None,
                      **decoder_args):
    # Shared driver: run `decoder` on `inputs` and compare the decoded
    # SparseTensors and log probabilities against the ground truth, or
    # (when expected_err_re is set) assert that the op raises.
    inputs_t = [ops.convert_to_tensor(x) for x in inputs]
    # convert inputs_t into a [max_time x batch_size x depth] tensor
    # from a len time python list of [batch_size x depth] tensors
    inputs_t = array_ops.stack(inputs_t)
    with self.cached_session(use_gpu=False) as sess:
      decoded_list, log_probability = decoder(
          inputs_t, sequence_length=seq_lens, **decoder_args)
      # Flatten each decoded SparseTensor into its (ix, vals, shape)
      # components so they can be fetched in a single sess.run
      decoded_unwrapped = list(
          flatten([(st.indices, st.values, st.dense_shape) for st in
                   decoded_list]))
      if expected_err_re is None:
        outputs = sess.run(decoded_unwrapped + [log_probability])
        # Group outputs into (ix, vals, shape) tuples
        output_sparse_tensors = list(grouper(outputs[:-1], 3))
        output_log_probability = outputs[-1]
        # Check the number of decoded outputs (top_paths) match
        self.assertEqual(len(output_sparse_tensors), len(decode_truth))
        # For each SparseTensor tuple, compare (ix, vals, shape)
        for out_st, truth_st, tf_st in zip(output_sparse_tensors, decode_truth,
                                           decoded_list):
          self.assertAllEqual(out_st[0], truth_st[0])  # ix
          self.assertAllEqual(out_st[1], truth_st[1])  # vals
          self.assertAllEqual(out_st[2], truth_st[2])  # shape
          # Compare the shapes of the components with the truth. The
          # `None` elements are not known statically.
          self.assertEqual([None, truth_st[0].shape[1]],
                           tf_st.indices.get_shape().as_list())
          self.assertEqual([None], tf_st.values.get_shape().as_list())
          self.assertShapeEqual(truth_st[2], tf_st.dense_shape)
        # Make sure decoded probabilities match
        self.assertAllClose(output_log_probability, log_prob_truth, atol=1e-6)
      else:
        with self.assertRaisesOpError(expected_err_re):
          sess.run(decoded_unwrapped + [log_probability])

  @test_util.run_deprecated_v1
  def testCTCGreedyDecoder(self):
    """Test two batch entries - best path decoder."""
    max_time_steps = 6
    # depth == 4
    seq_len_0 = 4
    # Per-timestep class probabilities for batch entry 0 (time x depth)
    input_prob_matrix_0 = np.asarray(
        [[1.0, 0.0, 0.0, 0.0],  # t=0
         [0.0, 0.0, 0.4, 0.6],  # t=1
         [0.0, 0.0, 0.4, 0.6],  # t=2
         [0.0, 0.9, 0.1, 0.0],  # t=3
         [0.0, 0.0, 0.0, 0.0],  # t=4 (ignored)
         [0.0, 0.0, 0.0, 0.0]],  # t=5 (ignored)
        dtype=np.float32)
    input_log_prob_matrix_0 = np.log(input_prob_matrix_0)
    seq_len_1 = 5
    # dimensions are time x depth
    input_prob_matrix_1 = np.asarray(
        [
            [0.1, 0.9, 0.0, 0.0],  # t=0
            [0.0, 0.9, 0.1, 0.0],  # t=1
            [0.0, 0.0, 0.1, 0.9],  # t=2
            [0.0, 0.9, 0.1, 0.1],  # t=3
            [0.9, 0.1, 0.0, 0.0],  # t=4
            [0.0, 0.0, 0.0, 0.0]
        ],  # t=5 (ignored)
        dtype=np.float32)
    input_log_prob_matrix_1 = np.log(input_prob_matrix_1)
    # len max_time_steps array of batch_size x depth matrices
    inputs = [
        np.vstack(
            [input_log_prob_matrix_0[t, :], input_log_prob_matrix_1[t, :]])
        for t in range(max_time_steps)
    ]
    # batch_size length vector of sequence_lengths
    seq_lens = np.array([seq_len_0, seq_len_1], dtype=np.int32)
    # batch_size length vector of negative log probabilities
    log_prob_truth = np.array([
        np.sum(-np.log([1.0, 0.6, 0.6, 0.9])),
        np.sum(-np.log([0.9, 0.9, 0.9, 0.9, 0.9]))
    ], np.float32)[:, np.newaxis]
    # decode_truth: one SparseTensor (ix, vals, shape)
    decode_truth = [
        (
            np.array(
                [
                    [0, 0],  # batch 0, 2 outputs
                    [0, 1],
                    [1, 0],  # batch 1, 3 outputs
                    [1, 1],
                    [1, 2]
                ],
                dtype=np.int64),
            np.array(
                [
                    0,
                    1,  # batch 0
                    1,
                    1,
                    0
                ],  # batch 1
                dtype=np.int64),
            # shape is batch x max_decoded_length
            np.array(
                [2, 3], dtype=np.int64)),
    ]
    self._testCTCDecoder(ctc_ops.ctc_greedy_decoder, inputs, seq_lens,
                         log_prob_truth, decode_truth)

  @test_util.run_deprecated_v1
  def testCTCDecoderBeamSearch(self):
    """Test one batch, two beams - hibernating beam search."""
    # max_time_steps == 8
    depth = 6
    seq_len_0 = 5
    # Per-timestep class probabilities (time x depth) for the single batch
    input_prob_matrix_0 = np.asarray(
        [
            [0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908],
            [0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517],
            [0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763],
            [0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655],
            [0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878],
            # Random entry added in at time=5
            [0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]
        ],
        dtype=np.float32)
    # Add arbitrary offset - this is fine
    input_prob_matrix_0 = input_prob_matrix_0 + 2.0
    # len max_time_steps array of batch_size x depth matrices
    inputs = ([
        input_prob_matrix_0[t, :][np.newaxis, :] for t in range(seq_len_0)
    ]  # Pad to max_time_steps = 8
              + 2 * [np.zeros(
                  (1, depth), dtype=np.float32)])
    # batch_size length vector of sequence_lengths
    seq_lens = np.array([seq_len_0], dtype=np.int32)
    # batch_size length vector of log probabilities
    log_prob_truth = np.array(
        [
            -5.811451,  # output beam 0
            -6.63339  # output beam 1
        ],
        np.float32)[np.newaxis, :]
    # decode_truth: two SparseTensors, (ix, values, shape)
    decode_truth = [
        # beam 0, batch 0, two outputs decoded
        (np.array(
            [[0, 0], [0, 1]], dtype=np.int64), np.array(
                [1, 0], dtype=np.int64), np.array(
                    [1, 2], dtype=np.int64)),
        # beam 1, batch 0, one output decoded
        (np.array(
            [[0, 0]], dtype=np.int64), np.array(
                [1], dtype=np.int64), np.array(
                    [1, 1], dtype=np.int64)),
    ]
    # Test correct decoding.
    self._testCTCDecoder(
        ctc_ops.ctc_beam_search_decoder,
        inputs,
        seq_lens,
        log_prob_truth,
        decode_truth,
        beam_width=2,
        top_paths=2)
    # Requesting more paths than the beam width allows.
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                (".*requested more paths than the beam "
                                 "width.*")):
      self._testCTCDecoder(
          ctc_ops.ctc_beam_search_decoder,
          inputs,
          seq_lens,
          log_prob_truth,
          decode_truth,
          beam_width=2,
          top_paths=3)
if __name__ == "__main__":
test.main()
| apache-2.0 |
hayderimran7/tempest | tempest/services/baremetal/v1/json/baremetal_client.py | 23 | 12048 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.baremetal import base
class BaremetalClient(base.BaremetalClient):
"""
Base Tempest REST client for Ironic API v1.
"""
version = '1'
uri_prefix = 'v1'
@base.handle_errors
def list_nodes(self, **kwargs):
"""List all existing nodes."""
return self._list_request('nodes', **kwargs)
@base.handle_errors
def list_chassis(self):
"""List all existing chassis."""
return self._list_request('chassis')
@base.handle_errors
def list_chassis_nodes(self, chassis_uuid):
"""List all nodes associated with a chassis."""
return self._list_request('/chassis/%s/nodes' % chassis_uuid)
@base.handle_errors
def list_ports(self, **kwargs):
"""List all existing ports."""
return self._list_request('ports', **kwargs)
@base.handle_errors
def list_node_ports(self, uuid):
"""List all ports associated with the node."""
return self._list_request('/nodes/%s/ports' % uuid)
@base.handle_errors
def list_nodestates(self, uuid):
"""List all existing states."""
return self._list_request('/nodes/%s/states' % uuid)
@base.handle_errors
def list_ports_detail(self, **kwargs):
"""Details list all existing ports."""
return self._list_request('/ports/detail', **kwargs)
@base.handle_errors
def list_drivers(self):
"""List all existing drivers."""
return self._list_request('drivers')
@base.handle_errors
def show_node(self, uuid):
"""
Gets a specific node.
:param uuid: Unique identifier of the node in UUID format.
:return: Serialized node as a dictionary.
"""
return self._show_request('nodes', uuid)
    @base.handle_errors
    def show_node_by_instance_uuid(self, instance_uuid):
        """
        Gets the node associated with a given instance UUID.

        :param instance_uuid: UUID of the instance deployed on the node.
        :return: Serialized node as a dictionary.
        """
        uri = '/nodes/detail?instance_uuid=%s' % instance_uuid
        return self._show_request('nodes',
                                  uuid=None,
                                  uri=uri)
@base.handle_errors
def show_chassis(self, uuid):
"""
Gets a specific chassis.
:param uuid: Unique identifier of the chassis in UUID format.
:return: Serialized chassis as a dictionary.
"""
return self._show_request('chassis', uuid)
@base.handle_errors
def show_port(self, uuid):
"""
Gets a specific port.
:param uuid: Unique identifier of the port in UUID format.
:return: Serialized port as a dictionary.
"""
return self._show_request('ports', uuid)
@base.handle_errors
def show_port_by_address(self, address):
"""
Gets a specific port by address.
:param address: MAC address of the port.
:return: Serialized port as a dictionary.
"""
uri = '/ports/detail?address=%s' % address
return self._show_request('ports', uuid=None, uri=uri)
def show_driver(self, driver_name):
"""
Gets a specific driver.
:param driver_name: Name of driver.
:return: Serialized driver as a dictionary.
"""
return self._show_request('drivers', driver_name)
@base.handle_errors
def create_node(self, chassis_id=None, **kwargs):
"""
Create a baremetal node with the specified parameters.
:param cpu_arch: CPU architecture of the node. Default: x86_64.
:param cpus: Number of CPUs. Default: 8.
:param local_gb: Disk size. Default: 1024.
:param memory_mb: Available RAM. Default: 4096.
:param driver: Driver name. Default: "fake"
:return: A tuple with the server response and the created node.
"""
node = {'chassis_uuid': chassis_id,
'properties': {'cpu_arch': kwargs.get('cpu_arch', 'x86_64'),
'cpus': kwargs.get('cpus', 8),
'local_gb': kwargs.get('local_gb', 1024),
'memory_mb': kwargs.get('memory_mb', 4096)},
'driver': kwargs.get('driver', 'fake')}
return self._create_request('nodes', node)
@base.handle_errors
def create_chassis(self, **kwargs):
"""
Create a chassis with the specified parameters.
:param description: The description of the chassis.
Default: test-chassis
:return: A tuple with the server response and the created chassis.
"""
chassis = {'description': kwargs.get('description', 'test-chassis')}
return self._create_request('chassis', chassis)
@base.handle_errors
def create_port(self, node_id, **kwargs):
"""
Create a port with the specified parameters.
:param node_id: The ID of the node which owns the port.
:param address: MAC address of the port.
:param extra: Meta data of the port. Default: {'foo': 'bar'}.
:param uuid: UUID of the port.
:return: A tuple with the server response and the created port.
"""
port = {'extra': kwargs.get('extra', {'foo': 'bar'}),
'uuid': kwargs['uuid']}
if node_id is not None:
port['node_uuid'] = node_id
if kwargs['address'] is not None:
port['address'] = kwargs['address']
return self._create_request('ports', port)
@base.handle_errors
def delete_node(self, uuid):
"""
Deletes a node having the specified UUID.
:param uuid: The unique identifier of the node.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('nodes', uuid)
@base.handle_errors
def delete_chassis(self, uuid):
"""
Deletes a chassis having the specified UUID.
:param uuid: The unique identifier of the chassis.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('chassis', uuid)
@base.handle_errors
def delete_port(self, uuid):
"""
Deletes a port having the specified UUID.
:param uuid: The unique identifier of the port.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('ports', uuid)
@base.handle_errors
def update_node(self, uuid, **kwargs):
"""
Update the specified node.
:param uuid: The unique identifier of the node.
:return: A tuple with the server response and the updated node.
"""
node_attributes = ('properties/cpu_arch',
'properties/cpus',
'properties/local_gb',
'properties/memory_mb',
'driver',
'instance_uuid')
patch = self._make_patch(node_attributes, **kwargs)
return self._patch_request('nodes', uuid, patch)
@base.handle_errors
def update_chassis(self, uuid, **kwargs):
"""
Update the specified chassis.
:param uuid: The unique identifier of the chassis.
:return: A tuple with the server response and the updated chassis.
"""
chassis_attributes = ('description',)
patch = self._make_patch(chassis_attributes, **kwargs)
return self._patch_request('chassis', uuid, patch)
@base.handle_errors
def update_port(self, uuid, patch):
"""
Update the specified port.
:param uuid: The unique identifier of the port.
:param patch: List of dicts representing json patches.
:return: A tuple with the server response and the updated port.
"""
return self._patch_request('ports', uuid, patch)
@base.handle_errors
def set_node_power_state(self, node_uuid, state):
"""
Set power state of the specified node.
:param node_uuid: The unique identifier of the node.
:state: desired state to set (on/off/reboot).
"""
target = {'target': state}
return self._put_request('nodes/%s/states/power' % node_uuid,
target)
@base.handle_errors
def validate_driver_interface(self, node_uuid):
"""
Get all driver interfaces of a specific node.
:param uuid: Unique identifier of the node in UUID format.
"""
uri = '{pref}/{res}/{uuid}/{postf}'.format(pref=self.uri_prefix,
res='nodes',
uuid=node_uuid,
postf='validate')
return self._show_request('nodes', node_uuid, uri=uri)
@base.handle_errors
def set_node_boot_device(self, node_uuid, boot_device, persistent=False):
"""
Set the boot device of the specified node.
:param node_uuid: The unique identifier of the node.
:param boot_device: The boot device name.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
"""
request = {'boot_device': boot_device, 'persistent': persistent}
resp, body = self._put_request('nodes/%s/management/boot_device' %
node_uuid, request)
self.expected_success(204, resp.status)
return body
@base.handle_errors
def get_node_boot_device(self, node_uuid):
"""
Get the current boot device of the specified node.
:param node_uuid: The unique identifier of the node.
"""
path = 'nodes/%s/management/boot_device' % node_uuid
resp, body = self._list_request(path)
self.expected_success(200, resp.status)
return body
@base.handle_errors
def get_node_supported_boot_devices(self, node_uuid):
"""
Get the supported boot devices of the specified node.
:param node_uuid: The unique identifier of the node.
"""
path = 'nodes/%s/management/boot_device/supported' % node_uuid
resp, body = self._list_request(path)
self.expected_success(200, resp.status)
return body
@base.handle_errors
def get_console(self, node_uuid):
"""
Get connection information about the console.
:param node_uuid: Unique identifier of the node in UUID format.
"""
resp, body = self._show_request('nodes/states/console', node_uuid)
self.expected_success(200, resp.status)
return resp, body
@base.handle_errors
def set_console_mode(self, node_uuid, enabled):
"""
Start and stop the node console.
:param node_uuid: Unique identifier of the node in UUID format.
:param enabled: Boolean value; whether to enable or disable the
console.
"""
enabled = {'enabled': enabled}
resp, body = self._put_request('nodes/%s/states/console' % node_uuid,
enabled)
self.expected_success(202, resp.status)
return resp, body
| apache-2.0 |
gistic/PublicSpatialImpala | tests/util/plugin_runner.py | 10 | 4037 | #!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imp
import logging
import os
import pkgutil
import sys
# Directory that is scanned for benchmark plugin modules.
PLUGIN_DIR = os.path.join(os.environ['IMPALA_HOME'], 'tests', 'benchmark', 'plugins')

# Setup logging for this module.
logging.basicConfig(level=logging.INFO, format='%(filename)s: %(message)s')
LOG = logging.getLogger('plugin_runner')
LOG.setLevel(level=logging.INFO)
class PluginRunner(object):
  '''Loads user specified plugins, if found, and initializes them.

  Looks in /tests/plugins and searches each module for plugin_name. plugin_name
  is the name of the class that the user has used to implement a plugin. If the
  class is found, it is initialized and added to self.__plugins. If it's not
  found, an error message is logged and the plugin is not loaded.
  '''

  def __init__(self, plugin_infos):
    self.__available_modules = self.__get_plugin_modules()
    self.__get_plugins_from_modules(plugin_infos)

  @property
  def plugins(self):
    return self.__plugins

  def __getstate__(self):
    '''Exclude the (unpicklable, one-shot) module generator when pickling.'''
    state = self.__dict__.copy()
    # Private attributes are name-mangled in __dict__, so the key is
    # '_PluginRunner__available_modules'; the previous plain
    # '__available_modules' key never existed and raised KeyError here.
    del state['_PluginRunner__available_modules']
    return state

  def __get_plugin_modules(self):
    '''Yield each plugin module found in PLUGIN_DIR, importing it lazily.'''
    modules = pkgutil.iter_modules(path=[PLUGIN_DIR])
    for loader, mod_name, ispkg in modules:
      yield __import__("tests.benchmark.plugins.%s" % mod_name, fromlist=[mod_name])

  def __get_plugins_from_modules(self, plugin_infos):
    '''Look for user specified plugins in the available modules.'''
    self.__plugins = []
    plugin_names = []
    for module in self.__available_modules:
      for plugin_info in plugin_infos:
        plugin_name, scope = self.__get_plugin_info(plugin_info)
        plugin_names.append(plugin_name)
        if hasattr(module, plugin_name):
          self.__plugins.append(getattr(module, plugin_name)(scope=scope.lower()))
    # The plugin(s) that could not be loaded are captured in the set difference
    # between plugin_names and self.__plugins.
    # NOTE(review): plugin instances are assumed to expose a __name__
    # attribute (print_plugin_names relies on it too) -- confirm against the
    # plugin base class.
    plugins_found = [p.__name__ for p in self.__plugins]
    LOG.debug("Plugins found: %s" % ', '.join(plugins_found))
    plugins_not_found = set(plugin_names).difference(plugins_found)
    # If the user's entered a plugin that does not exist, raise an error.
    if len(plugins_not_found):
      msg = "Plugin(s) not found: %s" % (','.join(list(plugins_not_found)))
      # Call form works on both Python 2 and 3, unlike the old
      # 'raise RuntimeError, msg' statement syntax.
      raise RuntimeError(msg)

  def __get_plugin_info(self, plugin_info):
    '''Split "<plugin_name>[:<scope>]" into (name, scope).

    The scope defaults to 'query' when not given.
    '''
    info = plugin_info.split(':')
    if len(info) == 1:
      return info[0], 'query'
    elif len(info) == 2:
      return info[0], info[1]
    else:
      raise ValueError("Plugin names specified in the form <plugin_name>[:<scope>]")

  def print_plugin_names(self):
    '''Log every loaded plugin with its scope (debug level).'''
    for p in self.__plugins:
      LOG.debug("Plugin: %s, Scope: %s" % (p.__name__, p.scope))

  def run_plugins_pre(self, context=None, scope=None):
    '''Run the pre-hook of every plugin matching scope (all when scope=None).'''
    if len(self.__plugins) == 0: return
    if context: context['scope'] = scope
    for p in self.__plugins:
      if not scope or p.scope == scope.lower():
        LOG.debug('Running pre-hook for %s at scope %s' % (p.__name__, scope))
        p.run_pre_hook(context=context)

  def run_plugins_post(self, context=None, scope=None):
    '''Run the post-hook of every plugin matching scope (all when scope=None).'''
    if len(self.__plugins) == 0: return
    for p in self.__plugins:
      if not scope or p.scope == scope.lower():
        LOG.debug('Running post-hook for %s at scope %s' % (p.__name__, scope))
        p.run_post_hook(context=context)
| apache-2.0 |
ggs134/snackmean | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/simple_copy.py | 1869 | 1247 | # Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A clone of the default copy.deepcopy that doesn't handle cyclic
structures or complex types except for dicts and lists. This is
because gyp copies so large structure that small copy overhead ends up
taking seconds in a project the size of Chromium."""
class Error(Exception):
  """Raised when deepcopy() encounters a type it does not support."""
  pass
__all__ = ["Error", "deepcopy"]
def deepcopy(x):
  """Deep copy operation on gyp objects such as strings, ints, dicts
  and lists. More than twice as fast as copy.deepcopy but much less
  generic."""
  try:
    return _deepcopy_dispatch[type(x)](x)
  except KeyError:
    # The original code wrote "'...' + '...' % type(x)": % binds tighter
    # than +, so it was applied to the second literal only (which has no
    # placeholder) and the error path raised TypeError instead of the
    # intended message. Adjacent literals concatenate before % here.
    raise Error('Unsupported type %s for deepcopy. Use copy.deepcopy '
                'or expand simple_copy support.' % type(x))
# Dispatch table mapping a concrete type to its copy routine.  'd' is a
# short-lived alias used only while the table is populated; it is deleted
# at the bottom of this section.
_deepcopy_dispatch = d = {}

def _deepcopy_atomic(x):
  # Immutable scalars are shared, not copied.
  return x

# NOTE: Python 2 code -- 'long' and 'unicode' do not exist on Python 3.
for x in (type(None), int, long, float,
          bool, str, unicode, type):
  d[x] = _deepcopy_atomic

def _deepcopy_list(x):
  return [deepcopy(a) for a in x]
d[list] = _deepcopy_list

def _deepcopy_dict(x):
  y = {}
  # iteritems() is the Python 2 spelling of items().
  for key, value in x.iteritems():
    y[deepcopy(key)] = deepcopy(value)
  return y
d[dict] = _deepcopy_dict

del d
| mit |
vipul-sharma20/oh-mainline | vendor/packages/celery/celery/concurrency/gevent.py | 18 | 2529 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import sys
from time import time
# Unless explicitly disabled, monkey-patch the stdlib *before* anything else
# is imported so that sockets/locks used below are gevent-cooperative.
if not os.environ.get("GEVENT_NOPATCH"):
    from gevent import monkey
    monkey.patch_all()
from ..utils import timer2
from .base import apply_target, BasePool
class Schedule(timer2.Schedule):
    """gevent-based timer schedule: each entry becomes a greenlet spawned
    with a delay instead of a heap-managed timer event."""

    def __init__(self, *args, **kwargs):
        from gevent.greenlet import Greenlet, GreenletExit

        class _Greenlet(Greenlet):
            # Give greenlets the cancel() method the timer2 API expects.
            def cancel(self):
                self.kill()

        self._Greenlet = _Greenlet
        self._GreenletExit = GreenletExit
        super(Schedule, self).__init__(*args, **kwargs)
        # Set of currently scheduled greenlets.
        self._queue = set()

    def enter(self, entry, eta=None, priority=0):
        """Schedule ``entry`` to run at absolute time ``eta`` (now if None)."""
        try:
            eta = timer2.to_timestamp(eta)
        except OverflowError:
            if not self.handle_error(sys.exc_info()):
                raise
        now = time()
        if eta is None:
            eta = now
        # Clamp to zero so an eta in the past fires immediately.
        secs = max(eta - now, 0)
        g = self._Greenlet.spawn_later(secs, entry)
        self._queue.add(g)
        # Drop the greenlet from the queue once it finishes.
        g.link(self._entry_exit)
        g.entry = entry
        g.eta = eta
        g.priority = priority
        g.cancelled = False
        return g

    def _entry_exit(self, g):
        try:
            g.kill()
        finally:
            self._queue.discard(g)

    def clear(self):
        """Kill every outstanding greenlet."""
        queue = self._queue
        while queue:
            try:
                queue.pop().kill()
            except KeyError:
                # Queue drained concurrently; nothing left to kill.
                pass

    @property
    def queue(self):
        # Snapshot of pending entries in timer2's (eta, priority, entry) form.
        return [(g.eta, g.priority, g.entry) for g in self._queue]
class Timer(timer2.Timer):
    """Timer that delegates all scheduling to the gevent ``Schedule``.

    No background thread is used, so ``ensure_started``/``start`` are
    no-ops and ``stop`` simply clears any outstanding events.
    """
    Schedule = Schedule

    def ensure_started(self):
        return None

    def start(self):
        return None

    def stop(self):
        self.schedule.clear()
class TaskPool(BasePool):
    """Task pool executing each task in its own gevent greenlet."""
    Timer = Timer

    # Greenlets share one OS thread, so OS-level signal and rlimit
    # handling cannot be applied per task.
    signal_safe = False
    rlimit_safe = False
    is_green = True

    def __init__(self, *args, **kwargs):
        from gevent import spawn_raw
        from gevent.pool import Pool
        self.Pool = Pool
        self.spawn_n = spawn_raw
        super(TaskPool, self).__init__(*args, **kwargs)

    def on_start(self):
        # ``limit`` (from BasePool) caps the number of concurrent greenlets.
        self._pool = self.Pool(self.limit)

    def on_stop(self):
        # NOTE(review): assumes self._pool exists (i.e. on_start ran, or
        # BasePool initializes it to None) -- confirm against BasePool.
        if self._pool is not None:
            self._pool.join()

    def on_apply(self, target, args=None, kwargs=None, callback=None,
            accept_callback=None, **_):
        return self._pool.spawn(apply_target, target, args, kwargs,
                                callback, accept_callback)
CyanogenMod/android_kernel_samsung_epicmtd | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that transparently creates nested dicts on access."""
    nested = defaultdict(autodict)
    return nested
# Per-event registries populated by the define_* helpers below:
# event name -> field name -> {'delim': ..., 'values': {value: name}}.
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    """Register the delimiter used when printing a flag field."""
    field = flag_fields[event_name][field_name]
    field['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
    """Register the printable name for one bit value of a flag field."""
    values = flag_fields[event_name][field_name]['values']
    values[value] = field_str
def define_symbolic_field(event_name, field_name):
    """Symbolic fields need no per-field setup; kept for API symmetry."""
    return None
def define_symbolic_value(event_name, field_name, value, field_str):
    """Register the printable name for one value of a symbolic field."""
    values = symbolic_fields[event_name][field_name]['values']
    values[value] = field_str
def flag_str(event_name, field_name, value):
    """Render bitmask ``value`` as delimiter-separated flag names.

    Uses the names registered via define_flag_field()/define_flag_value().
    Returns "" when the event/field has no registered flags.
    """
    string = ""
    if flag_fields[event_name][field_name]:
        print_delim = 0
        # sorted() works on Python 2 and 3 alike; the old
        # "keys = d.keys(); keys.sort()" breaks on Python 3 where keys()
        # returns a view without .sort().
        keys = sorted(flag_fields[event_name][field_name]['values'].keys())
        for idx in keys:
            if not value and not idx:
                # Zero value with an explicit name registered for 0.
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                # Clear the bits already rendered.
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    """Return the symbolic name registered for ``value``, or "" if none."""
    string = ""
    if symbolic_fields[event_name][field_name]:
        # sorted() works on Python 2 and 3 alike; the old
        # "keys = d.keys(); keys.sort()" breaks on Python 3.
        keys = sorted(symbolic_fields[event_name][field_name]['values'].keys())
        for idx in keys:
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
    return string
# Mapping of trace common_flags bits to their display names.
trace_flags = { 0x00: "NONE", \
                0x01: "IRQS_OFF", \
                0x02: "IRQS_NOSUPPORT", \
                0x04: "NEED_RESCHED", \
                0x08: "HARDIRQ", \
                0x10: "SOFTIRQ" }

def trace_flag_str(value):
    """Render the trace common_flags bitmask as a " | "-separated list."""
    parts = []
    for bit in trace_flags.keys():
        if not value and not bit:
            # Zero mask maps to the explicit "NONE" entry.
            parts.append(trace_flags[bit])
            break
        if bit and (value & bit) == bit:
            parts.append(trace_flags[bit])
            value &= ~bit
    return " | ".join(parts)
def taskState(state):
    """Map a scheduler task-state code to its one-letter display name."""
    states = {
        0 : "R",
        1 : "S",
        2 : "D",
        64: "DEAD"
    }
    # Unknown codes fall back to "Unknown".
    return states.get(state, "Unknown")
class EventHeaders:
    """Common perf event header fields (cpu, timestamp, pid, comm)."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Full event timestamp in nanoseconds."""
        return self.secs * 1000000000 + self.nsecs

    def ts_format(self):
        """Timestamp formatted as 'seconds.microseconds'."""
        usecs = int(self.nsecs / 1000)
        return "%d.%d" % (self.secs, usecs)
blighj/django | tests/model_inheritance/tests.py | 31 | 17258 | from operator import attrgetter
from django.core.exceptions import FieldError, ValidationError
from django.db import connection, models
from django.test import SimpleTestCase, TestCase
from django.test.utils import CaptureQueriesContext, isolate_apps
from .models import (
Base, Chef, CommonInfo, GrandChild, GrandParent, ItalianRestaurant,
MixinModel, ParkingLot, Place, Post, Restaurant, Student, SubBase,
Supplier, Title, Worker,
)
class ModelInheritanceTests(TestCase):
    """Behavior of abstract and multi-table model inheritance."""

    def test_abstract(self):
        # The Student and Worker models both have 'name' and 'age' fields on
        # them and inherit the __str__() method, just as with normal Python
        # subclassing. This is useful if you want to factor out common
        # information for programming purposes, but still completely
        # independent separate models at the database level.
        w1 = Worker.objects.create(name="Fred", age=35, job="Quarry worker")
        Worker.objects.create(name="Barney", age=34, job="Quarry worker")
        s = Student.objects.create(name="Pebbles", age=5, school_class="1B")
        self.assertEqual(str(w1), "Worker Fred")
        self.assertEqual(str(s), "Student Pebbles")
        # The children inherit the Meta class of their parents (if they don't
        # specify their own).
        self.assertSequenceEqual(
            Worker.objects.values("name"), [
                {"name": "Barney"},
                {"name": "Fred"},
            ],
        )
        # Since Student does not subclass CommonInfo's Meta, it has the effect
        # of completely overriding it. So ordering by name doesn't take place
        # for Students.
        self.assertEqual(Student._meta.ordering, [])
        # However, the CommonInfo class cannot be used as a normal model (it
        # doesn't exist as a model).
        with self.assertRaises(AttributeError):
            CommonInfo.objects.all()

    def test_reverse_relation_for_different_hierarchy_tree(self):
        # Even though p.supplier for a Place 'p' (a parent of a Supplier), a
        # Restaurant object cannot access that reverse relation, since it's not
        # part of the Place-Supplier Hierarchy.
        self.assertQuerysetEqual(Place.objects.filter(supplier__name="foo"), [])
        with self.assertRaises(FieldError):
            Restaurant.objects.filter(supplier__name="foo")

    def test_model_with_distinct_accessors(self):
        # The Post model has distinct accessors for the Comment and Link models.
        post = Post.objects.create(title="Lorem Ipsum")
        post.attached_comment_set.create(content="Save $ on V1agr@", is_spam=True)
        post.attached_link_set.create(
            content="The Web framework for perfections with deadlines.",
            url="http://www.djangoproject.com/"
        )
        # The Post model doesn't have an attribute called
        # 'attached_%(class)s_set'.
        with self.assertRaises(AttributeError):
            getattr(post, "attached_%(class)s_set")

    def test_model_with_distinct_related_query_name(self):
        self.assertQuerysetEqual(Post.objects.filter(attached_model_inheritance_comments__is_spam=True), [])
        # The Post model doesn't have a related query accessor based on
        # related_name (attached_comment_set).
        msg = "Cannot resolve keyword 'attached_comment_set' into field."
        with self.assertRaisesMessage(FieldError, msg):
            Post.objects.filter(attached_comment_set__is_spam=True)

    def test_meta_fields_and_ordering(self):
        # Make sure Restaurant and ItalianRestaurant have the right fields in
        # the right order.
        self.assertEqual(
            [f.name for f in Restaurant._meta.fields],
            ["id", "name", "address", "place_ptr", "rating", "serves_hot_dogs",
             "serves_pizza", "chef"]
        )
        self.assertEqual(
            [f.name for f in ItalianRestaurant._meta.fields],
            ["id", "name", "address", "place_ptr", "rating", "serves_hot_dogs",
             "serves_pizza", "chef", "restaurant_ptr", "serves_gnocchi"],
        )
        self.assertEqual(Restaurant._meta.ordering, ["-rating"])

    def test_custompk_m2m(self):
        b = Base.objects.create()
        b.titles.add(Title.objects.create(title="foof"))
        s = SubBase.objects.create(sub_id=b.id)
        b = Base.objects.get(pk=s.id)
        self.assertNotEqual(b.pk, s.pk)
        # Low-level test for related_val
        self.assertEqual(s.titles.related_val, (s.id,))
        # Higher level test for correct query values (title foof not
        # accidentally found).
        self.assertQuerysetEqual(s.titles.all(), [])

    def test_update_parent_filtering(self):
        """
        Updating a field of a model subclass doesn't issue an UPDATE
        query constrained by an inner query (#10399).
        """
        supplier = Supplier.objects.create(
            name='Central market',
            address='610 some street',
        )
        # Capture the expected query in a database agnostic way
        with CaptureQueriesContext(connection) as captured_queries:
            Place.objects.filter(pk=supplier.pk).update(name=supplier.name)
        expected_sql = captured_queries[0]['sql']
        # Capture the queries executed when a subclassed model instance is saved.
        with CaptureQueriesContext(connection) as captured_queries:
            supplier.save(update_fields=('name',))
        for query in captured_queries:
            sql = query['sql']
            if 'UPDATE' in sql:
                self.assertEqual(expected_sql, sql)

    def test_eq(self):
        # Equality doesn't transfer in multitable inheritance.
        self.assertNotEqual(Place(id=1), Restaurant(id=1))
        self.assertNotEqual(Restaurant(id=1), Place(id=1))

    def test_mixin_init(self):
        m = MixinModel()
        self.assertEqual(m.other_attr, 1)

    @isolate_apps('model_inheritance')
    def test_abstract_parent_link(self):
        class A(models.Model):
            pass

        class B(A):
            a = models.OneToOneField('A', parent_link=True, on_delete=models.CASCADE)

            class Meta:
                abstract = True

        class C(B):
            pass

        # The explicit parent_link field on the abstract class is reused as
        # the MTI link of the concrete subclass.
        self.assertIs(C._meta.parents[A], C._meta.get_field('a'))
class ModelInheritanceDataTests(TestCase):
    """Query and persistence behavior across a multi-table inheritance chain
    (Place -> Restaurant -> ItalianRestaurant, plus Supplier/ParkingLot)."""

    @classmethod
    def setUpTestData(cls):
        cls.restaurant = Restaurant.objects.create(
            name="Demon Dogs",
            address="944 W. Fullerton",
            serves_hot_dogs=True,
            serves_pizza=False,
            rating=2,
        )

        chef = Chef.objects.create(name="Albert")
        cls.italian_restaurant = ItalianRestaurant.objects.create(
            name="Ristorante Miron",
            address="1234 W. Ash",
            serves_hot_dogs=False,
            serves_pizza=False,
            serves_gnocchi=True,
            rating=4,
            chef=chef,
        )

    def test_filter_inherited_model(self):
        self.assertQuerysetEqual(
            ItalianRestaurant.objects.filter(address="1234 W. Ash"), [
                "Ristorante Miron",
            ],
            attrgetter("name")
        )

    def test_update_inherited_model(self):
        self.italian_restaurant.address = "1234 W. Elm"
        self.italian_restaurant.save()
        self.assertQuerysetEqual(
            ItalianRestaurant.objects.filter(address="1234 W. Elm"), [
                "Ristorante Miron",
            ],
            attrgetter("name")
        )

    def test_parent_fields_available_for_filtering_in_child_model(self):
        # Parent fields can be used directly in filters on the child model.
        self.assertQuerysetEqual(
            Restaurant.objects.filter(name="Demon Dogs"), [
                "Demon Dogs",
            ],
            attrgetter("name")
        )
        self.assertQuerysetEqual(
            ItalianRestaurant.objects.filter(address="1234 W. Ash"), [
                "Ristorante Miron",
            ],
            attrgetter("name")
        )

    def test_filter_on_parent_returns_object_of_parent_type(self):
        # Filters against the parent model return objects of the parent's type.
        p = Place.objects.get(name="Demon Dogs")
        self.assertIs(type(p), Place)

    def test_parent_child_one_to_one_link(self):
        # Since the parent and child are linked by an automatically created
        # OneToOneField, you can get from the parent to the child by using the
        # child's name.
        self.assertEqual(
            Place.objects.get(name="Demon Dogs").restaurant,
            Restaurant.objects.get(name="Demon Dogs")
        )
        self.assertEqual(
            Place.objects.get(name="Ristorante Miron").restaurant.italianrestaurant,
            ItalianRestaurant.objects.get(name="Ristorante Miron")
        )
        self.assertEqual(
            Restaurant.objects.get(name="Ristorante Miron").italianrestaurant,
            ItalianRestaurant.objects.get(name="Ristorante Miron")
        )

    def test_parent_child_one_to_one_link_on_nonrelated_objects(self):
        # This won't work because the Demon Dogs restaurant is not an Italian
        # restaurant.
        with self.assertRaises(ItalianRestaurant.DoesNotExist):
            Place.objects.get(name="Demon Dogs").restaurant.italianrestaurant

    def test_inherited_does_not_exist_exception(self):
        # An ItalianRestaurant which does not exist is also a Place which does
        # not exist.
        with self.assertRaises(Place.DoesNotExist):
            ItalianRestaurant.objects.get(name="The Noodle Void")

    def test_inherited_multiple_objects_returned_exception(self):
        # MultipleObjectsReturned is also inherited.
        with self.assertRaises(Place.MultipleObjectsReturned):
            Restaurant.objects.get()

    def test_related_objects_for_inherited_models(self):
        # Related objects work just as they normally do.
        s1 = Supplier.objects.create(name="Joe's Chickens", address="123 Sesame St")
        s1.customers .set([self.restaurant, self.italian_restaurant])
        s2 = Supplier.objects.create(name="Luigi's Pasta", address="456 Sesame St")
        s2.customers.set([self.italian_restaurant])
        # This won't work because the Place we select is not a Restaurant (it's
        # a Supplier).
        p = Place.objects.get(name="Joe's Chickens")
        with self.assertRaises(Restaurant.DoesNotExist):
            p.restaurant
        self.assertEqual(p.supplier, s1)
        self.assertQuerysetEqual(
            self.italian_restaurant.provider.order_by("-name"), [
                "Luigi's Pasta",
                "Joe's Chickens"
            ],
            attrgetter("name")
        )
        self.assertQuerysetEqual(
            Restaurant.objects.filter(provider__name__contains="Chickens"), [
                "Ristorante Miron",
                "Demon Dogs",
            ],
            attrgetter("name")
        )
        self.assertQuerysetEqual(
            ItalianRestaurant.objects.filter(provider__name__contains="Chickens"), [
                "Ristorante Miron",
            ],
            attrgetter("name"),
        )

        ParkingLot.objects.create(
            name="Main St", address="111 Main St", main_site=s1
        )
        ParkingLot.objects.create(
            name="Well Lit", address="124 Sesame St", main_site=self.italian_restaurant
        )
        self.assertEqual(
            Restaurant.objects.get(lot__name="Well Lit").name,
            "Ristorante Miron"
        )

    def test_update_works_on_parent_and_child_models_at_once(self):
        # The update() command can update fields in parent and child classes at
        # once (although it executed multiple SQL queries to do so).
        rows = Restaurant.objects.filter(
            serves_hot_dogs=True, name__contains="D"
        ).update(
            name="Demon Puppies", serves_hot_dogs=False
        )
        self.assertEqual(rows, 1)
        r1 = Restaurant.objects.get(pk=self.restaurant.pk)
        self.assertFalse(r1.serves_hot_dogs)
        self.assertEqual(r1.name, "Demon Puppies")

    def test_values_works_on_parent_model_fields(self):
        # The values() command also works on fields from parent models.
        self.assertSequenceEqual(
            ItalianRestaurant.objects.values("name", "rating"), [
                {"rating": 4, "name": "Ristorante Miron"},
            ],
        )

    def test_select_related_works_on_parent_model_fields(self):
        # select_related works with fields from the parent object as if they
        # were a normal part of the model.
        self.assertNumQueries(
            2, lambda: ItalianRestaurant.objects.all()[0].chef
        )
        self.assertNumQueries(
            1, lambda: ItalianRestaurant.objects.select_related("chef")[0].chef
        )

    def test_select_related_defer(self):
        """
        #23370 - Should be able to defer child fields when using
        select_related() from parent to child.
        """
        qs = (Restaurant.objects.select_related("italianrestaurant")
              .defer("italianrestaurant__serves_gnocchi").order_by("rating"))

        # The field was actually deferred
        with self.assertNumQueries(2):
            objs = list(qs.all())
            self.assertTrue(objs[1].italianrestaurant.serves_gnocchi)

        # Model fields where assigned correct values
        self.assertEqual(qs[0].name, 'Demon Dogs')
        self.assertEqual(qs[0].rating, 2)
        self.assertEqual(qs[1].italianrestaurant.name, 'Ristorante Miron')
        self.assertEqual(qs[1].italianrestaurant.rating, 4)

    def test_update_query_counts(self):
        """
        Update queries do not generate unnecessary queries (#18304).
        """
        with self.assertNumQueries(3):
            self.italian_restaurant.save()

    def test_filter_inherited_on_null(self):
        # Refs #12567
        Supplier.objects.create(
            name="Central market",
            address="610 some street",
        )
        self.assertQuerysetEqual(
            Place.objects.filter(supplier__isnull=False), [
                "Central market",
            ],
            attrgetter("name")
        )
        self.assertQuerysetEqual(
            Place.objects.filter(supplier__isnull=True).order_by("name"), [
                "Demon Dogs",
                "Ristorante Miron",
            ],
            attrgetter("name")
        )

    def test_exclude_inherited_on_null(self):
        # Refs #12567
        Supplier.objects.create(
            name="Central market",
            address="610 some street",
        )
        self.assertQuerysetEqual(
            Place.objects.exclude(supplier__isnull=False).order_by("name"), [
                "Demon Dogs",
                "Ristorante Miron",
            ],
            attrgetter("name")
        )
        self.assertQuerysetEqual(
            Place.objects.exclude(supplier__isnull=True), [
                "Central market",
            ],
            attrgetter("name")
        )
@isolate_apps('model_inheritance', 'model_inheritance.tests')
class InheritanceSameModelNameTests(SimpleTestCase):
    """Two same-named concrete subclasses in different apps can share an
    abstract parent when related_name uses %(app_label)s/%(class)s."""

    def test_abstract_fk_related_name(self):
        related_name = '%(app_label)s_%(class)s_references'

        class Referenced(models.Model):
            class Meta:
                app_label = 'model_inheritance'

        class AbstractReferent(models.Model):
            reference = models.ForeignKey(Referenced, models.CASCADE, related_name=related_name)

            class Meta:
                app_label = 'model_inheritance'
                abstract = True

        class Referent(AbstractReferent):
            class Meta:
                app_label = 'model_inheritance'

        LocalReferent = Referent

        class Referent(AbstractReferent):
            class Meta:
                app_label = 'tests'

        ForeignReferent = Referent

        # The template string itself is never used verbatim; each concrete
        # subclass gets its own expanded reverse accessor.
        self.assertFalse(hasattr(Referenced, related_name))
        self.assertTrue(Referenced.model_inheritance_referent_references.rel.model, LocalReferent)
        self.assertTrue(Referenced.tests_referent_references.rel.model, ForeignReferent)
class InheritanceUniqueTests(TestCase):
    """unique and unique_together constraints declared on a parent model are
    enforced when validating a grandchild instance."""

    @classmethod
    def setUpTestData(cls):
        cls.grand_parent = GrandParent.objects.create(
            email='grand_parent@example.com',
            first_name='grand',
            last_name='parent',
        )

    def test_unique(self):
        grand_child = GrandChild(
            email=self.grand_parent.email,
            first_name='grand',
            last_name='child',
        )
        msg = 'Grand parent with this Email already exists.'
        with self.assertRaisesMessage(ValidationError, msg):
            grand_child.validate_unique()

    def test_unique_together(self):
        grand_child = GrandChild(
            email='grand_child@example.com',
            first_name=self.grand_parent.first_name,
            last_name=self.grand_parent.last_name,
        )
        msg = 'Grand parent with this First name and Last name already exists.'
        with self.assertRaisesMessage(ValidationError, msg):
            grand_child.validate_unique()
| bsd-3-clause |
YangChihWei/w16b_test | static/Brython3.1.3-20150514-095342/Lib/imp.py | 637 | 9839 | """This module provides the components needed to build your own __import__
function. Undocumented functions are obsolete.
In most cases it is preferred you consider using the importlib module's
functionality over this module.
"""
# (Probably) need to stay in _imp
from _imp import (lock_held, acquire_lock, release_lock,
get_frozen_object, is_frozen_package,
init_builtin, init_frozen, is_builtin, is_frozen,
_fix_co_filename)
try:
from _imp import load_dynamic
except ImportError:
# Platform doesn't support dynamic loading.
load_dynamic = None
# Directly exposed by this module
from importlib._bootstrap import new_module
from importlib._bootstrap import cache_from_source, source_from_cache
from importlib import _bootstrap
#fixme brython
#from importlib import machinery
import importlib.machinery as machinery
import os
import sys
import tokenize
import warnings
# DEPRECATED module-type codes, kept for callers that still introspect the
# results of the legacy find_module()/get_suffixes() APIs.
SEARCH_ERROR = 0
PY_SOURCE = 1
PY_COMPILED = 2
C_EXTENSION = 3
PY_RESOURCE = 4
PKG_DIRECTORY = 5
C_BUILTIN = 6
PY_FROZEN = 7
PY_CODERESOURCE = 8
IMP_HOOK = 9
def get_magic():
    """Return the magic number for .pyc or .pyo files."""
    magic = _bootstrap._MAGIC_BYTES
    return magic
def get_tag():
    """Return the magic tag for .pyc or .pyo files."""
    tag = sys.implementation.cache_tag
    return tag
def get_suffixes():
    """Return (suffix, mode, type) triples for all importable file kinds.

    Deprecated: use the constants on importlib.machinery instead.
    """
    warnings.warn('imp.get_suffixes() is deprecated; use the constants '
                  'defined on importlib.machinery instead',
                  DeprecationWarning, 2)
    # Same ordering as the historical API: extensions, then source,
    # then bytecode.
    triples = [(s, 'rb', C_EXTENSION) for s in machinery.EXTENSION_SUFFIXES]
    triples += [(s, 'U', PY_SOURCE) for s in machinery.SOURCE_SUFFIXES]
    triples += [(s, 'rb', PY_COMPILED) for s in machinery.BYTECODE_SUFFIXES]
    return triples
class NullImporter:
    """Importer that never finds anything (PEP 302 null import object).

    Construction is refused for the empty path or an existing directory.
    """

    def __init__(self, path):
        if path == '':
            raise ImportError('empty pathname', path='')
        if os.path.isdir(path):
            raise ImportError('existing directory', path=path)

    def find_module(self, fullname):
        """Always returns None."""
        return None
class _HackedGetData:

    """Compatibility support for 'file' arguments of various load_*()
    functions.

    Mixed in before a SourceLoader subclass so that get_data() can serve
    data from a caller-supplied file object instead of reopening the path.
    """

    def __init__(self, fullname, path, file=None):
        super().__init__(fullname, path)
        # Already-open file object handed in by the caller (may be None).
        self.file = file

    def get_data(self, path):
        """Gross hack to contort loader to deal w/ load_*()'s bad API."""
        if self.file and path == self.path:
            # Reuse the caller's file object, reopening it if it has been
            # closed in the meantime.
            if not self.file.closed:
                file = self.file
            else:
                self.file = file = open(self.path, 'r')

            with file:
                # Technically should be returning bytes, but
                # SourceLoader.get_code() just passed what is returned to
                # compile() which can handle str. And converting to bytes would
                # require figuring out the encoding to decode to and
                # tokenize.detect_encoding() only accepts bytes.
                return file.read()
        else:
            return super().get_data(path)
class _LoadSourceCompatibility(_HackedGetData, _bootstrap.SourceFileLoader):

    """Compatibility support for implementing load_source().

    _HackedGetData comes first in the MRO so its get_data() wins.
    """
    # brython fix me
    pass
def load_source(name, pathname, file=None):
    """**DEPRECATED** Load a source module from ``pathname`` and return it."""
    msg = ('imp.load_source() is deprecated; use '
           'importlib.machinery.SourceFileLoader(name, pathname).load_module()'
           ' instead')
    warnings.warn(msg, DeprecationWarning, 2)
    _LoadSourceCompatibility(name, pathname, file).load_module(name)
    module = sys.modules[name]
    # To allow reloading to potentially work, use a non-hacked loader which
    # won't rely on a now-closed file object.
    module.__loader__ = _bootstrap.SourceFileLoader(name, pathname)
    return module
class _LoadCompiledCompatibility(_HackedGetData,
                                 _bootstrap.SourcelessFileLoader):

    """Compatibility support for implementing load_compiled().

    _HackedGetData comes first in the MRO so its get_data() wins.
    """
    # brython fix me
    pass
def load_compiled(name, pathname, file=None):
    """**DEPRECATED** Load a compiled (.pyc/.pyo) module and return it."""
    msg = ('imp.load_compiled() is deprecated; use '
           'importlib.machinery.SourcelessFileLoader(name, pathname).'
           'load_module() instead ')
    warnings.warn(msg, DeprecationWarning, 2)
    _LoadCompiledCompatibility(name, pathname, file).load_module(name)
    module = sys.modules[name]
    # To allow reloading to potentially work, use a non-hacked loader which
    # won't rely on a now-closed file object.
    module.__loader__ = _bootstrap.SourcelessFileLoader(name, pathname)
    return module
def load_package(name, path):
    """**DEPRECATED**

    Load and return the package at *path* under the module name *name*.

    If *path* is a directory, its ``__init__`` file is located among the
    known source and bytecode suffixes before delegating to
    SourceFileLoader.
    """
    msg = ('imp.load_package() is deprecated; use either '
           'importlib.machinery.SourceFileLoader() or '
           'importlib.machinery.SourcelessFileLoader() instead')
    warnings.warn(msg, DeprecationWarning, 2)
    if os.path.isdir(path):
        extensions = (machinery.SOURCE_SUFFIXES[:] +
                      machinery.BYTECODE_SUFFIXES[:])
        # Probe with a separate variable. The previous code reassigned
        # 'path' inside the loop, so the second iteration joined onto the
        # first candidate (e.g. pkg/__init__.py/__init__.pyc) and the
        # bytecode fallback could never match.
        for extension in extensions:
            init_path = os.path.join(path, '__init__' + extension)
            if os.path.exists(init_path):
                path = init_path
                break
        else:
            raise ValueError('{!r} is not a package'.format(path))
    return _bootstrap.SourceFileLoader(name, path).load_module(name)
def load_module(name, file, filename, details):
    """**DEPRECATED**

    Load a module, given information returned by find_module().

    The module name must include the full package name, if any.
    """
    suffix, mode, kind = details
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        # Only read-style modes make sense for importing.
        if mode and (not mode.startswith(('r', 'U')) or '+' in mode):
            raise ValueError('invalid file open mode {!r}'.format(mode))
        if file is None and kind in {PY_SOURCE, PY_COMPILED}:
            msg = 'file object required for import (type code {})'.format(kind)
            raise ValueError(msg)
        if kind == PY_SOURCE:
            return load_source(name, filename, file)
        if kind == PY_COMPILED:
            return load_compiled(name, filename, file)
        if kind == C_EXTENSION and load_dynamic is not None:
            if file is None:
                with open(filename, 'rb') as opened_file:
                    return load_dynamic(name, filename, opened_file)
            return load_dynamic(name, filename, file)
        if kind == PKG_DIRECTORY:
            return load_package(name, filename)
        if kind == C_BUILTIN:
            return init_builtin(name)
        if kind == PY_FROZEN:
            return init_frozen(name)
        msg = "Don't know how to import {} (type code {})".format(name, kind)
        raise ImportError(msg, name=name)
def find_module(name, path=None):
    """**DEPRECATED**

    Search for a module.

    If path is omitted or None, search for a built-in, frozen or special
    module and continue search in sys.path. The module name cannot
    contain '.'; to search for a submodule of a package, pass the
    submodule name and the package's __path__.

    Returns a ``(file, file_path, (suffix, mode, type))`` triple suitable
    for passing to load_module().
    """
    if not isinstance(name, str):
        raise TypeError("'name' must be a str, not {}".format(type(name)))
    elif not isinstance(path, (type(None), list)):
        # Backwards-compatibility: the C implementation raised RuntimeError
        # here, so keep doing that rather than TypeError.
        # Fixed message: it previously said "'list' must be ..." and
        # formatted type(name) instead of type(path).
        raise RuntimeError("'path' must be None or a list, "
                           "not {}".format(type(path)))

    if path is None:
        if is_builtin(name):
            return None, None, ('', '', C_BUILTIN)
        elif is_frozen(name):
            return None, None, ('', '', PY_FROZEN)
        else:
            path = sys.path

    for entry in path:
        # A directory containing an __init__ file is a package.
        package_directory = os.path.join(entry, name)
        for suffix in ['.py', machinery.BYTECODE_SUFFIXES[0]]:
            package_file_name = '__init__' + suffix
            file_path = os.path.join(package_directory, package_file_name)
            if os.path.isfile(file_path):
                return None, package_directory, ('', '', PKG_DIRECTORY)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            for suffix, mode, type_ in get_suffixes():
                file_name = name + suffix
                file_path = os.path.join(entry, file_name)
                if os.path.isfile(file_path):
                    break
            else:
                continue
        break  # Break out of outer loop when breaking out of inner loop.
    else:
        raise ImportError(_bootstrap._ERR_MSG.format(name), name=name)

    encoding = None
    if mode == 'U':
        # Universal-newline text mode: sniff the declared source encoding so
        # the returned file object decodes correctly.
        with open(file_path, 'rb') as file:
            encoding = tokenize.detect_encoding(file.readline)[0]
    file = open(file_path, mode, encoding=encoding)
    return file, file_path, (suffix, mode, type_)
# Modules currently being reloaded, keyed by name; lets a re-entrant reload
# return the in-progress module instead of recursing.
_RELOADING = {}


def reload(module):
    """Reload the module and return it.

    The module must have been successfully imported before.
    """
    if not module or type(module) != type(sys):
        raise TypeError("reload() argument must be module")
    name = module.__name__
    if name not in sys.modules:
        raise ImportError("module {} not in sys.modules".format(name),
                          name=name)
    if name in _RELOADING:
        # Re-entrant reload (e.g. a module reloading itself).
        return _RELOADING[name]
    _RELOADING[name] = module
    try:
        parent_name = name.rpartition('.')[0]
        if parent_name and parent_name not in sys.modules:
            raise ImportError("parent {!r} not in sys.modules".format(parent_name),
                              name=parent_name)
        module.__loader__.load_module(name)
        # The module may have replaced itself in sys.modules!
        return sys.modules[module.__name__]
    finally:
        _RELOADING.pop(name, None)
| agpl-3.0 |
Fusion-Rom/android_external_chromium_org | build/go/go.py | 26 | 1872 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script invokes the go build tool.
Must be called as follows:
python go.py <go-binary> <build directory> <output file> <src directory>
<CGO_CFLAGS> <CGO_LDFLAGS> <go-binary options>
eg.
python go.py /usr/lib/google-golang/bin/go out/build out/a.out .. "-I."
"-L. -ltest" test -c test/test.go
"""
import argparse
import os
import shutil
import sys
def main():
  """Build a Go target in a scratch directory and copy out the artifact.

  Returns a process exit code: 0 on success, non-zero if the go tool failed.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('go_binary')
  parser.add_argument('build_directory')
  parser.add_argument('output_file')
  parser.add_argument('src_root')
  parser.add_argument('cgo_cflags')
  parser.add_argument('cgo_ldflags')
  parser.add_argument('go_option', nargs='*')
  args = parser.parse_args()
  go_binary = args.go_binary
  build_dir = args.build_directory
  out_file = os.path.abspath(args.output_file)
  # The src directory specified is relative. We need this as an absolute path.
  src_root = os.path.abspath(args.src_root)
  # GOPATH must be absolute, and point to one directory up from |src_root|.
  go_path = os.path.abspath(os.path.join(src_root, ".."))
  go_options = args.go_option
  try:
    shutil.rmtree(build_dir, True)
    os.mkdir(build_dir)
  except OSError:
    # The scratch directory may already exist; the build can proceed anyway.
    # (Narrowed from a bare except, which also swallowed KeyboardInterrupt.)
    pass
  old_directory = os.getcwd()
  os.chdir(build_dir)
  os.environ["GOPATH"] = go_path
  os.environ["CGO_CFLAGS"] = args.cgo_cflags
  os.environ["CGO_LDFLAGS"] = args.cgo_ldflags
  # Remember the go tool's status so the build system sees failures;
  # previously main() always returned None and the script exited 0.
  status = os.system("%s %s" % (go_binary, " ".join(go_options)))
  out_files = [f for f in os.listdir(".") if os.path.isfile(f)]
  if len(out_files) > 0:
    shutil.move(out_files[0], out_file)
  os.chdir(old_directory)
  try:
    shutil.rmtree(build_dir, True)
  except OSError:
    pass
  return 1 if status != 0 else 0


if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
xtompok/osmawalk | scripts/filter/parse.py | 1 | 8482 | #!/usr/bin/env python2
import sys
sys.path.append("../imposm-parser")
import time
import sys
import yaml
import pyproj
import premap_pb2 as pb
import types_pb2 as pbtypes
from utils import nodeWays, deleteAloneNodes
from Map import Map
from imposm.parser import OSMParser
scale = 10
class classifier():
    """Parse OSM elements and classify them into premap protobuf objects.

    The *_cb methods are callbacks invoked by imposm.parser.OSMParser; they
    accumulate classified nodes, ways and multipolygons in self.Map.
    """

    def __init__(self, waysConf, areasConf, bridgesConf, tunnelsConf, srtm):
        # UTM zone 33 covers the Prague extract this script is run on.
        self.proj = pyproj.Proj(proj='utm', zone=33, ellps='WGS84')
        self.Map = Map()
        self.waysConf = waysConf
        self.areasConf = areasConf
        self.bridgesConf = bridgesConf
        self.tunnelsConf = tunnelsConf
        self.srtm = srtm

    def calcHeight(self, lon, lat):
        """Bilinearly interpolate the SRTM raster height at (lon, lat)."""
        # SRTM3 rasters carry 1200 cells per degree.
        lon -= self.srtm.minlon
        lat -= self.srtm.minlat
        lon *= 1200
        lat *= 1200
        intlon = int(lon)
        intlat = int(lat)
        ll = self.srtm.raster[intlat][intlon]
        lr = self.srtm.raster[intlat][intlon+1]
        ul = self.srtm.raster[intlat+1][intlon]
        ur = self.srtm.raster[intlat+1][intlon+1]
        # NOTE(review): the fractional weight multiplies the lower cell, not
        # the upper one, which looks swapped for bilinear interpolation.
        # Preserved as-is to keep output identical -- confirm against data.
        left = (lat-intlat)*ll+(1-lat+intlat)*ul
        right = (lat-intlat)*lr+(1-lat+intlat)*ur
        height = (lon-intlon)*left+(1-lon+intlon)*right
        return int(height)

    def _boolFromConf(self, conf, key, tags, current):
        """Look up tags[key] in a True/False table, honouring '*' wildcards.

        The wildcard applies first; an exact value match overrides it. If
        neither matches, *current* is returned unchanged.
        """
        if conf[key].keys() == ["*"]:
            current = conf[key]["*"]
        val = tags[key]
        if val in conf[key]:
            current = conf[key][val]
        return current

    def ways_cb(self, ways):
        """Way callback: classify each way and keep the renderable ones.

        Now consistently reads the configuration tables stored on self in
        __init__ instead of same-named module globals.
        """
        for osmid, tags, refs in ways:
            pbway = pb.Way()
            pbway.id = int(osmid)
            pbway.refs.extend(map(int, refs))
            priority = -1
            if "boundary" in tags:
                # Administrative boundaries are not physical features.
                continue
            for key in tags.keys():
                if key in self.waysConf:
                    # A lone "*" entry matches any value of this key, but
                    # only while the way still carries the default WAY type.
                    if self.waysConf[key].keys() == ["*"] and pbway.type == pbtypes.WAY:
                        (atype, apriority) = self.waysConf[key]["*"]
                        if priority < apriority:
                            pbway.type = atype
                            priority = apriority
                    val = tags[key]
                    if val in self.waysConf[key]:
                        (atype, apriority) = self.waysConf[key][val]
                        if priority < apriority:
                            pbway.type = atype
                            priority = apriority
                if key in self.areasConf:
                    pbway.area = self._boolFromConf(self.areasConf, key, tags, pbway.area)
                if key in self.bridgesConf:
                    pbway.bridge = self._boolFromConf(self.bridgesConf, key, tags, pbway.bridge)
                if key in self.tunnelsConf:
                    pbway.tunnel = self._boolFromConf(self.tunnelsConf, key, tags, pbway.tunnel)
            pbway.render = pbway.type != pbtypes.IGNORE
            # Drop ways that were ignored or never classified.
            if pbway.type == pbtypes.IGNORE or pbway.type == pbtypes.WAY:
                continue
            self.Map.ways.append(pbway)

    def nodes_cb(self, nodes):
        """Nodes callback: project tagged nodes and attach SRTM heights."""
        for osmid, tags, coords in nodes:
            pbnode = pb.Node()
            pbnode.id = int(osmid)
            pbnode.height = self.calcHeight(coords[0], coords[1])
            (lon, lat) = self.proj(coords[0], coords[1])
            pbnode.lat = int(lat*scale)
            pbnode.lon = int(lon*scale)
            self.Map.nodes.append(pbnode)

    def coords_cb(self, coords):
        """Callback for nodes with no attributes (plain coordinates)."""
        for (osmid, lon, lat) in coords:
            pbnode = pb.Node()
            pbnode.id = int(osmid)
            pbnode.height = self.calcHeight(lon, lat)
            (lon, lat) = self.proj(lon, lat)
            pbnode.lat = int(lat*scale)
            pbnode.lon = int(lon*scale)
            self.Map.nodes.append(pbnode)

    def relations_cb(self, relations):
        """Relations callback: build multipolygons with member roles."""
        for osmid, tags, refs in relations:
            # NOTE(review): this branch processes relations that are NOT
            # tagged type=multipolygon, which looks inverted for code that
            # builds pb.Multipolygon objects. Preserved as-is -- confirm.
            if not ("type" in tags and tags["type"] == "multipolygon"):
                pbpol = pb.Multipolygon()
                priority = -1
                for key in tags.keys():
                    if key in self.waysConf:
                        if self.waysConf[key].keys() == ["*"]:
                            (atype, apriority) = self.waysConf[key]["*"]
                            if priority < apriority:
                                pbpol.type = atype
                                priority = apriority
                        val = tags[key]
                        if val in self.waysConf[key]:
                            (atype, apriority) = self.waysConf[key][val]
                            if priority < apriority:
                                pbpol.type = atype
                                priority = apriority
                pbpol.id = int(osmid)
                pbpol.refs.extend([int(item[0]) for item in refs])
                pbpol.roles.extend([pbpol.OUTER if item[2] == "outer" else pbpol.INNER for item in refs])
                self.Map.multipols.append(pbpol)
class SRTM:
    """Container for an SRTM height raster and its bounding box (degrees)."""
    minlon = 0
    minlat = 0
    maxlon = 0
    maxlat = 0
    raster = [[]]


def loadSRTM(filename):
    """Read an SRTM raster dump: a bbox header line, then rows of heights."""
    with open(filename) as srtmfile:
        srtm = SRTM()
        header = srtmfile.readline().split(" ")
        (srtm.minlon, srtm.minlat, srtm.maxlon, srtm.maxlat) = [int(v) for v in header]
        # Each data row drops its last two tokens (trailing separators in
        # the dump format) before conversion.
        srtm.raster = []
        for row in srtmfile:
            srtm.raster.append([int(v) for v in row.split(" ")[:-2]])
        return srtm
def loadWaysConf(filename):
    """Load the way-classification table from a YAML file.

    Returns {tag_key: {tag_value: (type_enum, priority)}}.
    """
    with open(filename) as conffile:
        config = yaml.load(conffile.read())
    # depends on implementation detail, but no other way
    desc = pbtypes._OBJTYPE
    str2pb = {}
    for enum_value in desc.values:
        str2pb[enum_value.name] = enum_value.number
    ways = {}
    for cat in config.keys():
        catenum = str2pb[cat]
        for priority in config[cat]:
            for key in config[cat][priority]:
                ways.setdefault(key, {})
                for value in config[cat][priority][key]:
                    ways[key][value] = (catenum, priority)
    return ways
def loadBoolConf(filename):
    """Load a True/False OSM-attribute table (area/bridge/tunnel) from YAML.

    Returns {tag_key: {tag_value: bool}}.
    """
    with open(filename) as conffile:
        config = yaml.load(conffile.read())
    boolcfg = {}
    # The YAML file is keyed by the boolean outcome; invert it so lookups
    # go tag-key -> tag-value -> flag. True entries are processed first,
    # matching the original behavior.
    for flag in (True, False):
        for key in config[flag]:
            boolcfg.setdefault(key, {})
            for value in config[flag][key]:
                boolcfg[key][value] = flag
    return boolcfg
def markNonPlanarNodes(amap):
    """Flag every node referenced by a bridge or tunnel way."""
    for way in amap.ways:
        if not (way.bridge or way.tunnel):
            continue
        for nid in way.refs:
            node = amap.nodes[amap.nodesIdx[nid]]
            if way.bridge:
                node.onBridge = True
            if way.tunnel:
                node.inTunnel = True
def parseOSMfile(filename, waysConf, areasConf, bridgesConf, tunnelsConf, srtm):
    """Parse an OSM XML file and return the populated premap Map."""
    clas = classifier(waysConf, areasConf, bridgesConf, tunnelsConf, srtm)
    parser = OSMParser(concurrency=4,
                       ways_callback=clas.ways_cb,
                       nodes_callback=clas.nodes_cb,
                       relations_callback=clas.relations_cb,
                       coords_callback=clas.coords_cb)
    parser.parse(filename)
    # Keep nodes and ways sorted by id and build the id -> index lookup.
    clas.Map.nodes.sort(key=lambda node: node.id)
    clas.Map.ways.sort(key=lambda way: way.id)
    for idx, node in enumerate(clas.Map.nodes):
        clas.Map.nodesIdx[node.id] = idx
    return clas.Map
# Where find YAML config files
configDir="../../config/"
# Where save PBF file
dataDir="../../data/"
# Load config from YAML
waysConf=loadWaysConf(configDir+"types.yaml")
areasConf=loadBoolConf(configDir+"area.yaml")
bridgesConf=loadBoolConf(configDir+"bridge.yaml")
tunnelsConf=loadBoolConf(configDir+"tunnel.yaml")
# Load SRTM file
srtm = loadSRTM("../../osm/heights.txt")
# Parse file
start = time.time()
amap = parseOSMfile("../../osm/praha.osm",waysConf,areasConf,bridgesConf,tunnelsConf,srtm)
end = time.time()
print "Parsing took "+str(end-start)
# Delete nodes without ways
start = time.time()
nodeways=nodeWays(amap)
amap = deleteAloneNodes(amap,nodeways)
end = time.time()
print "Deleting alone nodes took "+str(end-start)
# Mark nodes in tunnels and on bridges
# (requires a fresh id->index table after the deletions above)
amap.updateNodesIdx()
markNonPlanarNodes(amap)
# Write map to file
start = time.time()
# NOTE(review): the protobuf blob is binary but the file is opened in text
# mode ("w"); this only works on Python 2 / POSIX -- consider "wb".
outfile = open(dataDir+"praha-pre.pbf","w")
outfile.write(amap.toPB().SerializeToString())
outfile.close()
end = time.time()
print "Saving took "+str(end-start)
print "Ways:",len(amap.ways)
print "Nodes:",len(amap.nodes)
print "Multipolygons:",len(amap.multipols)
| mit |
nirizr/rematch | idaplugin/rematch/collectors/annotations/annotation.py | 1 | 2204 | import collections
import uuid
from .. import collector
class Annotation(collector.Collector):
  """Marker base class for annotation collectors; adds no behavior."""
  pass
class DependencyAnnotation(Annotation):
  """This class implements utilities related to attribute dependencies.
  Allowing attributes to define dependency relationships between them (such as
  a function prototype and a structure used in it for example).
  For that reason this class assigns and retrieves UUIDs according to class
  and an annotation specific definition (such as a structure name).
  To use attribute dependencies, simply inherit DependencyAnnotation instead of
  Annotation in both the dependent and dependency classes. To define a
  dependency relationship, call depend_on from the dependent class specifying
  the dependency class and unique identifying class defined name of a
  dependency."""

  # Process-wide registries, deliberately shared by all subclasses (class
  # attributes): dependency_uuids lazily mints a uuid4 per dependency id on
  # first access; dependencies accumulates (dependent, dependency) UUID
  # string pairs recorded by depend_on().
  dependency_uuids = collections.defaultdict(uuid.uuid4)
  dependencies = set()

  def __init__(self, *args, **kwargs):
    super(DependencyAnnotation, self).__init__(*args, **kwargs)
    # Reuse the UUID already minted for this (class name, dependency name)
    # pair, so dependents that referenced us earlier resolve to the same id.
    self.uuid = self.dependency_uuids[self.id()]

  def dependency_name(self):
    raise NotImplementedError("DependencyAnnotation classes must implement "
                              "the dependency_name method in a way that will "
                              "return the name used by dependent classes when "
                              "calling depend_on. Object name or offset are "
                              "good choices.")

  @classmethod
  def cls_dependency_id(cls, dependency_name):
    # Keyed by class *name* so identical dependency names used by different
    # annotation classes remain distinct.
    return (cls.__name__, dependency_name)

  def id(self):
    return self.cls_dependency_id(self.dependency_name())

  def serialize(self):
    s = super(DependencyAnnotation, self).serialize()
    if s:
      s["uuid"] = str(self.uuid)
    return s

  def depend_on(self, dependency_class, dependency_name):
    # Record the edge and return the dependency's UUID (minted on demand).
    dependency_id = dependency_class.cls_dependency_id(dependency_name)
    dependency_uuid = self.dependency_uuids[dependency_id]
    self.dependencies.add((str(self.uuid), str(dependency_uuid)))
    return dependency_uuid

  @classmethod
  def get_dependencies(cls):
    # Yield the recorded dependency edges in a serializable form.
    for dependent, dependency in cls.dependencies:
      yield {'dependent': dependent, 'dependency': dependency}
| gpl-3.0 |
lihui7115/ChromiumGStreamerBackend | tools/perf/page_sets/typical_10_mobile.py | 35 | 2852 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class Typical10MobilePage(page_module.Page):
  """A typical mobile page: idle, scroll through the page, idle again."""

  def __init__(self, url, page_set, name=''):
    super(Typical10MobilePage, self).__init__(
        url=url, page_set=page_set, name=name,
        credentials_path='data/credentials.json',
        shared_page_state_class=shared_page_state.SharedMobilePageState)
    self.archive_data_file = 'data/typical_10_mobile.json'

  def RunPageInteractions(self, action_runner):
    # Long idle periods bracket the scroll so power sampling sees both
    # quiescent and active phases.
    action_runner.Wait(20)
    action_runner.ScrollPage()
    action_runner.Wait(20)
class Typical10MobileReloadPage(Typical10MobilePage):
  """Variant that exercises five successive page reloads instead of scrolling."""

  def __init__(self, url, page_set, name=''):
    super(Typical10MobileReloadPage, self).__init__(
        url=url, page_set=page_set, name=name)

  def RunPageInteractions(self, action_runner):
    for _ in range(5):
      action_runner.ReloadPage()
      action_runner.WaitForJavaScriptCondition(
          'document.readyState === "complete"')
# URLs chosen to span popular sites, scripts, languages and layouts so the
# power numbers reflect typical mobile browsing.
urls_list = [
  # Why: Top site
  'http://m.facebook.com/barackobama',
  # Why: Wikipedia article with lots of pictures, German language
  'http://de.m.wikipedia.org/wiki/K%C3%B6lner_Dom',
  # Why: current top Q&A on popular Japanese site
  'http://m.chiebukuro.yahoo.co.jp/detail/q10136829180',
  # Why: news article on popular site
  'http://m.huffpost.com/us/entry/6004486',
  # Why: news article on popular site
  'http://www.cnn.com/2014/03/31/showbiz/tv/himym-finale/index.html',
  # Why: Popular RTL language site
  'http://m.ynet.co.il',
  # Why: Popular Russian language site
  'http://www.rg.ru/2014/10/21/cska-site.html',
  # Why: Popular shopping site
  'http://m.ebay.com/itm/351157205404',
  # Why: Popular viral site, lots of images
  'http://siriuslymeg.tumblr.com/',
  # Why: Popular Chinese language site.
  'http://wapbaike.baidu.com/',
]
class Typical10MobilePageSet(story.StorySet):
  """10 typical mobile pages, used for power testing."""

  def __init__(self):
    super(Typical10MobilePageSet, self).__init__(
        archive_data_file='data/typical_10_mobile.json',
        cloud_storage_bucket=story.PARTNER_BUCKET)
    for url in urls_list:
      self.AddStory(Typical10MobilePage(url, page_set=self))
class Typical10MobileReloadPageSet(story.StorySet):
  """10 typical mobile pages, used for reloading power testing."""

  def __init__(self):
    super(Typical10MobileReloadPageSet, self).__init__(
        archive_data_file='data/typical_10_mobile.json',
        cloud_storage_bucket=story.PARTNER_BUCKET)
    for url in urls_list:
      self.AddStory(Typical10MobileReloadPage(url, page_set=self))
| bsd-3-clause |
ferranti/watchdog | vendor/rdflib-2.4.0/test/n3.py | 3 | 2707 | from rdflib import *
# N3 test document: term definitions for the n3 model plus example rules.
# Shadowing the builtin 'input' is pre-existing behavior; testModel below
# parses this module-level string and asserts on its structure, so the
# string content must not change.
input = """
# Definitions of terms describing the n3 model
#
@keywords a.
@prefix n3: <#>.
@prefix log: <log.n3#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix : <#> .
@forAll :s, :p, :x, :y, :z.
n3:Statement a rdf:Class .
n3:StatementSet a rdf:Class .
n3:includes a rdfs:Property . # Cf rdf:li
n3:predicate a rdf:Property; rdfs:domain n3:statement .
n3:subject a rdf:Property; rdfs:domain n3:statement .
n3:object a rdf:Property; rdfs:domain n3:statement .
n3:context a rdf:Property; rdfs:domain n3:statement;
rdfs:range n3:StatementSet .
########### Rules
{ :x :p :y . } log:means { [
n3:subject :x;
n3:predicate :p;
n3:object :y ] a log:Truth}.
# Needs more thought ... ideally, we have the implcit AND rules of
# juxtaposition (introduction and elimination)
{
{
{ :x n3:includes :s. } log:implies { :y n3:includes :s. } .
} forall :s1 .
} log:implies { :x log:implies :y } .
{
{
{ :x n3:includes :s. } log:implies { :y n3:includes :s. } .
} forall :s1
} log:implies { :x log:implies :y } .
# I think n3:includes has to be axiomatic builtin. - unless you go to syntax description.
# syntax.n3?
"""
import unittest
from rdflib.Graph import Graph, ConjunctiveGraph
class N3TestCase(unittest.TestCase):
    """Sanity tests for the rdflib N3 parser."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testFileName(self):
        """':foo.txt' is not a valid qname in n3/turtle, so parsing must fail."""
        input = """
@prefix : <http://www.example.com/> .
:foo.txt :p :q .
"""
        g = Graph()
        try:
            g.parse(StringInputSource(input), format="n3")
        except Exception:
            # Any parser error is acceptable. Narrowed from a bare 'except:'
            # which also swallowed KeyboardInterrupt/SystemExit.
            pass
        else:
            self.fail("Didn't get expected result of a parse exception")
        # This isn't the expected result based on my reading of n3 bits
        #s = g.value(predicate=URIRef("http://www.example.com/p"), object=URIRef("http://www.example.com/q"))
        #self.assertEquals(s, URIRef("http://www.example.org/foo.txt"))

    def testModel(self):
        """The module-level n3 document yields 3 formula subjects and 13 contexts."""
        g = ConjunctiveGraph()
        g.parse(StringInputSource(input), format="n3")
        i = 0
        for s, p, o in g:
            if isinstance(s, Graph):
                i += 1
        self.assertEqual(i, 3)
        self.assertEqual(len(list(g.contexts())), 13)
        g.close()

    def testParse(self):
        """Smoke test: parse a remote n3 document (requires network access)."""
        g = ConjunctiveGraph()
        g.parse("http://groups.csail.mit.edu/dig/2005/09/rein/examples/troop42-policy.n3", format="n3")


if __name__ == '__main__':
    unittest.main()
| agpl-3.0 |
igemsoftware/SYSU-Software2013 | project/Python27_32/Lib/site-packages/gevent_websocket-0.3.6-py2.7.egg/geventwebsocket/websocket.py | 3 | 11900 | import struct
from errno import EINTR
from gevent.coros import Semaphore
from python_fixes import makefile, is_closed
from exceptions import FrameTooLargeException, WebSocketError
class WebSocket(object):
    """Shared base for the hixie-76 and hybi websocket implementations."""

    def _encode_text(self, text):
        # Outgoing text must be UTF-8 encoded byte strings; byte strings
        # pass through unchanged.
        if isinstance(text, unicode):
            return text.encode('utf-8')
        return text
class WebSocketHixie(WebSocket):
    # Legacy hixie-76 wire format: a text message is a 0x00 byte, the UTF-8
    # payload, then a 0xFF terminator. There is no masking and no binary
    # frame type in this protocol revision.

    def __init__(self, socket, environ):
        self.origin = environ.get('HTTP_ORIGIN')
        self.protocol = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL')
        self.path = environ.get('PATH_INFO')
        self.fobj = socket.makefile()
        # Serializes writers so interleaved greenlets cannot corrupt frames.
        self._writelock = Semaphore(1)
        self._write = socket.sendall

    def send(self, message):
        # Frame = 0x00 + UTF-8 payload + 0xFF terminator.
        message = self._encode_text(message)

        with self._writelock:
            self._write("\x00" + message + "\xFF")

    def close(self):
        # Idempotent: a second call is a no-op because fobj is cleared.
        if self.fobj is not None:
            self.fobj.close()
            self.fobj = None
            self._write = None

    def _message_length(self):
        # Decode the hixie base-128 varint length (high bit = continuation).
        length = 0

        while True:
            if self.fobj is None:
                raise WebSocketError('Connection closed unexpectedly while reading message length')

            byte_str = self.fobj.read(1)

            if not byte_str:
                # EOF before any length byte: treat as an empty message.
                return 0
            else:
                byte = ord(byte_str)
                if byte != 0x00:
                    length = length * 128 + (byte & 0x7f)
                    if (byte & 0x80) != 0x80:
                        break

        return length

    def _read_until(self):
        # Collect payload bytes until the 0xFF frame terminator.
        # NOTE(review): if the peer disconnects mid-message, read(1) returns
        # '' and ord('') raises TypeError rather than WebSocketError.
        bytes = []
        read = self.fobj.read

        while True:
            if self.fobj is None:
                msg = ''.join(bytes)
                raise WebSocketError('Connection closed unexpectedly while reading message: %r' % msg)

            byte = read(1)
            if ord(byte) != 0xff:
                bytes.append(byte)
            else:
                break

        return ''.join(bytes)

    def receive(self):
        # Return the next text message as unicode, or None once the
        # connection is closed.
        read = self.fobj.read

        while self.fobj is not None:
            frame_str = read(1)

            if not frame_str:
                # Clean EOF from the peer.
                self.close()
                return
            else:
                frame_type = ord(frame_str)

            if frame_type == 0x00:
                bytes = self._read_until()
                return bytes.decode("utf-8", "replace")
            else:
                raise WebSocketError("Received an invalid frame_type=%r" % frame_type)
class WebSocketHybi(WebSocket):
    # RFC 6455 (hybi) implementation. Frame opcodes:
    OPCODE_TEXT = 0x1
    OPCODE_BINARY = 0x2
    OPCODE_CLOSE = 0x8
    OPCODE_PING = 0x9
    OPCODE_PONG = 0xA

    def __init__(self, socket, environ):
        self.origin = environ.get('HTTP_SEC_WEBSOCKET_ORIGIN')
        self.protocol = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', 'unknown')
        self.path = environ.get('PATH_INFO')
        # Accumulates payload fragments of an in-progress fragmented message.
        self._chunks = bytearray()
        # Serializes writers so interleaved greenlets cannot corrupt frames.
        self._writelock = Semaphore(1)
        self.socket = socket
        self._write = socket.sendall
        self.fobj = makefile(socket)
        # Populated from a received CLOSE frame.
        self.close_code = None
        self.close_message = None
        # Guards against two greenlets reading the same connection.
        self._reading = False

    def _parse_header(self, data):
        # Decode the first two bytes of a frame: FIN/RSV/opcode and
        # MASK/length. Raises (and closes) on protocol violations.
        if len(data) != 2:
            self._close()
            raise WebSocketError('Incomplete read while reading header: %r' % data)

        first_byte, second_byte = struct.unpack('!BB', data)

        fin = (first_byte >> 7) & 1
        rsv1 = (first_byte >> 6) & 1
        rsv2 = (first_byte >> 5) & 1
        rsv3 = (first_byte >> 4) & 1
        opcode = first_byte & 0xf

        # frame-fin = %x0 ; more frames of this message follow
        #           / %x1 ; final frame of this message
        # frame-rsv1 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
        # frame-rsv2 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
        # frame-rsv3 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
        if rsv1 or rsv2 or rsv3:
            self.close(1002)
            raise WebSocketError('Received frame with non-zero reserved bits: %r' % str(data))

        if opcode > 0x7 and fin == 0:
            # Control frames (opcode >= 0x8) must not be fragmented.
            self.close(1002)
            raise WebSocketError('Received fragmented control frame: %r' % str(data))

        # NOTE(review): the next two error message texts do not quite match
        # their conditions (e.g. "non-zero opcode" on a zero-opcode check);
        # kept byte-identical since callers may match on them.
        if len(self._chunks) > 0 and fin == 0 and not opcode:
            self.close(1002)
            raise WebSocketError('Received new fragment frame with non-zero opcode: %r' % str(data))

        if len(self._chunks) > 0 and fin == 1 and (self.OPCODE_TEXT <= opcode <= self.OPCODE_BINARY):
            self.close(1002)
            raise WebSocketError('Received new unfragmented data frame during fragmented message: %r' % str(data))

        has_mask = (second_byte >> 7) & 1
        length = (second_byte) & 0x7f

        # Control frames MUST have a payload length of 125 bytes or less
        if opcode > 0x7 and length > 125:
            self.close(1002)
            raise FrameTooLargeException("Control frame payload cannot be larger than 125 bytes: %r" % str(data))

        return fin, opcode, has_mask, length

    def receive_frame(self):
        """Return the next frame from the socket."""
        fobj = self.fobj

        if fobj is None:
            return

        if is_closed(fobj):
            return

        read = self.fobj.read
        assert not self._reading, 'Reading is not possible from multiple greenlets'
        self._reading = True

        try:
            data0 = read(2)

            if not data0:
                # Clean EOF before a new frame started.
                self._close()
                return

            fin, opcode, has_mask, length = self._parse_header(data0)

            # RFC 6455 5.1: client-to-server frames must be masked.
            if not has_mask and length:
                self.close(1002)
                raise WebSocketError('Message from client is not masked')

            # Extended payload length: 2 bytes for 126, 8 bytes for 127.
            if length < 126:
                data1 = ''
            elif length == 126:
                data1 = read(2)

                if len(data1) != 2:
                    self.close()
                    raise WebSocketError('Incomplete read while reading 2-byte length: %r' % (data0 + data1))

                length = struct.unpack('!H', data1)[0]
            else:
                assert length == 127, length
                data1 = read(8)

                if len(data1) != 8:
                    self.close()
                    raise WebSocketError('Incomplete read while reading 8-byte length: %r' % (data0 + data1))

                length = struct.unpack('!Q', data1)[0]

            mask = read(4)
            if len(mask) != 4:
                self._close()
                raise WebSocketError('Incomplete read while reading mask: %r' % (data0 + data1 + mask))

            mask = struct.unpack('!BBBB', mask)

            if length:
                payload = read(length)
                if len(payload) != length:
                    self._close()
                    args = (length, len(payload))
                    raise WebSocketError('Incomplete read: expected message of %s bytes, got %s bytes' % args)
            else:
                payload = ''

            if payload:
                # Unmask in place: XOR each byte with the rotating 4-byte key.
                payload = bytearray(payload)

                for i in xrange(len(payload)):
                    payload[i] = payload[i] ^ mask[i % 4]

            return fin, opcode, payload
        finally:
            self._reading = False
            # If close() ran while we were blocked in read(), finish the
            # teardown it deferred.
            if self.fobj is None:
                fobj.close()

    def _receive(self):
        """Return the next text or binary message from the socket."""
        opcode = None
        result = bytearray()

        while True:
            frame = self.receive_frame()

            if frame is None:
                if result:
                    raise WebSocketError('Peer closed connection unexpectedly')
                return

            f_fin, f_opcode, f_payload = frame

            if f_opcode in (self.OPCODE_TEXT, self.OPCODE_BINARY):
                # First (or only) data frame of a message.
                if opcode is None:
                    opcode = f_opcode
                else:
                    raise WebSocketError('The opcode in non-fin frame is expected to be zero, got %r' % (f_opcode, ))
            elif not f_opcode:
                # Continuation frame; only valid mid-message.
                if opcode is None:
                    self.close(1002)
                    raise WebSocketError('Unexpected frame with opcode=0')
            elif f_opcode == self.OPCODE_CLOSE:
                if len(f_payload) >= 2:
                    self.close_code = struct.unpack('!H', str(f_payload[:2]))[0]
                    self.close_message = f_payload[2:]
                elif f_payload:
                    # A 1-byte close payload is malformed (RFC 6455 5.5.1).
                    self._close()
                    raise WebSocketError('Invalid close frame: %s %s %s' % (f_fin, f_opcode, repr(f_payload)))

                code = self.close_code
                if code is None or (code >= 1000 and code < 5000):
                    # Echo a normal close for valid (or absent) close codes.
                    self.close()
                else:
                    self.close(1002)
                    raise WebSocketError('Received invalid close frame: %r %r' % (code, self.close_message))

                return
            elif f_opcode == self.OPCODE_PING:
                self.send_frame(f_payload, opcode=self.OPCODE_PONG)
                continue
            elif f_opcode == self.OPCODE_PONG:
                # Unsolicited pongs are ignored per RFC 6455 5.5.3.
                continue
            else:
                self._close()  # XXX should send proper reason?
                raise WebSocketError("Unexpected opcode=%r" % (f_opcode, ))

            result.extend(f_payload)
            if f_fin:
                break

        if opcode == self.OPCODE_TEXT:
            return result, False
        elif opcode == self.OPCODE_BINARY:
            return result, True
        else:
            raise AssertionError('internal serror in gevent-websocket: opcode=%r' % (opcode, ))

    def receive(self):
        # Public read API: text messages are decoded to unicode; invalid
        # UTF-8 closes the connection with 1007 (invalid payload data).
        result = self._receive()
        if not result:
            return result

        message, is_binary = result
        if is_binary:
            return message
        else:
            try:
                return message.decode('utf-8')
            except ValueError:
                self.close(1007)
                raise

    def send_frame(self, message, opcode):
        """Send a frame over the websocket with message as its payload"""
        if self.socket is None:
            raise WebSocketError('The connection was closed')

        # FIN bit set: this implementation never fragments outgoing frames.
        header = chr(0x80 | opcode)

        if isinstance(message, unicode):
            message = message.encode('utf-8')

        msg_length = len(message)

        # Payload length encoding per RFC 6455 5.2.
        if msg_length < 126:
            header += chr(msg_length)
        elif msg_length < (1 << 16):
            header += chr(126) + struct.pack('!H', msg_length)
        elif msg_length < (1 << 63):
            header += chr(127) + struct.pack('!Q', msg_length)
        else:
            raise FrameTooLargeException()

        try:
            combined = header + message
        except TypeError:
            # str header + bytearray/buffer payload: send in two writes
            # under the same lock.
            with self._writelock:
                self._write(header)
                self._write(message)
        else:
            with self._writelock:
                self._write(combined)

    def send(self, message, binary=None):
        """Send a frame over the websocket with message as its payload"""
        if binary is None:
            # Infer the frame type from the payload type.
            binary = not isinstance(message, (str, unicode))

        if binary:
            return self.send_frame(message, self.OPCODE_BINARY)
        else:
            return self.send_frame(message, self.OPCODE_TEXT)

    def close(self, code=1000, message=''):
        """Close the websocket, sending the specified code and message"""
        if self.socket is not None:
            message = self._encode_text(message)
            self.send_frame(struct.pack('!H%ds' % len(message), code, message), opcode=self.OPCODE_CLOSE)
            self._close()

    def _close(self):
        # Tear down without sending a CLOSE frame. The file object is left
        # open if a reader is currently blocked; receive_frame()'s finally
        # clause closes it once the read returns.
        if self.socket is not None:
            self.socket = None
            self._write = None

            if not self._reading:
                self.fobj.close()

            self.fobj = None
| mit |
arokem/nipype | nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py | 9 | 2525 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.slicer.filtering.resamplescalarvectordwivolume import ResampleScalarVectorDWIVolume
def test_ResampleScalarVectorDWIVolume_inputs():
    """Auto-generated nose generator test: every expected trait metadata
    value must match the interface's declared input spec."""
    input_map = dict(Inverse_ITK_Transformation=dict(argstr='--Inverse_ITK_Transformation ',
    ),
    Reference=dict(argstr='--Reference %s',
    ),
    args=dict(argstr='%s',
    ),
    centered_transform=dict(argstr='--centered_transform ',
    ),
    defField=dict(argstr='--defField %s',
    ),
    default_pixel_value=dict(argstr='--default_pixel_value %f',
    ),
    direction_matrix=dict(argstr='--direction_matrix %s',
    sep=',',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    hfieldtype=dict(argstr='--hfieldtype %s',
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    image_center=dict(argstr='--image_center %s',
    ),
    inputVolume=dict(argstr='%s',
    position=-2,
    ),
    interpolation=dict(argstr='--interpolation %s',
    ),
    notbulk=dict(argstr='--notbulk ',
    ),
    number_of_thread=dict(argstr='--number_of_thread %d',
    ),
    origin=dict(argstr='--origin %s',
    ),
    outputVolume=dict(argstr='%s',
    hash_files=False,
    position=-1,
    ),
    rotation_point=dict(argstr='--rotation_point %s',
    ),
    size=dict(argstr='--size %s',
    sep=',',
    ),
    spaceChange=dict(argstr='--spaceChange ',
    ),
    spacing=dict(argstr='--spacing %s',
    sep=',',
    ),
    spline_order=dict(argstr='--spline_order %d',
    ),
    terminal_output=dict(nohash=True,
    ),
    transform=dict(argstr='--transform %s',
    ),
    transform_matrix=dict(argstr='--transform_matrix %s',
    sep=',',
    ),
    transform_order=dict(argstr='--transform_order %s',
    ),
    transformationFile=dict(argstr='--transformationFile %s',
    ),
    window_function=dict(argstr='--window_function %s',
    ),
    )
    inputs = ResampleScalarVectorDWIVolume.input_spec()

    # One yielded assertion per (trait, metadata key) pair.
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ResampleScalarVectorDWIVolume_outputs():
    """Auto-generated nose generator test for the interface's output spec."""
    output_map = dict(outputVolume=dict(position=-1,
    ),
    )
    outputs = ResampleScalarVectorDWIVolume.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
joakim-hove/ert | cmake/create_cmakelists.py | 9 | 2387 | #!/usr/bin/env python
from os import listdir
from os.path import isfile, join, isdir, islink
import sys
def findFilesAndDirectories(directory):
    """Split a directory listing into regular files and subdirectories.

    CMakeLists.txt and symlinks are excluded from the files list; both
    lists come back sorted.
    """
    files = []
    directories = []
    for entry in listdir(directory):
        path = join(directory, entry)
        if isdir(path):
            directories.append(entry)
        elif isfile(path) and entry != "CMakeLists.txt" and not islink(path):
            files.append(entry)
    return sorted(files), sorted(directories)
def findRelativeModulePath(directory):
    """Return the portion of *directory* after the last ``python/`` marker.

    @type directory: str
    """
    marker = "python/"
    start = directory.rfind(marker) + len(marker)
    return directory[start:]
def createPythonSources(files, test_sources=False):
    """Render a CMake ``set(PYTHON_SOURCES ...)`` (or ``TEST_SOURCES``)
    block listing the ``.py`` files in *files*.

    Returns the empty string when no ``.py`` files are present.

    BUG FIX: the original emitted the ``set(...`` header based on the
    *unfiltered* list and only appended the closing ``)`` when the
    filtered list was non-empty — so a directory containing only
    non-Python files produced an unterminated ``set(`` block.  Filtering
    first makes header and footer consistent.
    """
    result = ""
    files = [f for f in files if f.endswith(".py")]
    if len(files) > 0:
        result = "set(%s\n" % ("TEST_SOURCES" if test_sources else "PYTHON_SOURCES")
        for f in files:
            result += " " + str(f) + "\n"
        result += ")"
    return result
def addSubDirectories(directories):
    """Render one CMake ``add_subdirectory(...)`` line per directory."""
    return "".join("add_subdirectory(%s)\n" % d for d in directories)
def addPythonPackage(relative_module_path, test_sources=False):
    """Render the ``add_python_package()`` CMake call for one module path.

    Test packages use the TEST_SOURCES variable and are not installed.
    """
    module_name = relative_module_path.replace("/", ".")
    source_var = "TEST_SOURCES" if test_sources else "PYTHON_SOURCES"
    install_flag = "False" if test_sources else "True"
    return "add_python_package(\"python.%s\" ${PYTHON_INSTALL_PREFIX}/%s \"${%s}\" %s)" % (
        module_name, relative_module_path, source_var, install_flag)
def addInclude(filename):
    """Return the full text of *filename* (used to inline local.cmake)."""
    with open(filename) as handle:
        return handle.read()
# Script entry: regenerate <dir>/CMakeLists.txt for the directory passed as
# the first command-line argument.  No argument validation is performed —
# a missing argv[1] raises IndexError.
files, directories = findFilesAndDirectories(sys.argv[1])
module_path = findRelativeModulePath(sys.argv[1])
output_file = join(sys.argv[1], "CMakeLists.txt")
# Directories under "tests" are installed as test sources (see addPythonPackage).
test_sources = module_path.startswith("tests")
with open(output_file, "w+") as text_file:
    text_file.write(createPythonSources(files, test_sources=test_sources))
    text_file.write("\n\n")
    text_file.write(addPythonPackage(module_path, test_sources=test_sources))
    text_file.write("\n\n")
    text_file.write(addSubDirectories(directories))
    # A local.cmake file, when present, is inlined verbatim at the end.
    if "local.cmake" in files:
        text_file.write("\n\n")
        text_file.write(addInclude(join(sys.argv[1], "local.cmake")))
| gpl-3.0 |
prasannav7/ggrc-core | src/ggrc_basic_permissions/roles/Creator.py | 2 | 7128 | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
scope = "System"
description = """
This role grants a user basic object creation and editing permission.
"""
def _owned_by_current_user(object_type):
    """Permission term granting access to *object_type* instances whose
    ``owners`` list contains the current user.

    Factored out because the original repeated this exact dict literal for
    over twenty object types; the produced value is unchanged.
    """
    return {
        "type": object_type,
        "terms": {
            "list_property": "owners",
            "value": "$current_user"
        },
        "condition": "contains"
    }


# Base permission terms shared by the read and update permission lists.
# Plain strings grant unconditional access to that type; dicts are
# conditional terms evaluated by the permissions engine.
owner_base = [
    "Categorization",
    "Category",
    "ControlCategory",
    "ControlAssertion",
    _owned_by_current_user("Issue"),
    _owned_by_current_user("Assessment"),
    _owned_by_current_user("Control"),
    _owned_by_current_user("DataAsset"),
    _owned_by_current_user("AccessGroup"),
    _owned_by_current_user("Directive"),
    _owned_by_current_user("Contract"),
    _owned_by_current_user("Policy"),
    _owned_by_current_user("Regulation"),
    _owned_by_current_user("Standard"),
    _owned_by_current_user("Facility"),
    _owned_by_current_user("Market"),
    _owned_by_current_user("Objective"),
    "ObjectDocument",
    "ObjectOwner",
    "ObjectPerson",
    _owned_by_current_user("Option"),
    _owned_by_current_user("OrgGroup"),
    _owned_by_current_user("Vendor"),
    _owned_by_current_user("Product"),
    _owned_by_current_user("Section"),
    _owned_by_current_user("Clause"),
    _owned_by_current_user("SystemOrProcess"),
    _owned_by_current_user("System"),
    _owned_by_current_user("Process"),
    _owned_by_current_user("Project"),
    {
        # BackgroundTasks are visible to whoever created them.
        "type": "BackgroundTask",
        "terms": {
            "property_name": "modified_by",
            "value": "$current_user"
        },
        "condition": "is"
    },
    "CustomAttributeDefinition",
    "CustomAttributeValue",
]
# Read permissions: everything in owner_base plus relationship traversal
# (read access propagates across a Relationship when the user can read its
# source/destination) and a few globally readable types.
owner_read = owner_base + [
    {
        "type": "Relationship",
        "terms": {
            "property_name": "source,destination",
            "action": "read"
        },
        "condition": "relationship",
    },
    "Role",
    "UserRole",
    "Context",
    "Person",
]
# Update permissions: owner_base plus relationship traversal (update access
# propagates when the user can update the relationship endpoints) and the
# user's own comments.
owner_update = owner_base + [
    {
        "type": "Relationship",
        "terms": {
            "property_name": "source,destination",
            "action": "update"
        },
        "condition": "relationship",
    },
    {
        # A user may edit only comments they themselves created.
        "type": "Comment",
        "terms": {
            "property_name": "modified_by",
            "value": "$current_user"
        },
        "condition": "is"
    },
]
# Action -> permission-term mapping for the Creator role.
permissions = {
    "read": owner_read,
    "create": [
        # BUG FIX: "Workflow" previously had no trailing comma, so Python's
        # implicit string-literal concatenation fused it with the next entry
        # into the bogus type "WorkflowCategorization", silently dropping
        # create permission for both Workflow and Categorization.
        "Workflow",
        "Categorization",
        "Category",
        "ControlCategory",
        "ControlAssertion",
        "Control",
        "Comment",
        "Assessment",
        "Issue",
        "DataAsset",
        "AccessGroup",
        "Directive",
        "Contract",
        "Policy",
        "Regulation",
        "Standard",
        "Document",
        "Facility",
        "Help",
        "Market",
        "Objective",
        "ObjectDocument",
        "ObjectPerson",
        "Option",
        "OrgGroup",
        "Vendor",
        "PopulationSample",
        "Product",
        "Project",
        {
            # Creating a relationship requires update access to its endpoints.
            "type": "Relationship",
            "terms": {
                "property_name": "source,destination",
                "action": "update"
            },
            "condition": "relationship",
        },
        "Section",
        "Clause",
        "SystemOrProcess",
        "System",
        "Process",
        {
            # Ownership records may only be created by whoever last modified
            # the ownable object.
            "type": "ObjectOwner",
            "terms": {
                "property_name": "ownable.modified_by",
                "value": "$current_user"
            },
            "condition": "is"
        },
        "Program",
        "Context",
        {
            "type": "BackgroundTask",
            "terms": {
                "property_name": "modified_by",
                "value": "$current_user"
            },
            "condition": "is"
        },
    ],
    "view_object_page": owner_read,
    "update": owner_update,
    "delete": owner_update,
}
| apache-2.0 |
dnlm92/chokoretto | temp/venv/lib/python2.7/site-packages/setuptools/command/bdist_egg.py | 306 | 17184 | """setuptools.command.bdist_egg
Build .egg distributions"""
from distutils.errors import DistutilsSetupError
from distutils.dir_util import remove_tree, mkpath
from distutils import log
from types import CodeType
import sys
import os
import marshal
import textwrap
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from setuptools.compat import basestring
from setuptools.extension import Library
from setuptools import Command
try:
    # Python 2.7 or >=3.2: the modern sysconfig module is available.
    from sysconfig import get_path, get_python_version

    def _get_purelib():
        # Location of the pure-Python site-packages directory.
        return get_path("purelib")
except ImportError:
    # Older interpreters: fall back to distutils' equivalent helpers.
    from distutils.sysconfig import get_python_lib, get_python_version

    def _get_purelib():
        # get_python_lib(False) == the non-platform-specific library dir.
        return get_python_lib(False)
def strip_module(filename):
    """Derive a stub module name from an extension file name.

    Drops the last extension (when present) and a trailing ``module``
    suffix, e.g. ``foomodule.so`` -> ``foo``.
    """
    root = os.path.splitext(filename)[0] if '.' in filename else filename
    if root.endswith('module'):
        root = root[:-len('module')]
    return root
def write_stub(resource, pyfile):
    """Write a ``.py`` stub loader to *pyfile* that, when imported, loads
    the shared-object *resource* (a file name inside the same package)
    via ``imp.load_dynamic``.

    The template is a runtime string: its exact contents become executable
    code inside the egg, so it must not be reformatted.
    """
    _stub_template = textwrap.dedent("""
        def __bootstrap__():
            global __bootstrap__, __loader__, __file__
            import sys, pkg_resources, imp
            __file__ = pkg_resources.resource_filename(__name__, %r)
            __loader__ = None; del __bootstrap__, __loader__
            imp.load_dynamic(__name__,__file__)
        __bootstrap__()
        """).lstrip()
    with open(pyfile, 'w') as f:
        # %r quotes the resource name so it round-trips as a Python literal.
        f.write(_stub_template % resource)
class bdist_egg(Command):
    """distutils/setuptools command that builds a ``.egg`` distribution:
    installs the project into a temporary tree, generates stub loaders for
    C extensions, writes EGG-INFO metadata, and zips the result."""

    description = "create an \"egg\" distribution"

    user_options = [
        ('bdist-dir=', 'b',
         "temporary directory for creating the distribution"),
        ('plat-name=', 'p', "platform name to embed in generated filenames "
                            "(default: %s)" % get_build_platform()),
        ('exclude-source-files', None,
         "remove all .py files from the generated egg"),
        ('keep-temp', 'k',
         "keep the pseudo-installation tree around after " +
         "creating the distribution archive"),
        ('dist-dir=', 'd',
         "directory to put final built distributions in"),
        ('skip-build', None,
         "skip rebuilding everything (for testing/debugging)"),
    ]

    boolean_options = [
        'keep-temp', 'skip-build', 'exclude-source-files'
    ]

    def initialize_options(self):
        # distutils contract: declare every option with a neutral default;
        # real values are filled in by finalize_options().
        self.bdist_dir = None
        self.plat_name = None
        self.keep_temp = 0
        self.dist_dir = None
        self.skip_build = 0
        self.egg_output = None
        self.exclude_source_files = None

    def finalize_options(self):
        # egg_info must run first so name/version are known.
        ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
        self.egg_info = ei_cmd.egg_info
        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'egg')
        if self.plat_name is None:
            self.plat_name = get_build_platform()
        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
        if self.egg_output is None:
            # Compute filename of the output egg; the platform tag is only
            # embedded when the distribution has compiled extensions.
            basename = Distribution(
                None, None, ei_cmd.egg_name, ei_cmd.egg_version,
                get_python_version(),
                self.distribution.has_ext_modules() and self.plat_name
            ).egg_name()
            self.egg_output = os.path.join(self.dist_dir, basename + '.egg')

    def do_install_data(self):
        # Hack for packages that install data to install's --install-lib
        self.get_finalized_command('install').install_lib = self.bdist_dir
        site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
        old, self.distribution.data_files = self.distribution.data_files, []
        for item in old:
            if isinstance(item, tuple) and len(item) == 2:
                if os.path.isabs(item[0]):
                    realpath = os.path.realpath(item[0])
                    normalized = os.path.normcase(realpath)
                    if normalized == site_packages or normalized.startswith(
                        site_packages + os.sep
                    ):
                        # Rewrite absolute site-packages targets as paths
                        # relative to the egg root.
                        item = realpath[len(site_packages) + 1:], item[1]
                        # XXX else: raise ???
            self.distribution.data_files.append(item)
        try:
            log.info("installing package data to %s" % self.bdist_dir)
            self.call_command('install_data', force=0, root=None)
        finally:
            # Restore the original data_files even if install_data fails.
            self.distribution.data_files = old

    def get_outputs(self):
        return [self.egg_output]

    def call_command(self, cmdname, **kw):
        """Invoke reinitialized command `cmdname` with keyword args"""
        for dirname in INSTALL_DIRECTORY_ATTRS:
            kw.setdefault(dirname, self.bdist_dir)
        kw.setdefault('skip_build', self.skip_build)
        kw.setdefault('dry_run', self.dry_run)
        cmd = self.reinitialize_command(cmdname, **kw)
        self.run_command(cmdname)
        return cmd

    def run(self):
        # Generate metadata first
        self.run_command("egg_info")
        # We run install_lib before install_data, because some data hacks
        # pull their data path from the install_lib command.
        log.info("installing library code to %s" % self.bdist_dir)
        instcmd = self.get_finalized_command('install')
        old_root = instcmd.root
        instcmd.root = None
        if self.distribution.has_c_libraries() and not self.skip_build:
            self.run_command('build_clib')
        cmd = self.call_command('install_lib', warn_dir=0)
        instcmd.root = old_root
        all_outputs, ext_outputs = self.get_ext_outputs()
        self.stubs = []
        to_compile = []
        # For every compiled extension, write a .py stub loader next to it
        # so the egg can be imported even when zipped.
        for (p, ext_name) in enumerate(ext_outputs):
            filename, ext = os.path.splitext(ext_name)
            pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
                                  '.py')
            self.stubs.append(pyfile)
            log.info("creating stub loader for %s" % ext_name)
            if not self.dry_run:
                write_stub(os.path.basename(ext_name), pyfile)
            to_compile.append(pyfile)
            ext_outputs[p] = ext_name.replace(os.sep, '/')
        if to_compile:
            cmd.byte_compile(to_compile)
        if self.distribution.data_files:
            self.do_install_data()
        # Make the EGG-INFO directory
        archive_root = self.bdist_dir
        egg_info = os.path.join(archive_root, 'EGG-INFO')
        self.mkpath(egg_info)
        if self.distribution.scripts:
            script_dir = os.path.join(egg_info, 'scripts')
            log.info("installing scripts to %s" % script_dir)
            self.call_command('install_scripts', install_dir=script_dir,
                              no_ep=1)
        self.copy_metadata_to(egg_info)
        # native_libs.txt records extension paths for pkg_resources.
        native_libs = os.path.join(egg_info, "native_libs.txt")
        if all_outputs:
            log.info("writing %s" % native_libs)
            if not self.dry_run:
                ensure_directory(native_libs)
                libs_file = open(native_libs, 'wt')
                libs_file.write('\n'.join(all_outputs))
                libs_file.write('\n')
                libs_file.close()
        elif os.path.isfile(native_libs):
            log.info("removing %s" % native_libs)
            if not self.dry_run:
                os.unlink(native_libs)
        write_safety_flag(
            os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
        )
        if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
            log.warn(
                "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
                "Use the install_requires/extras_require setup() args instead."
            )
        if self.exclude_source_files:
            self.zap_pyfiles()
        # Make the archive
        make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
                     dry_run=self.dry_run, mode=self.gen_header())
        if not self.keep_temp:
            remove_tree(self.bdist_dir, dry_run=self.dry_run)
        # Add to 'Distribution.dist_files' so that the "upload" command works
        getattr(self.distribution, 'dist_files', []).append(
            ('bdist_egg', get_python_version(), self.egg_output))

    def zap_pyfiles(self):
        # Used by --exclude-source-files: drop .py sources, keeping bytecode.
        log.info("Removing .py files from temporary directory")
        for base, dirs, files in walk_egg(self.bdist_dir):
            for name in files:
                if name.endswith('.py'):
                    path = os.path.join(base, name)
                    log.debug("Deleting %s", path)
                    os.unlink(path)

    def zip_safe(self):
        # An explicit zip_safe flag from setup() wins; otherwise scan the
        # built tree for patterns that break when run from a zip.
        safe = getattr(self.distribution, 'zip_safe', None)
        if safe is not None:
            return safe
        log.warn("zip_safe flag not set; analyzing archive contents...")
        return analyze_egg(self.bdist_dir, self.stubs)

    def gen_header(self):
        """Return the zipfile open mode for make_zipfile: 'w' for a normal
        egg, or 'a' after writing an eggsecutable shell header to the
        output file (the zip is then appended to that header)."""
        epm = EntryPoint.parse_map(self.distribution.entry_points or '')
        ep = epm.get('setuptools.installation', {}).get('eggsecutable')
        if ep is None:
            return 'w'  # not an eggsecutable, do it the usual way.
        if not ep.attrs or ep.extras:
            raise DistutilsSetupError(
                "eggsecutable entry point (%r) cannot have 'extras' "
                "or refer to a module" % (ep,)
            )
        pyver = sys.version[:3]
        pkg = ep.module_name
        full = '.'.join(ep.attrs)
        base = ep.attrs[0]
        basename = os.path.basename(self.egg_output)
        # Runtime shell script: executed when the egg file itself is run.
        header = (
            "#!/bin/sh\n"
            'if [ `basename $0` = "%(basename)s" ]\n'
            'then exec python%(pyver)s -c "'
            "import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
            "from %(pkg)s import %(base)s; sys.exit(%(full)s())"
            '" "$@"\n'
            'else\n'
            '  echo $0 is not the correct name for this egg file.\n'
            '  echo Please rename it back to %(basename)s and try again.\n'
            '  exec false\n'
            'fi\n'
        ) % locals()
        if not self.dry_run:
            mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
            f = open(self.egg_output, 'w')
            f.write(header)
            f.close()
        return 'a'

    def copy_metadata_to(self, target_dir):
        "Copy metadata (egg info) to the target_dir"
        # normalize the path (so that a forward-slash in egg_info will
        # match using startswith below)
        norm_egg_info = os.path.normpath(self.egg_info)
        prefix = os.path.join(norm_egg_info, '')
        for path in self.ei_cmd.filelist.files:
            if path.startswith(prefix):
                target = os.path.join(target_dir, path[len(prefix):])
                ensure_directory(target)
                self.copy_file(path, target)

    def get_ext_outputs(self):
        """Get a list of relative paths to C extensions in the output distro"""
        all_outputs = []
        ext_outputs = []
        # Map each walked directory to its egg-relative prefix ('' for root).
        paths = {self.bdist_dir: ''}
        for base, dirs, files in os.walk(self.bdist_dir):
            for filename in files:
                if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
                    all_outputs.append(paths[base] + filename)
            for filename in dirs:
                paths[os.path.join(base, filename)] = (paths[base] +
                                                       filename + '/')
        if self.distribution.has_ext_modules():
            build_cmd = self.get_finalized_command('build_ext')
            for ext in build_cmd.extensions:
                if isinstance(ext, Library):
                    # Shared libraries are not importable extensions.
                    continue
                fullname = build_cmd.get_ext_fullname(ext.name)
                filename = build_cmd.get_ext_filename(fullname)
                if not os.path.basename(filename).startswith('dl-'):
                    if os.path.exists(os.path.join(self.bdist_dir, filename)):
                        ext_outputs.append(filename)
        return all_outputs, ext_outputs
# File extensions treated as native (compiled) code when scanning the egg.
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
    """Yield ``(dirpath, dirnames, filenames)`` for *egg_dir*, exactly like
    os.walk() but pruning the top-level EGG-INFO metadata directory."""
    entries = os.walk(egg_dir)
    top, subdirs, filenames = next(entries)
    # Mutate the list in place so os.walk() skips the pruned directory.
    if 'EGG-INFO' in subdirs:
        subdirs.remove('EGG-INFO')
    yield top, subdirs, filenames
    for triple in entries:
        yield triple
def analyze_egg(egg_dir, stubs):
    """Guess whether the unpacked egg at *egg_dir* is zip-safe.

    An explicit flag file in EGG-INFO wins; otherwise every compiled
    module is scanned for zip-unsafe constructs.
    """
    # check for an existing flag in EGG-INFO first
    info_dir = os.path.join(egg_dir, 'EGG-INFO')
    for flag, marker in safety_flags.items():
        if os.path.exists(os.path.join(info_dir, marker)):
            return flag
    if not can_scan():
        return False
    safe = True
    for base, _dirs, files in walk_egg(egg_dir):
        for name in files:
            if name.endswith(('.pyc', '.pyo')):
                # always scan, even if we already know we're not safe
                safe = scan_module(egg_dir, base, name, stubs) and safe
    return safe
def write_safety_flag(egg_dir, safe):
    """Create the marker file matching *safe* in *egg_dir* and remove any
    stale one; ``safe is None`` removes both markers."""
    for flag, marker in safety_flags.items():
        path = os.path.join(egg_dir, marker)
        matches = safe is not None and bool(safe) == flag
        if os.path.exists(path):
            if not matches:
                os.unlink(path)
        elif matches:
            with open(path, 'wt') as handle:
                handle.write('\n')
# Marker file names written into EGG-INFO, keyed by the zip-safety verdict.
safety_flags = {
    True: 'zip-safe',
    False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
    """Check whether a compiled module possibly uses unsafe-for-zipfile
    stuff (``__file__``/``__path__`` references or source-introspecting
    ``inspect`` functions).  Returns False when suspect usage is found.
    """
    filename = os.path.join(base, name)
    if filename[:-1] in stubs:
        return True  # Extension module stub's bytecode - known safe
    pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
    # The .pyc header grew over time: magic+mtime (8 bytes) before 3.3,
    # magic+mtime+size (12 bytes) for 3.3-3.6, and magic+flags+mtime+size
    # (16 bytes) since 3.7 (PEP 552).  The original only handled 8/12 and
    # would mis-read bytecode on 3.7+.
    if sys.version_info < (3, 3):
        skip = 8
    elif sys.version_info < (3, 7):
        skip = 12
    else:
        skip = 16
    with open(filename, 'rb') as f:
        f.read(skip)  # position at the marshalled code object
        code = marshal.load(f)
    safe = True
    symbols = dict.fromkeys(iter_symbols(code))
    for bad in ['__file__', '__path__']:
        if bad in symbols:
            log.warn("%s: module references %s", module, bad)
            safe = False
    if 'inspect' in symbols:
        # BUG FIX: a missing comma after 'getfile' implicitly concatenated
        # it with 'getsourcelines' into the bogus name
        # 'getfilegetsourcelines', so neither function was ever detected.
        for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
            'getinnerframes', 'getouterframes', 'stack', 'trace'
        ]:
            if bad in symbols:
                log.warn("%s: module MAY be using inspect.%s", module, bad)
                safe = False
    return safe
def iter_symbols(code):
    """Yield every name and string constant referenced by *code*,
    descending depth-first into nested code objects."""
    for symbol in code.co_names:
        yield symbol
    for constant in code.co_consts:
        # basestring keeps Python 2 bytes/unicode both covered (compat shim).
        if isinstance(constant, basestring):
            yield constant
        elif isinstance(constant, CodeType):
            for nested in iter_symbols(constant):
                yield nested
def can_scan():
    """Return True when compiled bytecode can be inspected on this
    platform; warn and return None (falsy) on Jython/IronPython."""
    scannable = not sys.platform.startswith('java') and sys.platform != 'cli'
    if scannable:
        # CPython, PyPy, etc. expose marshal-readable code objects.
        return True
    log.warn("Unable to analyze compiled code on this platform.")
    log.warn("Please ask the author to include a 'zip_safe'"
             " setting (either True or False) in the package's setup.py")
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
INSTALL_DIRECTORY_ATTRS = [
    'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
                 mode='w'):
    """Create a zip file from all the files under 'base_dir'.  The output
    zip file will be named 'base_dir' + ".zip".  Uses either the "zipfile"
    Python module (if available) or the InfoZIP "zip" utility (if installed
    and found on the default search path).  If neither tool is available,
    raises DistutilsExecError.  Returns the name of the output zip file.

    mode='a' is used for eggsecutables, appending the archive after the
    shell header already written to zip_filename (see gen_header()).
    """
    import zipfile

    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)

    def visit(z, dirname, names):
        # Add every regular file under dirname, archived relative to base_dir.
        for name in names:
            path = os.path.normpath(os.path.join(dirname, name))
            if os.path.isfile(path):
                p = path[len(base_dir) + 1:]
                if not dry_run:
                    z.write(path, p)
                log.debug("adding '%s'" % p)

    compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
    if not dry_run:
        z = zipfile.ZipFile(zip_filename, mode, compression=compression)
        for dirname, dirs, files in os.walk(base_dir):
            visit(z, dirname, files)
        z.close()
    else:
        # Dry run still walks the tree so the debug log output matches.
        for dirname, dirs, files in os.walk(base_dir):
            visit(None, dirname, files)
    return zip_filename
| mit |
dnlm92/chokoretto | temp/venv/lib/python2.7/site-packages/oauthlib/oauth2/rfc6749/endpoints/authorization.py | 71 | 4591 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from __future__ import absolute_import, unicode_literals
import logging
from oauthlib.common import Request
from .base import BaseEndpoint, catch_errors_and_unavailability
log = logging.getLogger(__name__)
class AuthorizationEndpoint(BaseEndpoint):
    """Authorization endpoint - used by the client to obtain authorization
    from the resource owner via user-agent redirection (RFC 6749, 3.1).

    The authorization server interacts with the resource owner here and
    issues an authorization grant.  Incoming requests are dispatched to a
    response-type handler chosen from the ``response_types`` mapping, with
    ``default_response_type`` used for unrecognized values.  Per the RFC,
    the endpoint URI may carry a query component but no fragment, TLS is
    required (request URIs are expected to be https), parameters sent
    without a value are treated as omitted, unknown parameters are ignored,
    and duplicated parameters are rejected (enforced by the design of
    oauthlib.common.Request).
    """

    def __init__(self, default_response_type, default_token_type,
                 response_types):
        BaseEndpoint.__init__(self)
        self._response_types = response_types
        self._default_response_type = default_response_type
        self._default_token_type = default_token_type

    @property
    def response_types(self):
        return self._response_types

    @property
    def default_response_type(self):
        return self._default_response_type

    @property
    def default_response_type_handler(self):
        return self.response_types.get(self.default_response_type)

    @property
    def default_token_type(self):
        return self._default_token_type

    @catch_errors_and_unavailability
    def create_authorization_response(self, uri, http_method='GET', body=None,
                                      headers=None, scopes=None, credentials=None):
        """Extract response_type and route to the designated handler."""
        request = Request(
            uri, http_method=http_method, body=body, headers=headers)
        request.scopes = scopes
        # TODO: decide whether this should be a required argument
        request.user = None  # TODO: explain this in docs
        # Attach any extra credentials (e.g. state, user info) to the request.
        for attr, value in (credentials or {}).items():
            setattr(request, attr, value)
        handler = self.response_types.get(
            request.response_type, self.default_response_type_handler)
        log.debug('Dispatching response_type %s request to %r.',
                  request.response_type, handler)
        return handler.create_authorization_response(
            request, self.default_token_type)

    @catch_errors_and_unavailability
    def validate_authorization_request(self, uri, http_method='GET', body=None,
                                       headers=None):
        """Extract response_type and route to the designated handler."""
        request = Request(
            uri, http_method=http_method, body=body, headers=headers)
        request.scopes = None
        handler = self.response_types.get(
            request.response_type, self.default_response_type_handler)
        return handler.validate_authorization_request(request)
| mit |
DPaaS-Raksha/raksha | raksha/openstack/common/scheduler/filters/extra_specs_ops.py | 1 | 2336 | # Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from raksha.openstack.common import strutils
# 1. The following operations are supported:
# =, s==, s!=, s>=, s>, s<=, s<, <in>, <is>, <or>, ==, !=, >=, <=
# 2. Note that <or> is handled in a different way below.
# 3. If the first word in the extra_specs is not one of the operators,
# it is ignored.
# Operator token -> two-argument predicate, applied as method(value, operand).
# Numeric operators coerce both sides with float() (may raise ValueError,
# handled by match()); the s-prefixed operators compare as strings.
# NOTE: '=' is literally implemented as numeric >= (value >= requirement),
# not equality — presumably the documented extra-specs "at least" semantics;
# confirm against the scheduler docs before changing.
_op_methods = {'=': lambda x, y: float(x) >= float(y),
               '<in>': lambda x, y: y in x,
               '<is>': lambda x, y: (strutils.bool_from_string(x) is
                                     strutils.bool_from_string(y)),
               '==': lambda x, y: float(x) == float(y),
               '!=': lambda x, y: float(x) != float(y),
               '>=': lambda x, y: float(x) >= float(y),
               '<=': lambda x, y: float(x) <= float(y),
               's==': operator.eq,
               's!=': operator.ne,
               's<': operator.lt,
               's<=': operator.le,
               's>': operator.gt,
               's>=': operator.ge}
def match(value, req):
    """Evaluate extra-specs requirement string *req* against *value*.

    *req* is whitespace-split; the first word may be an operator from
    _op_methods or the special '<or>' keyword.  When no operator is
    recognized, the whole requirement falls back to plain equality.
    The parsing below is pop()-based and order-dependent.
    """
    words = req.split()
    op = method = None
    if words:
        op = words.pop(0)
        method = _op_methods.get(op)
    # No recognized operator: compare the raw requirement for equality.
    if op != '<or>' and not method:
        return value == req
    if value is None:
        return False
    if op == '<or>':  # Ex: <or> v1 <or> v2 <or> v3
        while True:
            # words alternates candidate values and '<or>' separators.
            if words.pop(0) == value:
                return True
            if not words:
                break
            op = words.pop(0)  # remove a keyword <or>
            if not words:
                break
        return False
    try:
        # Apply the operator to (value, operand); float() coercion inside
        # the predicate may raise ValueError for non-numeric input.
        if words and method(value, words[0]):
            return True
    except ValueError:
        pass
    return False
| apache-2.0 |
jaapp/android_kernel_samsung_trltexx | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
    # Python 2 script: sets up a perf event watcher over all CPUs/threads
    # and prints task events as they arrive.
    cpus = perf.cpu_map()
    threads = perf.thread_map()
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, watermark = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
    # NOTE(review): SAMPLE_TID appears twice in sample_type above; harmless
    # since bitwise OR is idempotent, but the second occurrence may have
    # been intended as a different flag — confirm upstream intent.
    evsel.open(cpus = cpus, threads = threads);
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    while True:
        # Block until at least one ring buffer has data, then drain per CPU.
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event

if __name__ == '__main__':
    main()
| gpl-2.0 |
skolome/son-examples | vnfs/sonata-son-emu-sap-docker/iperf_server.py | 1 | 7097 | #!/usr/bin/env python
from prometheus_client import start_http_server, Summary, Histogram, Gauge, Counter, REGISTRY, CollectorRegistry, \
pushadd_to_gateway, push_to_gateway, delete_from_gateway
from subprocess import Popen, PIPE, STDOUT
import threading
from time import sleep
import pty
import os
import re
import logging
logging.basicConfig(level=logging.INFO)
# Maximum number of bytes pulled from the iperf pty per buffered read.
MAX_READ = 1024

# vnf configuration parameters
vnf_name = 'profile_sink'
#pushgateway = 'localhost:9091'
# Prometheus pushgateway address (default docker bridge IP) — hard-coded;
# TODO(review): make configurable via environment/CLI.
pushgateway = '172.17.0.1:9091'
class iperf():
def __init__(self, option_string=''):
self.read_loop = True
options = option_string.split(' ')
cmd = ['iperf'] + options
cmd_str = 'iperf '+ option_string
master, slave = pty.openpty()
self.process = Popen(cmd, stdout=slave, stderr=slave, close_fds=False)
self.stdout = os.fdopen( master, 'r', 10000 )
# buffer which holds the iperf process output to read from
self.readbuf = ''
self.test_str = ''
self.test_end = False
# Prometheus export data
# helper variables to calculate the metrics
self.registry = CollectorRegistry()
#buckets = (0.1, 0.2, 0.5, 1, 2, 5, 7, 10, 20, 50, 70, 90, float("inf"))
self.prom_loss = Gauge('sonemu_packet_loss_percent', 'iperf packet loss (percent)',
['vnf_name'], registry=self.registry)
self.prom_packets_loss = Gauge('sonemu_packets_loss_count', 'iperf packets lost (count)',
['vnf_name'], registry=self.registry)
self.prom_packets_total = Gauge('sonemu_packets_total_count', 'iperf packets total (count)',
['vnf_name'], registry=self.registry)
#buckets = (1, 9, 10, 11, 90, 100, 110, 900, 1000, 1100, float("inf"))
self.prom_bandwith = Gauge('sonemu_bandwith_Mbitspersec', 'iperf bandwith (Mbits/sec)',
['vnf_name'], registry=self.registry)
#buckets = (0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 5, 10, float("inf"))
self.prom_jitter = Gauge('sonemu_jitter_ms', 'iperf jitter (ms)',
['vnf_name'], registry=self.registry)
while True:
data = self.readline()
if data :
logging.info('stdout: {0}'.format(data))
self.parse_beginning_of_test(data)
self.parse_end_of_test(data)
if not self.test_end:
bw = self.parse_bandwith(data)
if bw > 0:
self.prom_bandwith.labels({'vnf_name': vnf_name}).set(bw)
else:
# end of iperf test, no real measurement
continue
loss = self.parse_loss(data)
self.prom_loss.labels({'vnf_name': vnf_name}).set(loss)
lost, total = self.parse_packets(data)
if lost and total:
self.prom_packets_total.labels({'vnf_name': vnf_name}).set(total)
self.prom_packets_loss.labels({'vnf_name': vnf_name}).set(lost)
jitter = self.parse_jitter(data)
self.prom_jitter.labels({'vnf_name': vnf_name}).set(jitter)
else:
self.prom_loss.labels({'vnf_name': vnf_name}).set(float('nan'))
self.prom_jitter.labels({'vnf_name': vnf_name}).set(float('nan'))
pushadd_to_gateway(pushgateway, job='sonemu-profiler', registry=self.registry)
def read( self, maxbytes=MAX_READ ):
"""Buffered read from node, potentially blocking.
maxbytes: maximum number of bytes to return"""
count = len( self.readbuf )
if count < maxbytes:
data = os.read( self.stdout.fileno(), maxbytes - count )
self.readbuf += data.decode("utf-8") # need to decode bytes to string
if maxbytes >= len( self.readbuf ):
result = self.readbuf
self.readbuf = ''
else:
result = self.readbuf[ :maxbytes ]
self.readbuf = self.readbuf[ maxbytes: ]
return result
def readline(self):
"""Buffered readline from node, potentially blocking.
returns: line (minus newline) or None"""
pos = self.readbuf.find('\n')
if pos >=0:
line = self.readbuf[0: pos]
# logging.info('stdout: {0}'.format(line))
# self.parse_loss(line)
self.readbuf = self.readbuf[(pos + 1):]
return line
else:
test_str = self.read(MAX_READ) # get MAX_READ bytes of the buffer
self.readbuf = self.readbuf + test_str
return None
def parse_loss(self,iperf_line):
loss = re.search('(\()((\d+\.)?\d+)(\%\))', iperf_line)
if loss:
logging.info('loss: {0} percent'.format(loss.group(2)))
return float(loss.group(2))
else:
logging.info('no loss found')
return float('nan')
def parse_bandwith(self, iperf_line):
bw = re.search('(\d+\.?\d+)(\sMbits\/sec)', iperf_line)
if bw:
logging.info('bw: {0} Mbits/sec'.format(bw.group(1)))
return float(bw.group(1))
else:
return 0
def parse_packets(self, iperf_line):
match = re.search('(\d+)\/(\d+)', iperf_line)
if match:
lost = match.group(1)
total = match.group(2)
logging.info('packets lost: {0} total: {1}'.format(lost, total))
return int(lost), int(total)
else:
return None, None
def parse_jitter(self, iperf_line):
match = re.search('(\d+\.\d+)\sms', iperf_line)
if match:
logging.info('jitter: {0} ms'.format(match.group(1)))
return float(match.group(1))
else:
logging.info('no jitter found')
return float('nan')
def parse_end_of_test(self, iperf_line):
match = re.search('(-\s-\s)+', iperf_line)
if match:
logging.info('end: {0} '.format(match.group(1)))
self.test_end = True
return match
def parse_beginning_of_test(self, iperf_line):
match = re.search('(--)+', iperf_line)
if match:
logging.info('begin: {0} '.format(match.group(1)))
self.test_end = False
return match
    def read_stdout(self):
        """Continuously drain the wrapped process's stdout until
        self.read_loop is cleared or the process exits, logging each line.
        Intended to run in a dedicated reader thread (blocks on readline)."""
        while self.read_loop:
            print('read')  # NOTE(review): debug leftover; consider removing
            self.process.stdout.flush()
            output = self.process.stdout.readline()
            # An empty read combined with a non-None poll() means the child
            # process has terminated: stop the loop.
            if output == '' and self.process.poll() is not None:
                break
            if output:
                logging.info('stdout: {0}'.format(output))
if __name__ == "__main__":
    # Manual test entry point: start an iperf UDP server reporting in Mbits.
    # (`iperf` is the wrapper class defined earlier in this file.)
    #min is 12bytes
    #iperf_server = iperf('-s -u -l18 -i -fm')
    iperf_server = iperf('-s -u -i -fm')
    #iperf_client = iperf('-c localhost -u -i1')
| apache-2.0 |
sfairhur/pycbc | tools/timing/banksim/banksim.py | 10 | 12363 | #! /usr/bin/env python
# Copyright (C) 2012 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
from time import sleep
import sys
from numpy import loadtxt,complex64,float32
from optparse import OptionParser
from glue.ligolw import utils as ligolw_utils
from glue.ligolw import table, lsctables
from math import pow
from scipy.interpolate import interp1d
from pycbc.utils import mass1_mass2_to_mchirp_eta
from pycbc.waveform import get_td_waveform, get_fd_waveform, td_approximants, fd_approximants
from pycbc import DYN_RANGE_FAC
from pycbc.types import FrequencySeries, TimeSeries, zeros, real_same_precision_as, complex_same_precision_as
from pycbc.filter import match, sigmasq, resample_to_delta_t
from pycbc.scheme import DefaultScheme, CUDAScheme, OpenCLScheme
from pycbc.fft import fft
from math import cos, sin
import pycbc.psd
def update_progress(progress):
    """Render a 50-character progress bar followed by a percentage.

    progress: fraction of work complete, in [0, 1].
    """
    # Integer division (//) is required under Python 3: the original "/"
    # produced a float and made 'str * float' raise TypeError.
    filled = int(progress * 100) // 2
    print('\r\r[{0}] {1:.2%}'.format('#' * filled + ' ' * (50 - filled), progress), end=' ')
    # Callers pass a fraction (index/len), so completion is 1.0 -- the old
    # "progress == 100" test could never fire.
    if progress >= 1:
        print("Done")
    sys.stdout.flush()
## Remove the need for these functions ########################################
def generate_fplus_fcross(latitude, longitude, polarization):
    """Antenna-pattern factors for a detector at the given sky angles.

    Returns the tuple (f_plus, f_cross).
    """
    # Precompute the trigonometric terms shared by both factors.
    cos_lat = cos(latitude)
    cos_2lon = cos(2.0 * longitude)
    sin_2lon = sin(2.0 * longitude)
    cos_2pol = cos(2.0 * polarization)
    sin_2pol = sin(2.0 * polarization)
    shell = (1.0 / 2.0) * (1.0 + cos_lat * cos_lat)
    f_plus = -shell * cos_2lon * cos_2pol - cos_lat * sin_2lon * sin_2pol
    f_cross = shell * cos_2lon * sin_2pol - cos_lat * sin_2lon * cos_2pol
    return f_plus, f_cross
def generate_detector_strain(template_params, h_plus, h_cross):
    """Project plus/cross polarizations into a single detector strain.

    Sky location and polarization are read from template_params when
    present and default to zero otherwise.
    """
    latitude = getattr(template_params, 'latitude', 0)
    longitude = getattr(template_params, 'longitude', 0)
    polarization = getattr(template_params, 'polarization', 0)
    f_plus, f_cross = generate_fplus_fcross(latitude, longitude, polarization)
    return f_plus * h_plus + f_cross * h_cross
def make_padded_frequency_series(vec, filter_N=None):
    """Pad a TimeSeries with a length of zeros greater than its length, such
    that the total length is the closest power of 2. This prevents the effects
    of wraparound.

    vec      : TimeSeries or FrequencySeries to pad / transform.
    filter_N : target time-domain length; when None, the next power of two
               above len(vec) is used.
    Returns a FrequencySeries scaled by DYN_RANGE_FAC (complex64).
    """
    if filter_N is None:
        # ceil/log were never imported at module level, so this branch raised
        # NameError; import locally and force N to an int power of two.
        from math import ceil, log
        power = int(ceil(log(len(vec), 2))) + 1
        N = 2 ** power
    else:
        N = filter_N
    # Integer division: n is used as an array length below (Python 3's "/"
    # would yield a float).
    n = N // 2 + 1

    if isinstance(vec, FrequencySeries):
        vectilde = FrequencySeries(zeros(n, dtype=complex_same_precision_as(vec)),
                                   delta_f=1.0, copy=False)
        # Copy only as much of the input as fits in the target series.
        cplen = min(len(vectilde), len(vec))
        vectilde[0:cplen] = vec[0:cplen]
        delta_f = vec.delta_f

    if isinstance(vec, TimeSeries):
        # Zero-pad the time series to length N, then FFT into a frequency
        # series of length n.
        vec_pad = TimeSeries(zeros(N), delta_t=vec.delta_t,
                             dtype=real_same_precision_as(vec))
        vec_pad[0:len(vec)] = vec
        delta_f = 1.0 / (vec.delta_t * N)
        vectilde = FrequencySeries(zeros(n), delta_f=1.0,
                                   dtype=complex_same_precision_as(vec))
        fft(vec_pad, vectilde)

    vectilde = FrequencySeries(vectilde * DYN_RANGE_FAC, delta_f=delta_f, dtype=complex64)
    return vectilde
def get_waveform(approximant, phase_order, amplitude_order, template_params, start_frequency, sample_rate, length):
    """Generate a waveform and return it as a padded FrequencySeries.

    approximant     : waveform approximant name (TD or FD family)
    phase_order     : PN order for the phase (-1 for highest available)
    amplitude_order : PN order for the amplitude (-1 for highest available)
    template_params : parameter structure (mass, spin, sky location, ...)
    start_frequency : waveform start frequency [Hz]
    sample_rate     : sample rate [Hz]
    length          : target time-domain length for padding
    """
    if approximant in td_approximants():
        hplus, hcross = get_td_waveform(template_params, approximant=approximant,
                                        phase_order=phase_order, delta_t=1.0 / sample_rate,
                                        f_lower=start_frequency, amplitude_order=amplitude_order)
        hvec = generate_detector_strain(template_params, hplus, hcross)
    elif approximant in fd_approximants():
        delta_f = sample_rate / length
        hvec = get_fd_waveform(template_params, approximant=approximant,
                               phase_order=phase_order, delta_f=delta_f,
                               f_lower=start_frequency, amplitude_order=amplitude_order)
    # Pad to the requested length instead of the module-level filter_N global
    # the original relied on.  Call sites always pass length == filter_N, so
    # behaviour is unchanged, but the function no longer depends on script
    # state.  (An unknown approximant still raises here, as before.)
    htilde = make_padded_frequency_series(hvec, length)
    return htilde
###############################################################################

# ---- Command-line interface -------------------------------------------------

#File output Settings
parser = OptionParser()
parser.add_option("--match-file", dest="out_file", help="file to output match results", metavar="FILE")

#PSD Settings
parser.add_option("--asd-file", dest="asd_file", help="two-column ASCII file containing ASD data", metavar="FILE")
parser.add_option("--psd", dest="psd", help="Analytic PSD model from LALSimulation", choices=pycbc.psd.get_lalsim_psd_list())

aprs = list(set(td_approximants() + fd_approximants()))

#Template Settings
parser.add_option("--template-file", dest="bank_file", help="SimInspiral or SnglInspiral XML file containing the template parameters.", metavar="FILE")
parser.add_option("--template-approximant",help="Template Approximant Name: " + str(aprs), choices = aprs)
parser.add_option("--template-phase-order",help="PN order to use for the phase",default=-1,type=int)
parser.add_option("--template-amplitude-order",help="PN order to use for the amplitude",default=-1,type=int)
# Help strings fixed: these two options configure the templates, not the
# injections, and the sample-rate option is a rate, not a frequency.
parser.add_option("--template-start-frequency",help="Starting frequency for templates",type=float)
parser.add_option("--template-sample-rate",help="Sample rate for templates [Hz]",type=float)

#Signal Settings
parser.add_option("--signal-file", dest="sim_file", help="SimInspiral or SnglInspiral XML file containing the signal parameters.", metavar="FILE")
parser.add_option("--signal-approximant",help="Signal Approximant Name: " + str(aprs), choices = aprs)
parser.add_option("--signal-phase-order",help="PN order to use for the phase",default=-1,type=int)
parser.add_option("--signal-amplitude-order",help="PN order to use for the amplitude",default=-1,type=int)
parser.add_option("--signal-start-frequency",help="Starting frequency for injections",type=float)
parser.add_option("--signal-sample-rate",help="Sample rate for injections [Hz]",type=float)

#Filtering Settings
parser.add_option('--filter-low-frequency-cutoff', metavar='FREQ', help='low frequency cutoff of matched filter', type=float)
parser.add_option("--filter-sample-rate",help="Filter Sample Rate [Hz]",type=float)
parser.add_option("--filter-signal-length",help="Length of signal for filtering, shoud be longer than all waveforms and include some padding",type=int)

#Hardware support
parser.add_option("--use-cuda",action="store_true")

#Restricted maximization
parser.add_option("--mchirp-window",type=float)

(options, args) = parser.parse_args()
# Per-waveform sample rates default to the filter sample rate but may be
# overridden individually on the command line.
template_sample_rate = options.filter_sample_rate
signal_sample_rate = options.filter_sample_rate

if options.template_sample_rate:
    template_sample_rate = options.template_sample_rate

if options.signal_sample_rate:
    # Fixed copy/paste bug: this assignment used to clobber
    # template_sample_rate instead of setting signal_sample_rate.
    signal_sample_rate = options.signal_sample_rate

if options.psd and options.asd_file:
    parser.error("PSD and asd-file options are mututally exclusive")

# Select the processing scheme: GPU when requested, CPU otherwise.
if options.use_cuda:
    ctx = CUDAScheme()
else:
    ctx = DefaultScheme()
print("STARTING THE BANKSIM")

# Load in the template bank file.  The bank may be stored either as a
# SnglInspiral or a SimInspiral table, so fall back on ValueError.
indoc = ligolw_utils.load_filename(options.bank_file, False)
try :
    template_table = table.get_table(indoc, lsctables.SnglInspiralTable.tableName)
except ValueError:
    template_table = table.get_table(indoc, lsctables.SimInspiralTable.tableName)

# open the output file where the max overlaps over the bank are stored
fout = open(options.out_file, "w")
fout2 = open(options.out_file+".found", "w")

print("Writing matches to " + options.out_file)
print("Writing recovered template in " + options.out_file+".found")

# Load in the simulation list; same table-type fallback as above, but the
# preferred type here is SimInspiral.
indoc = ligolw_utils.load_filename(options.sim_file, False)
try:
    signal_table = table.get_table(indoc, lsctables.SimInspiralTable.tableName)
except ValueError:
    signal_table = table.get_table(indoc, lsctables.SnglInspiralTable.tableName)
def outside_mchirp_window(template, signal, w):
    """Return True when the template and signal chirp masses differ by more
    than the fractional window w of the signal chirp mass, else False.

    template, signal : objects with mass1/mass2 attributes
    w                : fractional chirp-mass window
    """
    template_mchirp, et = mass1_mass2_to_mchirp_eta(template.mass1, template.mass2)
    signal_mchirp, et = mass1_mass2_to_mchirp_eta(signal.mass1, signal.mass2)
    # The original ended with a bare "False" expression (so the function
    # returned None on that path).  Both are falsy, so callers behave the
    # same, but an explicit boolean return keeps the contract honest.
    return abs(signal_mchirp - template_mchirp) > (w * signal_mchirp)
# Derived filter dimensions.
filter_N = int(options.filter_signal_length * options.filter_sample_rate)
# Integer division is required here: filter_n is a frequency-series length,
# and Python 3's "/" would produce a float that breaks the PSD constructors.
filter_n = filter_N // 2 + 1
filter_delta_f = 1.0 / float(options.filter_signal_length)

print("Number of Signal Waveforms: ",len(signal_table))
print("Number of Templates       : ",len(template_table))

# Build the PSD either from an ASD file or from an analytic LALSimulation
# model, then rescale to the internal dynamic range.
print("Reading and Interpolating PSD")
if options.asd_file:
    psd = pycbc.psd.read.from_txt(options.asd_file, filter_n, filter_delta_f,
                                  options.filter_low_frequency_cutoff)
elif options.psd:
    psd = pycbc.psd.analytic.from_string(options.psd, filter_n, filter_delta_f,
                                         options.filter_low_frequency_cutoff)

psd *= DYN_RANGE_FAC ** 2
psd = FrequencySeries(psd, delta_f=psd.delta_f, dtype=float32)
with ctx:
    # ---- Stage 1: pregenerate every signal waveform once, up front ----
    print("Pregenerating Signals")
    signals = []
    index = 0
    for signal_params in signal_table:
        index += 1
        update_progress(index/len(signal_table))
        stilde = get_waveform(options.signal_approximant,
                              options.signal_phase_order,
                              options.signal_amplitude_order,
                              signal_params,
                              options.signal_start_frequency,
                              options.filter_sample_rate,
                              filter_N)
        # sigma^2 of the signal against the PSD, used to normalise matches.
        s_norm = sigmasq(stilde, psd=psd,
                         low_frequency_cutoff=options.filter_low_frequency_cutoff)
        # Pre-whiten once so each match() call below skips the PSD division.
        stilde /= psd
        # Each entry carries an (initially empty) list of per-template matches.
        signals.append( (stilde, s_norm, [], signal_params) )

    # ---- Stage 2: overlap every template against every signal ----
    print("Calculating Overlaps")
    index = 0
    # Calculate the overlaps
    for template_params in template_table:
        index += 1
        update_progress(float(index)/len(template_table))
        # Template waveform is generated lazily: only when at least one
        # signal survives the chirp-mass window check.
        h_norm = htilde = None
        for stilde, s_norm, matches, signal_params in signals:
            # Check if we need to look at this template
            if options.mchirp_window and outside_mchirp_window(template_params,
                                            signal_params, options.mchirp_window):
                # -1 marks "skipped"; still appended to keep list indices
                # aligned with the template table.
                matches.append(-1)
                continue

            # Generate htilde if we haven't already done so
            if htilde is None:
                htilde = get_waveform(options.template_approximant,
                                      options.template_phase_order,
                                      options.template_amplitude_order,
                                      template_params,
                                      options.template_start_frequency,
                                      options.filter_sample_rate,
                                      filter_N)
                h_norm = sigmasq(htilde, psd=psd,
                                 low_frequency_cutoff=options.filter_low_frequency_cutoff)

            o,i = match(htilde, stilde, h_norm=h_norm, s_norm=s_norm,
                        low_frequency_cutoff=options.filter_low_frequency_cutoff)
            matches.append(o)

#Find the maximum overlap in the bank and output to a file
for stilde, s_norm, matches, sim_template in signals:
    # One line per signal: the best match value, and (in the .found file)
    # the bank file plus the index of the best-matching template.
    match_str= "%5.5f \n" % (max(matches))
    match_str2=" "+options.bank_file+" "+str(matches.index(max(matches)))+"\n"
    fout.write(match_str)
    fout2.write(match_str2)
| gpl-3.0 |
Konubinix/weboob | modules/transilien/test.py | 7 | 1639 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2014 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import datetime
from weboob.capabilities.travel import RoadmapFilters
from weboob.tools.test import BackendTest
class TransilienTest(BackendTest):
    """Smoke tests for the transilien backend: station search, departure
    listing and roadmap computation against the live service."""

    MODULE = 'transilien'

    def test_stations(self):
        # A short prefix should yield at least one matching station.
        matches = list(self.backend.iter_station_search('aul'))
        self.assertTrue(len(matches) > 0)

    def test_departures(self):
        matches = list(self.backend.iter_station_search('paris'))
        self.assertTrue(len(matches) > 0)
        # Exercised only for absence of errors; departures are not asserted.
        list(self.backend.iter_station_departures(matches[0].id))

    def test_roadmap(self):
        filters = RoadmapFilters()
        steps = list(self.backend.iter_roadmap('aul', u'aub', filters))
        self.assertTrue(len(steps) > 0)

        # Same kind of query, constrained to arrive within one day.
        filters.arrival_time = datetime.datetime.now() + datetime.timedelta(days=1)
        steps = list(self.backend.iter_roadmap('aul', u'bag', filters))
        self.assertTrue(len(steps) > 0)
| agpl-3.0 |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_3_0/ipam_sync_broker.py | 12 | 2713 | from ..broker import Broker
class IpamSyncBroker(Broker):
    """Broker for the NetMRI "ipam_sync" controller.

    Wraps the remote endpoints that drive IPAM synchronisation: querying
    the current sequence number and requesting refreshes of network or
    IP objects.
    """

    controller = "ipam_sync"

    def status(self, **kwargs):
        """Gets the highest SeqNo available currently of a given IPAM object.

        **Inputs**

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` True
        |  ``default:`` None

        :param type: The ipam object type to indicate either "network" or "ip".
        :type type: String

        **Outputs**

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :return seq_no: The highest sequence number of a given IPAM object
        :rtype seq_no: Integer
        """
        method = self._get_method_fullname("status")
        return self.api_request(method, kwargs)

    def send_refresh(self, **kwargs):
        """Sends refresh message to ipam sync queue consumed by ipam_syncd.

        **Inputs**

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` True
        |  ``default:`` None

        :param type: The ipam object type to indicate either "network" or "ip".
        :type type: String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` True
        |  ``default:`` None

        :param since_seq_no: sequence number to start from to send ipam objects
        :type since_seq_no: Integer

        **Outputs**
        """
        method = self._get_method_fullname("send_refresh")
        return self.api_request(method, kwargs)

    def send_ip_objects_by_range(self, **kwargs):
        """Sends refresh message to ipam ip sync queue consumed by ipam_ip_syncd.

        **Inputs**

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` True
        |  ``default:`` None

        :param network_view_id: network view id
        :type network_view_id: Integer

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` True
        |  ``default:`` None

        :param ip_start: start of ip address range
        :type ip_start: String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` True
        |  ``default:`` None

        :param ip_end: end of ip address range
        :type ip_end: String

        **Outputs**
        """
        method = self._get_method_fullname("send_ip_objects_by_range")
        return self.api_request(method, kwargs)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.